1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// Command-line knobs, -stats counters, and static pass registration for the
// LiveIntervals analysis.
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// -fast-spill: selects the simpler spilling path (see EnableFastSpilling uses).
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// -early-coalescing: gates performEarlyCoalescing(); off by default.
56 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
// -early-coalescing-limit: cap on coalescings performed; -1 means unlimited.
58 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
59 cl::init(-1), cl::Hidden);
// Counters reported via -stats.
61 STATISTIC(numIntervals , "Number of original intervals");
62 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
63 STATISTIC(numSplits , "Number of intervals split");
64 STATISTIC(numCoalescing, "Number of early coalescing performed");
// Pass ID and registration under the command-line name "liveintervals".
66 char LiveIntervals::ID = 0;
67 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the passes this analysis requires and preserves.
// NOTE(review): the embedded numbering shows this dump omits original lines
// (e.g. the closing brace and some conditional requires); code kept verbatim.
69 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
71 AU.addRequired<AliasAnalysis>();
72 AU.addPreserved<AliasAnalysis>();
73 AU.addPreserved<LiveVariables>();
74 AU.addRequired<LiveVariables>();
75 AU.addPreservedID(MachineLoopInfoID);
76 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must run before live intervals.
79 AU.addPreservedID(PHIEliminationID);
80 AU.addRequiredID(PHIEliminationID);
83 AU.addRequiredID(TwoAddressInstructionPassID);
84 AU.addPreserved<ProcessImplicitDefs>();
85 AU.addRequired<ProcessImplicitDefs>();
86 AU.addPreserved<SlotIndexes>();
// Transitive: SlotIndexes must stay alive as long as LiveIntervals is used.
87 AU.addRequiredTransitive<SlotIndexes>();
88 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all per-function state owned by this analysis.
91 void LiveIntervals::releaseMemory() {
92 // Free the live intervals themselves.
93 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
94 E = r2iMap_.end(); I != E; ++I)
// NOTE(review): the loop body (deleting each interval) and the map clear()
// fall in a numbering gap of this dump.
98 phiJoinCopies.clear();
100 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
101 VNInfoAllocator.Reset();
// Delete instructions cloned during splitting that never joined a block.
102 while (!CloneMIs.empty()) {
103 MachineInstr *MI = CloneMIs.back();
// NOTE(review): the pop_back() between these two lines is in a numbering gap.
105 mf_->DeleteMachineInstr(MI);
109 /// runOnMachineFunction - Register allocate the whole function
111 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache analysis results and target hooks used throughout the pass.
// NOTE(review): mf_ is read before any visible assignment; the omitted
// original line (112) presumably held `mf_ = &fn;` — confirm upstream.
113 mri_ = &mf_->getRegInfo();
114 tm_ = &fn.getTarget();
115 tri_ = tm_->getRegisterInfo();
116 tii_ = tm_->getInstrInfo();
117 aa_ = &getAnalysis<AliasAnalysis>();
118 lv_ = &getAnalysis<LiveVariables>();
119 indexes_ = &getAnalysis<SlotIndexes>();
120 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Optionally coalesce phi-join copies (guarded by -early-coalescing inside).
123 performEarlyCoalescing();
// Record how many intervals were built, for -stats.
125 numIntervals += getNumIntervals();
131 /// print - Implement the dump method.
132 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
133 OS << "********** INTERVALS **********\n";
// Emit every computed interval, using target register names via tri_.
134 for (const_iterator I = begin(), E = end(); I != E; ++I) {
135 I->second->print(OS, tri_);
// printInstrs - Dump every machine instruction in the function, prefixed by
// its SlotIndex and grouped under its basic block's IR name.
142 void LiveIntervals::printInstrs(raw_ostream &OS) const {
143 OS << "********** MACHINEINSTRS **********\n";
145 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
146 mbbi != mbbe; ++mbbi) {
// Label each machine block with the name of the IR block it came from.
147 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
148 for (MachineBasicBlock::iterator mii = mbbi->begin(),
149 mie = mbbi->end(); mii != mie; ++mii) {
150 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug convenience wrapper. Body not visible in this dump
// (numbering gap); presumably forwards to printInstrs(errs()) — confirm.
155 void LiveIntervals::dumpInstrs() const {
159 /// conflictsWithPhysRegDef - Returns true if the specified register
160 /// is defined during the duration of the specified interval.
// NOTE(review): several statements (continue/return/operand filtering) fall
// in numbering gaps of this dump; visible tokens are kept verbatim.
161 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
162 VirtRegMap &vrm, unsigned reg) {
// Visit every instruction index covered by each of li's live ranges.
163 for (LiveInterval::Ranges::const_iterator
164 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
165 for (SlotIndex index = I->start.getBaseIndex(),
166 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
168 index = index.getNextIndex()) {
169 // skip deleted instructions
170 while (index != end && !getInstructionFromIndex(index))
171 index = index.getNextIndex();
172 if (index == end) break;
174 MachineInstr *MI = getInstructionFromIndex(index);
175 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
// A copy to/from li.reg itself is coalescable, not a conflicting def.
176 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
177 if (SrcReg == li.reg || DstReg == li.reg)
179 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
180 MachineOperand& mop = MI->getOperand(i);
183 unsigned PhysReg = mop.getReg();
184 if (PhysReg == 0 || PhysReg == li.reg)
// A virtual register can only conflict through its assigned phys reg.
186 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
187 if (!vrm.hasPhys(PhysReg))
189 PhysReg = vrm.getPhys(PhysReg);
// Conflict if the (possibly mapped) def overlaps the queried register.
191 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
200 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
201 /// it can check use as well.
202 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
203 unsigned Reg, bool CheckUse,
204 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
// Visit every instruction index covered by each of li's live ranges.
205 for (LiveInterval::Ranges::const_iterator
206 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
207 for (SlotIndex index = I->start.getBaseIndex(),
208 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
210 index = index.getNextIndex()) {
211 // Skip deleted instructions.
212 MachineInstr *MI = 0;
213 while (index != end) {
214 MI = getInstructionFromIndex(index);
// NOTE(review): the break-on-found and the JoinedCopies `continue` sit in
// numbering gaps of this dump.
217 index = index.getNextIndex();
219 if (index == end) break;
// Copies scheduled for removal by the coalescer don't count as refs.
221 if (JoinedCopies.count(MI))
223 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
224 MachineOperand& MO = MI->getOperand(i);
// Uses only matter when the caller asked for them.
227 if (MO.isUse() && !CheckUse)
229 unsigned PhysReg = MO.getReg();
230 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
// Conflict when the operand touches a sub-register of Reg.
232 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Print a physical register by its target name, or a virtual
// register as "%reg<N>", to errs(). (The `else` between the two branches
// falls in a numbering gap of this dump.)
242 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
243 if (TargetRegisterInfo::isPhysicalRegister(reg))
244 errs() << tri_->getName(reg);
246 errs() << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a definition of a virtual
// register by instruction `mi` (at index MIIdx, operand MO/MOIdx). Handles
// three cases: the first def of the vreg, a two-address redefinition, and a
// phi-elimination redefinition.
// NOTE(review): this dump omits a number of original lines (parameter lines,
// else branches, closing braces — see numbering gaps); tokens kept verbatim.
250 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
251 MachineBasicBlock::iterator mi,
255 LiveInterval &interval) {
257 errs() << "\t\tregister: ";
258 printRegName(interval.reg, tri_);
261 // Virtual registers may be defined multiple times (due to phi
262 // elimination and 2-addr elimination). Much of what we do only has to be
263 // done once for the vreg. We use an empty interval to detect the first
264 // time we see a vreg.
265 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
266 if (interval.empty()) {
267 // Get the Idx of the defining instructions.
268 SlotIndex defIndex = MIIdx.getDefIndex();
269 // Earlyclobbers move back one, so that they overlap the live range
271 if (MO.isEarlyClobber())
272 defIndex = MIIdx.getUseIndex();
// Remember the defining copy (if any) so the coalescer can chase it later.
274 MachineInstr *CopyMI = NULL;
275 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
276 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
277 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
278 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
279 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
281 // Earlyclobbers move back one.
282 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
284 assert(ValNo->id == 0 && "First value in interval is not 0?");
286 // Loop over all of the blocks that the vreg is defined in. There are
287 // two cases we have to handle here. The most common case is a vreg
288 // whose lifetime is contained within a basic block. In this case there
289 // will be a single kill, in MBB, which comes after the definition.
290 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
291 // FIXME: what about dead vars?
293 if (vi.Kills[0] != mi)
294 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
296 killIdx = defIndex.getStoreIndex();
298 // If the kill happens after the definition, we have an intra-block
300 if (killIdx > defIndex) {
301 assert(vi.AliveBlocks.empty() &&
302 "Shouldn't be alive across any blocks!");
303 LiveRange LR(defIndex, killIdx, ValNo);
304 interval.addRange(LR);
305 DEBUG(errs() << " +" << LR << "\n");
306 ValNo->addKill(killIdx);
311 // The other case we handle is when a virtual register lives to the end
312 // of the defining block, potentially live across some blocks, then is
313 // live into some number of blocks, but gets killed. Start by adding a
314 // range that goes from this definition to the end of the defining block.
315 LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
317 DEBUG(errs() << " +" << NewLR);
318 interval.addRange(NewLR);
320 // Iterate over all of the blocks that the variable is completely
321 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
323 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
324 E = vi.AliveBlocks.end(); I != E; ++I) {
326 getMBBStartIdx(mf_->getBlockNumbered(*I)),
327 getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
329 interval.addRange(LR);
330 DEBUG(errs() << " +" << LR);
333 // Finally, this virtual register is live from the start of any killing
334 // block to the 'use' slot of the killing instruction.
335 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
336 MachineInstr *Kill = vi.Kills[i];
338 getInstructionIndex(Kill).getDefIndex();
339 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
340 interval.addRange(LR);
341 ValNo->addKill(killIdx);
342 DEBUG(errs() << " +" << LR);
346 // If this is the second time we see a virtual register definition, it
347 // must be due to phi elimination or two addr elimination. If this is
348 // the result of two address elimination, then the vreg is one of the
349 // def-and-use register operand.
350 if (mi->isRegTiedToUseOperand(MOIdx)) {
351 // If this is a two-address definition, then we have already processed
352 // the live range. The only problem is that we didn't realize there
353 // are actually two values in the live interval. Because of this we
354 // need to take the LiveRegion that defines this register and split it
356 assert(interval.containsOneValue());
357 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
358 SlotIndex RedefIndex = MIIdx.getDefIndex();
359 if (MO.isEarlyClobber())
360 RedefIndex = MIIdx.getUseIndex();
362 const LiveRange *OldLR =
363 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
364 VNInfo *OldValNo = OldLR->valno;
366 // Delete the initial value, which should be short and continuous,
367 // because the 2-addr copy must be in the same MBB as the redef.
368 interval.removeRange(DefIndex, RedefIndex);
370 // Two-address vregs should always only be redefined once. This means
371 // that at this point, there should be exactly one value number in it.
372 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
374 // The new value number (#1) is defined by the instruction we claimed
376 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
377 false, // update at *
379 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
381 // Value#0 is now defined by the 2-addr instruction.
382 OldValNo->def = RedefIndex;
383 OldValNo->setCopy(0);
384 if (MO.isEarlyClobber())
385 OldValNo->setHasRedefByEC(true);
387 // Add the new live interval which replaces the range for the input copy.
388 LiveRange LR(DefIndex, RedefIndex, ValNo);
389 DEBUG(errs() << " replace range with " << LR);
390 interval.addRange(LR);
391 ValNo->addKill(RedefIndex);
393 // If this redefinition is dead, we need to add a dummy unit live
394 // range covering the def slot.
396 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
400 errs() << " RESULT: ";
401 interval.print(errs(), tri_);
404 // Otherwise, this must be because of phi elimination. If this is the
405 // first redefinition of the vreg that we have seen, go back and change
406 // the live range in the PHI block to be a different value number.
407 if (interval.containsOneValue()) {
408 // Remove the old range that we now know has an incorrect number.
409 VNInfo *VNI = interval.getValNumInfo(0);
410 MachineInstr *Killer = vi.Kills[0];
// Record the phi-join copy so performEarlyCoalescing() can revisit it.
411 phiJoinCopies.push_back(Killer);
412 SlotIndex Start = getMBBStartIdx(Killer->getParent());
413 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
415 errs() << " Removing [" << Start << "," << End << "] from: ";
416 interval.print(errs(), tri_);
419 interval.removeRange(Start, End);
420 assert(interval.ranges.size() == 1 &&
421 "Newly discovered PHI interval has >1 ranges.");
422 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
423 VNI->addKill(indexes_->getTerminatorGap(killMBB));
424 VNI->setHasPHIKill(true);
426 errs() << " RESULT: ";
427 interval.print(errs(), tri_);
430 // Replace the interval with one of a NEW value number. Note that this
431 // value number isn't actually defined by an instruction, weird huh? :)
432 LiveRange LR(Start, End,
433 interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
434 0, false, VNInfoAllocator));
435 LR.valno->setIsPHIDef(true);
436 DEBUG(errs() << " replace range with " << LR);
437 interval.addRange(LR);
438 LR.valno->addKill(End);
440 errs() << " RESULT: ";
441 interval.print(errs(), tri_);
445 // In the case of PHI elimination, each variable definition is only
446 // live until the end of the block. We've already taken care of the
447 // rest of the live range.
448 SlotIndex defIndex = MIIdx.getDefIndex();
449 if (MO.isEarlyClobber())
450 defIndex = MIIdx.getUseIndex();
453 MachineInstr *CopyMI = NULL;
454 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
455 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
456 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
457 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
458 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
460 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// The def is killed at the phi join: live only to the end of this block.
462 SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
463 LiveRange LR(defIndex, killIndex, ValNo);
464 interval.addRange(LR);
465 ValNo->addKill(indexes_->getTerminatorGap(mbb));
466 ValNo->setHasPHIKill(true);
467 DEBUG(errs() << " +" << LR);
471 DEBUG(errs() << '\n');
// handlePhysicalRegisterDef - Add a live range to `interval` for a def of a
// physical register. The range always ends within the defining block, found
// by scanning forward for a kill, a two-address reuse, or a redefinition.
// NOTE(review): this dump omits lines (parameters, if-guards, braces); the
// visible tokens are kept verbatim.
474 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
475 MachineBasicBlock::iterator mi,
478 LiveInterval &interval,
479 MachineInstr *CopyMI) {
480 // A physical register cannot be live across basic block, so its
481 // lifetime must end somewhere in its defining basic block.
483 errs() << "\t\tregister: ";
484 printRegName(interval.reg, tri_);
487 SlotIndex baseIndex = MIIdx;
488 SlotIndex start = baseIndex.getDefIndex();
489 // Earlyclobbers move back one.
490 if (MO.isEarlyClobber())
491 start = MIIdx.getUseIndex();
492 SlotIndex end = start;
494 // If it is not used after definition, it is considered dead at
495 // the instruction defining it. Hence its interval is:
496 // [defSlot(def), defSlot(def)+1)
497 // For earlyclobbers, the defSlot was pushed back one; the extra
498 // advance below compensates.
500 DEBUG(errs() << " dead");
501 end = start.getStoreIndex();
505 // If it is not dead on definition, it must be killed by a
506 // subsequent instruction. Hence its interval is:
507 // [defSlot(def), useSlot(kill)+1)
508 baseIndex = baseIndex.getNextIndex();
// Scan the rest of the block for the instruction that ends this range.
509 while (++mi != MBB->end()) {
511 if (getInstructionFromIndex(baseIndex) == 0)
512 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
514 if (mi->killsRegister(interval.reg, tri_)) {
515 DEBUG(errs() << " killed");
516 end = baseIndex.getDefIndex();
519 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
521 if (mi->isRegTiedToUseOperand(DefIdx)) {
522 // Two-address instruction.
523 end = baseIndex.getDefIndex();
524 assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
525 "Two address instruction is an early clobber?");
527 // Another instruction redefines the register before it is ever read.
528 // Then the register is essentially dead at the instruction that defines
529 // it. Hence its interval is:
530 // [defSlot(def), defSlot(def)+1)
531 DEBUG(errs() << " dead");
532 end = start.getStoreIndex();
538 baseIndex = baseIndex.getNextIndex();
541 // The only case we should have a dead physreg here without a killing or
542 // instruction where we know it's dead is if it is live-in to the function
543 // and never used. Another possible case is the implicit use of the
544 // physical register has been deleted by two-address pass.
545 end = start.getStoreIndex();
548 assert(start < end && "did not find end of interval?");
550 // Already exists? Extend old live interval.
551 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
552 bool Extend = OldLR != interval.end();
// Reuse the existing value number when extending; otherwise mint a new one.
553 VNInfo *ValNo = Extend
554 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
555 if (MO.isEarlyClobber() && Extend)
556 ValNo->setHasRedefByEC(true);
557 LiveRange LR(start, end, ValNo);
558 interval.addRange(LR);
559 LR.valno->addKill(end);
560 DEBUG(errs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler; for an allocatable physreg, also process its
// sub-registers, since defining a register defines them too.
563 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
564 MachineBasicBlock::iterator MI,
568 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
569 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
570 getOrCreateInterval(MO.getReg()));
// Non-allocatable physical registers are not tracked at all.
571 else if (allocatableRegs_[MO.getReg()]) {
572 MachineInstr *CopyMI = NULL;
573 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
// Record the defining copy (if this is one) for the coalescer's benefit.
574 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
575 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
576 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
577 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
579 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580 getOrCreateInterval(MO.getReg()), CopyMI);
581 // Def of a register also defines its sub-registers.
582 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
583 // If MI also modifies the sub-register explicitly, avoid processing it
584 // more than once. Do not pass in TRI here so it checks for exact match.
585 if (!MI->modifiesRegister(*AS))
586 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
587 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the live range for a register that is live-in
// to MBB: from the block start until its first kill, first redef, or (if
// never referenced) treat it as dead or live-through depending on isAlias.
// NOTE(review): numbering gaps show omitted lines (loop advance of `mi`,
// SeenDefUse updates, braces); visible tokens kept verbatim.
591 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
593 LiveInterval &interval, bool isAlias) {
595 errs() << "\t\tlivein register: ";
596 printRegName(interval.reg, tri_);
599 // Look for kills, if it reaches a def before it's killed, then it shouldn't
600 // be considered a livein.
601 MachineBasicBlock::iterator mi = MBB->begin();
602 SlotIndex baseIndex = MIIdx;
603 SlotIndex start = baseIndex;
604 if (getInstructionFromIndex(baseIndex) == 0)
605 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
607 SlotIndex end = baseIndex;
608 bool SeenDefUse = false;
// Scan forward for the first kill or redefinition of the live-in register.
610 while (mi != MBB->end()) {
611 if (mi->killsRegister(interval.reg, tri_)) {
612 DEBUG(errs() << " killed");
613 end = baseIndex.getDefIndex();
616 } else if (mi->modifiesRegister(interval.reg, tri_)) {
617 // Another instruction redefines the register before it is ever read.
618 // Then the register is essentially dead at the instruction that defines
619 // it. Hence its interval is:
620 // [defSlot(def), defSlot(def)+1)
621 DEBUG(errs() << " dead");
622 end = start.getStoreIndex();
628 if (mi != MBB->end()) {
629 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
633 // Live-in register might not be used at all.
636 DEBUG(errs() << " dead");
637 end = MIIdx.getStoreIndex();
639 DEBUG(errs() << " live through");
// The live-in value is a phi-def: it has no defining instruction here.
645 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
646 0, false, VNInfoAllocator);
647 vni->setIsPHIDef(true);
648 LiveRange LR(start, end, vni);
650 interval.addRange(LR);
651 LR.valno->addKill(end);
652 DEBUG(errs() << " +" << LR << '\n');
// isProfitableToCoalesce - Classify every def of SrcInt.reg into identity
// copies (source is DstInt.reg) vs. other copies, and decide whether early
// coalescing of the phi join pays off (identity copies must dominate).
656 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
657 SmallVector<MachineInstr*,16> &IdentCopies,
658 SmallVector<MachineInstr*,16> &OtherCopies) {
659 bool HaveConflict = false;
660 unsigned NumIdent = 0;
661 for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
662 re = mri_->def_end(); ri != re; ++ri) {
663 MachineInstr *MI = &*ri;
664 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
// Any non-copy def of the source disqualifies it (return on omitted line).
665 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
667 if (SrcReg != DstInt.reg) {
668 OtherCopies.push_back(MI);
// A non-identity copy conflicts if the destination is already live there.
669 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
671 IdentCopies.push_back(MI);
677 return false; // Let coalescer handle it
678 return IdentCopies.size() > OtherCopies.size();
// performEarlyCoalescing - Eliminate phi-join copies recorded in
// phiJoinCopies: retarget non-identity copies at the phi destination, erase
// identity copies outright, then delete the join copy itself and transfer
// the source interval's liveness to the destination. Gated by the
// -early-coalescing flag and bounded by -early-coalescing-limit.
// NOTE(review): numbering gaps show omitted lines (early returns, continue
// statements, braces, a numCoalescing increment); tokens kept verbatim.
681 void LiveIntervals::performEarlyCoalescing() {
682 if (!EarlyCoalescing)
685 /// Perform early coalescing: eliminate copies which feed into phi joins
686 /// and whose sources are defined by the phi joins.
687 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
688 MachineInstr *Join = phiJoinCopies[i];
// Honor the -early-coalescing-limit cap (-1 means unlimited).
689 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
692 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
693 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
695 assert(isMove && "PHI join instruction must be a move!");
700 LiveInterval &DstInt = getInterval(PHIDst);
701 LiveInterval &SrcInt = getInterval(PHISrc);
702 SmallVector<MachineInstr*, 16> IdentCopies;
703 SmallVector<MachineInstr*, 16> OtherCopies;
704 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
707 DEBUG(errs() << "PHI Join: " << *Join);
708 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
709 VNInfo *VNI = DstInt.getValNumInfo(0);
711 // Change the non-identity copies to directly target the phi destination.
712 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
713 MachineInstr *PHICopy = OtherCopies[i];
714 DEBUG(errs() << "Moving: " << *PHICopy);
716 SlotIndex MIIndex = getInstructionIndex(PHICopy);
717 SlotIndex DefIndex = MIIndex.getDefIndex();
718 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
719 SlotIndex StartIndex = SLR->start;
720 SlotIndex EndIndex = SLR->end;
722 // Delete val# defined by the now identity copy and add the range from
723 // beginning of the mbb to the end of the range.
724 SrcInt.removeValNo(SLR->valno);
725 DEBUG(errs() << " added range [" << StartIndex << ','
726 << EndIndex << "] to reg" << DstInt.reg << '\n');
727 if (DstInt.liveAt(StartIndex))
728 DstInt.removeRange(StartIndex, EndIndex);
729 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
731 NewVNI->setHasPHIKill(true);
732 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
// Rewrite the copy's operands from the phi source to the phi destination.
733 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
734 MachineOperand &MO = PHICopy->getOperand(j);
735 if (!MO.isReg() || MO.getReg() != PHISrc)
741 // Now let's eliminate all the would-be identity copies.
742 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
743 MachineInstr *PHICopy = IdentCopies[i];
744 DEBUG(errs() << "Coalescing: " << *PHICopy);
746 SlotIndex MIIndex = getInstructionIndex(PHICopy);
747 SlotIndex DefIndex = MIIndex.getDefIndex();
748 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
749 SlotIndex StartIndex = SLR->start;
750 SlotIndex EndIndex = SLR->end;
752 // Delete val# defined by the now identity copy and add the range from
753 // beginning of the mbb to the end of the range.
754 SrcInt.removeValNo(SLR->valno);
755 RemoveMachineInstrFromMaps(PHICopy);
756 PHICopy->eraseFromParent();
757 DEBUG(errs() << " added range [" << StartIndex << ','
758 << EndIndex << "] to reg" << DstInt.reg << '\n');
759 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
762 // Remove the phi join and update the phi block liveness.
763 SlotIndex MIIndex = getInstructionIndex(Join);
764 SlotIndex UseIndex = MIIndex.getUseIndex();
765 SlotIndex DefIndex = MIIndex.getDefIndex();
766 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
767 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
// The destination value is no longer defined by a copy instruction.
768 DLR->valno->setCopy(0);
769 DLR->valno->setIsDefAccurate(false);
770 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
771 SrcInt.removeRange(SLR->start, SLR->end);
772 assert(SrcInt.empty());
// The source interval is fully absorbed; drop it and the join copy.
773 removeInterval(PHISrc);
774 RemoveMachineInstrFromMaps(Join);
775 Join->eraseFromParent();
781 /// computeIntervals - computes the live intervals for virtual
782 /// registers. for some ordering of the machine instructions [1,N] a
783 /// live interval is an interval [i, j) where 1 <= i <= j < N for
784 /// which a variable is live
// NOTE(review): numbering gaps show omitted lines (loop increments, operand
// def checks, braces); visible tokens kept verbatim.
785 void LiveIntervals::computeIntervals() {
786 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
787 << "********** Function: "
788 << ((Value*)mf_->getFunction())->getName() << '\n');
// Registers whose only remaining references are <undef> uses.
790 SmallVector<unsigned, 8> UndefUses;
791 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
793 MachineBasicBlock *MBB = MBBI;
794 // Track the index of the current machine instr.
795 SlotIndex MIIndex = getMBBStartIdx(MBB);
796 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
798 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
800 // Create intervals for live-ins to this BB first.
801 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
802 LE = MBB->livein_end(); LI != LE; ++LI) {
803 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
804 // Multiple live-ins can alias the same register.
805 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
806 if (!hasInterval(*AS))
807 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
811 // Skip over empty initial indices.
812 if (getInstructionFromIndex(MIIndex) == 0)
813 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
815 for (; MI != miEnd; ++MI) {
816 DEBUG(errs() << MIIndex << "\t" << *MI);
// Walk operands in reverse so defs are handled after uses of the same MI.
819 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
820 MachineOperand &MO = MI->getOperand(i);
821 if (!MO.isReg() || !MO.getReg())
824 // handle register defs - build intervals
826 handleRegisterDef(MBB, MI, MIIndex, MO, i);
827 else if (MO.isUndef())
828 UndefUses.push_back(MO.getReg());
831 // Move to the next instr slot.
832 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
836 // Create empty intervals for registers defined by implicit_def's (except
837 // for those implicit_def that define values which are liveout of their
839 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
840 unsigned UndefReg = UndefUses[i];
841 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a new LiveInterval for `reg`. Physical registers
// get HUGE_VALF weight so the allocator never considers spilling them.
845 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
846 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
847 return new LiveInterval(reg, Weight);
850 /// dupInterval - Duplicate a live interval. The caller is responsible for
851 /// managing the allocated memory.
852 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
853 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers into the shared VNInfo allocator.
// (The `return NewLI;` falls in a numbering gap of this dump.)
854 NewLI->Copy(*li, mri_, getVNInfoAllocator());
858 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
859 /// copy field and returns the source register that defines it.
// Returns 0 cases and early guards fall in numbering gaps of this dump.
860 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
864 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
865 // If it's extracting out of a physical register, return the sub-register.
866 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
867 if (TargetRegisterInfo::isPhysicalRegister(Reg))
868 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG carry the inserted value in operand 2.
870 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
871 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
872 return VNI->getCopy()->getOperand(2).getReg();
// Plain moves: ask the target parser for the source register.
874 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
875 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
877 llvm_unreachable("Unrecognized copy instruction!");
881 //===----------------------------------------------------------------------===//
882 // Register allocator hooks.
885 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
886 /// allow one) virtual register operand, then its uses are implicitly using
887 /// the register. Returns the virtual register.
888 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
889 MachineInstr *MI) const {
// Scan use operands for a single register other than li.reg itself.
891 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
892 MachineOperand &MO = MI->getOperand(i);
893 if (!MO.isReg() || !MO.isUse())
895 unsigned Reg = MO.getReg();
896 if (Reg == 0 || Reg == li.reg)
// Reserved (non-allocatable) physregs like the PIC base don't count.
899 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
900 !allocatableRegs_[Reg])
902 // FIXME: For now, only remat MI with at most one register operand.
904 "Can't rematerialize instruction with multiple register operand!");
913 /// isValNoAvailableAt - Return true if the val# of the specified interval
914 /// which reaches the given instruction also reaches the specified use index.
915 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
916 SlotIndex UseIdx) const {
917 SlotIndex Index = getInstructionIndex(MI);
// Find the value live at MI, then check the same value covers UseIdx.
918 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
919 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
920 return UI != li.end() && UI->valno == ValNo;
923 /// isReMaterializable - Returns true if the definition MI of the specified
924 /// val# of the specified interval is re-materializable.
// NOTE(review): parameter lines, early returns, and the final return fall in
// numbering gaps of this dump; visible tokens kept verbatim.
925 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
926 const VNInfo *ValNo, MachineInstr *MI,
927 SmallVectorImpl<LiveInterval*> &SpillIs,
932 if (!tii_->isTriviallyReMaterializable(MI, aa_))
935 // Target-specific code can mark an instruction as being rematerializable
936 // if it has one virtual reg use, though it had better be something like
937 // a PIC base register which is likely to be live everywhere.
938 unsigned ImpUse = getReMatImplicitUse(li, MI);
940 const LiveInterval &ImpLi = getInterval(ImpUse);
// The implicit-use value must reach every use of li fed by this val#.
941 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
942 re = mri_->use_end(); ri != re; ++ri) {
943 MachineInstr *UseMI = &*ri;
944 SlotIndex UseIdx = getInstructionIndex(UseMI);
945 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
947 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
951 // If a register operand of the re-materialized instruction is going to
952 // be spilled next, then it's not legal to re-materialize this instruction.
953 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
954 if (ImpUse == SpillIs[i]->reg)
960 /// isReMaterializable - Returns true if the definition MI of the specified
961 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards with throwaway out-arguments.
// (Dummy2's declaration falls in a numbering gap of this dump.)
962 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
963 const VNInfo *ValNo, MachineInstr *MI) {
964 SmallVector<LiveInterval*, 4> Dummy1;
966 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
969 /// isReMaterializable - Returns true if every definition of MI of every
970 /// val# of the specified interval is re-materializable.
971 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
972 SmallVectorImpl<LiveInterval*> &SpillIs,
// Walk every value number of the interval; each live val# must have an
// accurately recorded def that is itself rematerializable (per-val# check).
975 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
977 const VNInfo *VNI = *i;
979 continue; // Dead val#.
980 // Is the def for the val# rematerializable?
// A val# whose def location is not accurately known cannot be rematted.
981 if (!VNI->isDefAccurate())
983 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
984 bool DefIsLoad = false;
986 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
993 /// FilterFoldedOps - Filter out two-address use operands. Return
994 /// true if it finds any issue with the operands that ought to prevent
996 static bool FilterFoldedOps(MachineInstr *MI,
997 SmallVector<unsigned, 2> &Ops,
999 SmallVector<unsigned, 2> &FoldOps) {
// For each candidate operand index, accumulate mod/ref info (into the MRInfo
// out-parameter, whose declaration line is elided here) and collect the
// indexes that remain safe to hand to the target's folder.
1001 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1002 unsigned OpIdx = Ops[i];
1003 MachineOperand &MO = MI->getOperand(OpIdx);
1004 // FIXME: fold subreg use.
1008 MRInfo |= (unsigned)VirtRegMap::isMod;
1010 // Filter out two-address use operand(s).
1011 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A use tied to a def means the instruction both reads and writes the reg.
1012 MRInfo = VirtRegMap::isModRef;
1015 MRInfo |= (unsigned)VirtRegMap::isRef;
1017 FoldOps.push_back(OpIdx);
1023 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1024 /// slot / to reg or any rematerialized load into ith operand of specified
1025 /// MI. If it is successul, MI is updated with the newly created MI and
// NOTE(review): several original lines are elided in this excerpt (the line
// numbers embedded at the start of each line skip), including early returns
// after the IMPLICIT_DEF erase and the failed-fold path.
1027 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1028 VirtRegMap &vrm, MachineInstr *DefMI,
1030 SmallVector<unsigned, 2> &Ops,
1031 bool isSS, int Slot, unsigned Reg) {
1032 // If it is an implicit def instruction, just delete it.
1033 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1034 RemoveMachineInstrFromMaps(MI);
1035 vrm.RemoveMachineInstrFromMaps(MI);
1036 MI->eraseFromParent();
1041 // Filter the list of operand indexes that are to be folded. Abort if
1042 // any operand will prevent folding.
1043 unsigned MRInfo = 0;
1044 SmallVector<unsigned, 2> FoldOps;
1045 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1048 // The only time it's safe to fold into a two address instruction is when
1049 // it's folding reload and spill from / into a spill stack slot.
1050 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (spill/reload) or the
// rematerialized def instruction into MI.
1053 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1054 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1056 // Remember this instruction uses the spill slot.
1057 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1059 // Attempt to fold the memory reference into the instruction. If
1060 // we can do this, we don't need to insert spill code.
1061 MachineBasicBlock &MBB = *MI->getParent();
1062 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1063 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all VirtRegMap bookkeeping from the old instruction to the folded
// one, then splice the folded instruction in place of the original.
1064 vrm.transferSpillPts(MI, fmi);
1065 vrm.transferRestorePts(MI, fmi);
1066 vrm.transferEmergencySpills(MI, fmi);
1067 ReplaceMachineInstrInMaps(MI, fmi);
1068 MI = MBB.insert(MBB.erase(MI), fmi);
1075 /// canFoldMemoryOperand - Returns true if the specified load / store
1076 /// folding is possible.
// Dry-run version of tryFoldMemoryOperand: performs the same operand
// filtering and legality checks, then defers to the target without mutating
// anything. (The trailing parameter line is elided in this excerpt.)
1077 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1078 SmallVector<unsigned, 2> &Ops,
1080 // Filter the list of operand indexes that are to be folded. Abort if
1081 // any operand will prevent folding.
1082 unsigned MRInfo = 0;
1083 SmallVector<unsigned, 2> FoldOps;
1084 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1087 // It's only legal to remat for a use, not a def.
1088 if (ReMat && (MRInfo & VirtRegMap::isMod))
1091 return tii_->canFoldMemoryOperand(MI, FoldOps);
// Returns true when every live range of li is covered by one basic block.
// NOTE(review): the return statements and closing braces are elided from this
// excerpt (embedded line numbers skip 1096, 1098-1101, 1105+).
1094 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1095 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; the loop compares all others against it.
1097 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1102 for (++itr; itr != li.ranges.end(); ++itr) {
1103 MachineBasicBlock *mbb2 =
1104 indexes_->getMBBCoveringRange(itr->start, itr->end);
1113 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1114 /// interval on to-be re-materialized operands of MI) with new register.
1115 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1116 MachineInstr *MI, unsigned NewVReg,
1118 // There is an implicit use. That means one of the other operand is
1119 // being remat'ed and the remat'ed instruction has li.reg as an
1120 // use operand. Make sure we rewrite that as well.
1121 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1122 MachineOperand &MO = MI->getOperand(i);
// Only virtual registers that were rematerialized are of interest here;
// physical regs and non-remat vregs are skipped (guard bodies elided).
1125 unsigned Reg = MO.getReg();
1126 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1128 if (!vrm.isReMaterialized(Reg))
// Redirect the remat template's use of li.reg to the replacement vreg.
1130 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1131 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1133 UseMO->setReg(NewVReg);
1137 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1138 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// NOTE(review): this excerpt is heavily elided (embedded original line numbers
// skip throughout); guard bodies, else-branches and several declarations are
// missing, so comments below describe only what the visible lines establish.
1139 bool LiveIntervals::
1140 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1141 bool TrySplit, SlotIndex index, SlotIndex end,
1143 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1144 unsigned Slot, int LdSlot,
1145 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1147 const TargetRegisterClass* rc,
1148 SmallVector<int, 4> &ReMatIds,
1149 const MachineLoopInfo *loopInfo,
1150 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1151 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1152 std::vector<LiveInterval*> &NewLIs) {
1153 bool CanFold = false;
// Scan MI's operands looking for references to the spilled register.
1155 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1156 MachineOperand& mop = MI->getOperand(i);
1159 unsigned Reg = mop.getReg();
1160 unsigned RegI = Reg;
1161 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1166 bool TryFold = !DefIsReMat;
1167 bool FoldSS = true; // Default behavior unless it's a remat.
1168 int FoldSlot = Slot;
1170 // If this is the rematerializable definition MI itself and
1171 // all of its uses are rematerialized, simply delete it.
1172 if (MI == ReMatOrigDefMI && CanDelete) {
// NOTE(review): "re-materlizable" typo below is inside a debug string;
// left untouched here since changing runtime strings is out of scope.
1173 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1175 RemoveMachineInstrFromMaps(MI);
1176 vrm.RemoveMachineInstrFromMaps(MI);
1177 MI->eraseFromParent();
1181 // If def for this use can't be rematerialized, then try folding.
1182 // If def is rematerializable and it's a load, also try folding.
1183 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1185 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1191 // Scan all of the operands of this instruction rewriting operands
1192 // to use NewVReg instead of li.reg as appropriate. We do this for
1195 // 1. If the instr reads the same spilled vreg multiple times, we
1196 // want to reuse the NewVReg.
1197 // 2. If the instr is a two-addr instruction, we are required to
1198 // keep the src/dst regs pinned.
1200 // Keep track of whether we replace a use and/or def so that we can
1201 // create the spill interval with the appropriate range.
1203 HasUse = mop.isUse();
1204 HasDef = mop.isDef();
1205 SmallVector<unsigned, 2> Ops;
// Collect the later operands of MI that reference the same register so they
// can all be rewritten to the single replacement vreg.
1207 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1208 const MachineOperand &MOj = MI->getOperand(j);
1211 unsigned RegJ = MOj.getReg();
1212 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1216 if (!MOj.isUndef()) {
1217 HasUse |= MOj.isUse();
1218 HasDef |= MOj.isDef();
1223 // Create a new virtual register for the spill interval.
1224 // Create the new register now so we can map the fold instruction
1225 // to the new register so when it is unfolded we get the correct
1227 bool CreatedNewVReg = false;
1229 NewVReg = mri_->createVirtualRegister(rc);
1231 CreatedNewVReg = true;
1237 // Do not fold load / store here if we are splitting. We'll find an
1238 // optimal point to insert a load / store later.
1240 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1241 Ops, FoldSS, FoldSlot, NewVReg)) {
1242 // Folding the load/store can completely change the instruction in
1243 // unpredictable ways, rescan it from the beginning.
1246 // We need to give the new vreg the same stack slot as the
1247 // spilled interval.
1248 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1254 if (isNotInMIMap(MI))
1256 goto RestartInstruction;
1259 // We'll try to fold it later if it's profitable.
1260 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, and any implicit remat uses, to the new vreg.
1264 mop.setReg(NewVReg);
1265 if (mop.isImplicit())
1266 rewriteImplicitOps(li, MI, NewVReg, vrm);
1268 // Reuse NewVReg for other reads.
1269 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1270 MachineOperand &mopj = MI->getOperand(Ops[j]);
1271 mopj.setReg(NewVReg);
1272 if (mopj.isImplicit())
1273 rewriteImplicitOps(li, MI, NewVReg, vrm);
1276 if (CreatedNewVReg) {
1278 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1279 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1280 // Each valnum may have its own remat id.
1281 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1283 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1285 if (!CanDelete || (HasUse && HasDef)) {
1286 // If this is a two-addr instruction then its use operands are
1287 // rematerializable but its def is not. It should be assigned a
1289 vrm.assignVirt2StackSlot(NewVReg, Slot);
1292 vrm.assignVirt2StackSlot(NewVReg, Slot);
1294 } else if (HasUse && HasDef &&
1295 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1296 // If this interval hasn't been assigned a stack slot (because earlier
1297 // def is a deleted remat def), do it now.
1298 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1299 vrm.assignVirt2StackSlot(NewVReg, Slot);
1302 // Re-matting an instruction with virtual register use. Add the
1303 // register as an implicit use on the use MI.
1304 if (DefIsReMat && ImpUse)
1305 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1307 // Create a new register interval for this spill / remat.
1308 LiveInterval &nI = getOrCreateInterval(NewVReg);
1309 if (CreatedNewVReg) {
1310 NewLIs.push_back(&nI);
1311 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1313 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Grow the replacement interval to cover this def and/or use.
1317 if (CreatedNewVReg) {
1318 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1319 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1320 DEBUG(errs() << " +" << LR);
1323 // Extend the split live interval to this def / use.
1324 SlotIndex End = index.getDefIndex();
1325 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1326 nI.getValNumInfo(nI.getNumValNums()-1));
1327 DEBUG(errs() << " +" << LR);
1332 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1333 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1334 DEBUG(errs() << " +" << LR);
1339 errs() << "\t\t\t\tAdded new interval: ";
1340 nI.print(errs(), tri_);
// Returns whether any recorded kill of VNI falls inside MBB strictly after
// Idx. NOTE(review): the VNI parameter declaration (original line 1347) and
// the return statements are elided from this excerpt.
1346 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1348 MachineBasicBlock *MBB,
1349 SlotIndex Idx) const {
1350 SlotIndex End = getMBBEndIdx(MBB);
1351 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are not real instruction positions; skip them.
1352 if (VNI->kills[j].isPHI())
1355 SlotIndex KillIdx = VNI->kills[j];
1356 if (KillIdx > Idx && KillIdx < End)
1362 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1363 /// during spilling.
// NOTE(review): the struct's data members (original lines 1366-1369) are
// elided from this excerpt; the constructor shows them to be Index, MI,
// HasUse and HasDef.
1365 struct RewriteInfo {
1370 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1371 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by slot index, used to sort RewriteInfo vectors so
// defs/uses are visited in instruction order.
1374 struct RewriteInfoCompare {
1375 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1376 return LHS.Index < RHS.Index;
// Rewrite all defs/uses of li that fall inside the live range *I, creating
// replacement vregs and recording per-MBB spill/restore points.
// NOTE(review): heavily elided excerpt — guard bodies, else-branches and the
// vrm/MI parameter lines are missing (embedded line numbers skip); comments
// below only state what the visible lines show.
1381 void LiveIntervals::
1382 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1383 LiveInterval::Ranges::const_iterator &I,
1384 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1385 unsigned Slot, int LdSlot,
1386 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1388 const TargetRegisterClass* rc,
1389 SmallVector<int, 4> &ReMatIds,
1390 const MachineLoopInfo *loopInfo,
1391 BitVector &SpillMBBs,
1392 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1393 BitVector &RestoreMBBs,
1394 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1395 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1396 std::vector<LiveInterval*> &NewLIs) {
1397 bool AllCanFold = true;
1398 unsigned NewVReg = 0;
1399 SlotIndex start = I->start.getBaseIndex();
1400 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1402 // First collect all the def / use in this live range that will be rewritten.
1403 // Make sure they are sorted according to instruction index.
1404 std::vector<RewriteInfo> RewriteMIs;
1405 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1406 re = mri_->reg_end(); ri != re; ) {
1407 MachineInstr *MI = &*ri;
1408 MachineOperand &O = ri.getOperand();
1410 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1411 SlotIndex index = getInstructionIndex(MI);
// Skip references outside [start, end) — they belong to other ranges.
1412 if (index < start || index >= end)
1416 // Must be defined by an implicit def. It should not be spilled. Note,
1417 // this is for correctness reason. e.g.
1418 // 8 %reg1024<def> = IMPLICIT_DEF
1419 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1420 // The live range [12, 14) are not part of the r1024 live interval since
1421 // it's defined by an implicit def. It will not conflicts with live
1422 // interval of r1025. Now suppose both registers are spilled, you can
1423 // easily see a situation where both registers are reloaded before
1424 // the INSERT_SUBREG and both target registers that would overlap.
1426 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1428 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1430 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1431 // Now rewrite the defs and uses.
1432 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1433 RewriteInfo &rwi = RewriteMIs[i];
1435 SlotIndex index = rwi.Index;
1436 bool MIHasUse = rwi.HasUse;
1437 bool MIHasDef = rwi.HasDef;
1438 MachineInstr *MI = rwi.MI;
1439 // If MI def and/or use the same register multiple times, then there
1440 // are multiple entries.
1441 unsigned NumUses = MIHasUse;
// Coalesce consecutive entries for the same instruction into one visit.
1442 while (i != e && RewriteMIs[i].MI == MI) {
1443 assert(RewriteMIs[i].Index == index);
1444 bool isUse = RewriteMIs[i].HasUse;
1445 if (isUse) ++NumUses;
1447 MIHasDef |= RewriteMIs[i].HasDef;
1450 MachineBasicBlock *MBB = MI->getParent();
1452 if (ImpUse && MI != ReMatDefMI) {
1453 // Re-matting an instruction with virtual register use. Update the
1454 // register interval's spill weight to HUGE_VALF to prevent it from
1456 LiveInterval &ImpLi = getInterval(ImpUse);
1457 ImpLi.weight = HUGE_VALF;
// Decide whether to reuse the vreg already created for this MBB.
1460 unsigned MBBId = MBB->getNumber();
1461 unsigned ThisVReg = 0;
1463 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1464 if (NVI != MBBVRegsMap.end()) {
1465 ThisVReg = NVI->second;
1472 // It's better to start a new interval to avoid artifically
1473 // extend the new interval.
1474 if (MIHasDef && !MIHasUse) {
1475 MBBVRegsMap.erase(MBB->getNumber());
1481 bool IsNew = ThisVReg == 0;
1483 // This ends the previous live interval. If all of its def / use
1484 // can be folded, give it a low spill weight.
1485 if (NewVReg && TrySplit && AllCanFold) {
1486 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; HasDef/HasUse report what was replaced.
1493 bool HasDef = false;
1494 bool HasUse = false;
1495 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1496 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1497 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1498 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1499 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1500 if (!HasDef && !HasUse)
1503 AllCanFold &= CanFold;
1505 // Update weight of spill interval.
1506 LiveInterval &nI = getOrCreateInterval(NewVReg);
1508 // The spill weight is now infinity as it cannot be spilled again.
1509 nI.weight = HUGE_VALF;
1513 // Keep track of the last def and first use in each MBB.
1515 if (MI != ReMatOrigDefMI || !CanDelete) {
1516 bool HasKill = false;
1518 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1520 // If this is a two-address code, then this index starts a new VNInfo.
1521 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1523 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1525 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1526 SpillIdxes.find(MBBId);
// Record (or update) the pending spill for this MBB, keeping only the
// latest relevant def per vreg.
1528 if (SII == SpillIdxes.end()) {
1529 std::vector<SRInfo> S;
1530 S.push_back(SRInfo(index, NewVReg, true));
1531 SpillIdxes.insert(std::make_pair(MBBId, S));
1532 } else if (SII->second.back().vreg != NewVReg) {
1533 SII->second.push_back(SRInfo(index, NewVReg, true));
1534 } else if (index > SII->second.back().index) {
1535 // If there is an earlier def and this is a two-address
1536 // instruction, then it's not possible to fold the store (which
1537 // would also fold the load).
1538 SRInfo &Info = SII->second.back();
1540 Info.canFold = !HasUse;
1542 SpillMBBs.set(MBBId);
1543 } else if (SII != SpillIdxes.end() &&
1544 SII->second.back().vreg == NewVReg &&
1545 index > SII->second.back().index) {
1546 // There is an earlier def that's not killed (must be two-address).
1547 // The spill is no longer needed.
1548 SII->second.pop_back();
1549 if (SII->second.empty()) {
1550 SpillIdxes.erase(MBBId);
1551 SpillMBBs.reset(MBBId);
// Use side: decide whether a restore is needed and whether it can fold.
1558 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1559 SpillIdxes.find(MBBId);
1560 if (SII != SpillIdxes.end() &&
1561 SII->second.back().vreg == NewVReg &&
1562 index > SII->second.back().index)
1563 // Use(s) following the last def, it's not safe to fold the spill.
1564 SII->second.back().canFold = false;
1565 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1566 RestoreIdxes.find(MBBId);
1567 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1568 // If we are splitting live intervals, only fold if it's the first
1569 // use and there isn't another use later in the MBB.
1570 RII->second.back().canFold = false;
1572 // Only need a reload if there isn't an earlier def / use.
1573 if (RII == RestoreIdxes.end()) {
1574 std::vector<SRInfo> Infos;
1575 Infos.push_back(SRInfo(index, NewVReg, true));
1576 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1578 RII->second.push_back(SRInfo(index, NewVReg, true));
1580 RestoreMBBs.set(MBBId);
1584 // Update spill weight.
1585 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1586 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1589 if (NewVReg && TrySplit && AllCanFold) {
1590 // If all of its def / use can be folded, give it a low spill weight.
1591 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Returns whether the restore recorded for (index, vr) in block Id is marked
// foldable. (Return statements are elided from this excerpt.)
1596 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1597 unsigned vr, BitVector &RestoreMBBs,
1598 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1599 if (!RestoreMBBs[Id])
1601 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Linear scan for a matching, foldable restore record.
1602 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1603 if (Restores[i].index == index &&
1604 Restores[i].vreg == vr &&
1605 Restores[i].canFold)
// Invalidate the restore recorded for (index, vr) in block Id by clearing its
// slot index (SlotIndex() acts as the "erased" sentinel — see the
// `index == SlotIndex()` skip in addIntervalsForSpills' restore loop).
1610 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1611 unsigned vr, BitVector &RestoreMBBs,
1612 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1613 if (!RestoreMBBs[Id])
1615 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1616 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this condition looks truncated — the sibling function
// alsoFoldARestore compares `Restores[i].vreg == vr`, and `vr` is otherwise
// unused here; verify against the pristine source before relying on this.
1617 if (Restores[i].index == index && Restores[i].vreg)
1618 Restores[i].index = SlotIndex();
1621 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1622 /// spilled and create empty intervals for their uses.
// NOTE(review): guard lines and closing braces are elided from this excerpt
// (embedded line numbers skip 1631-1632, 1638, 1643, 1649+).
1624 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1625 const TargetRegisterClass* rc,
1626 std::vector<LiveInterval*> &NewLIs) {
1627 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1628 re = mri_->reg_end(); ri != re; ) {
1629 MachineOperand &O = ri.getOperand();
1630 MachineInstr *MI = &*ri;
// Def side: a remaining def of li.reg must be an IMPLICIT_DEF — delete it.
1633 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1634 "Register def was not rewritten?");
1635 RemoveMachineInstrFromMaps(MI);
1636 vrm.RemoveMachineInstrFromMaps(MI);
1637 MI->eraseFromParent();
1639 // This must be an use of an implicit_def so it's not part of the live
1640 // interval. Create a new empty live interval for it.
1641 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1642 unsigned NewVReg = mri_->createVirtualRegister(rc);
1644 vrm.setIsImplicitlyDefined(NewVReg);
1645 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite all of MI's operands that reference li.reg to the fresh vreg.
1646 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1647 MachineOperand &MO = MI->getOperand(i);
1648 if (MO.isReg() && MO.getReg() == li.reg) {
// Fast-path spiller (enabled by -fast-spill): one stack slot, and a fresh
// vreg with a tiny interval around each instruction that couldn't fold.
// NOTE(review): several original lines are elided (embedded line numbers
// skip), including the vrm parameter line and some loop-control code.
1657 std::vector<LiveInterval*> LiveIntervals::
1658 addIntervalsForSpillsFast(const LiveInterval &li,
1659 const MachineLoopInfo *loopInfo,
1661 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1663 std::vector<LiveInterval*> added;
1665 assert(li.weight != HUGE_VALF &&
1666 "attempt to spill already spilled interval!");
1669 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1674 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1676 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1677 while (RI != mri_->reg_end()) {
1678 MachineInstr* MI = &*RI;
// Gather every operand of MI that references the spilled register.
1680 SmallVector<unsigned, 2> Indices;
1681 bool HasUse = false;
1682 bool HasDef = false;
1684 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1685 MachineOperand& mop = MI->getOperand(i);
1686 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1688 HasUse |= MI->getOperand(i).isUse();
1689 HasDef |= MI->getOperand(i).isDef();
1691 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; fall back to a reload
// and/or spill around the instruction with a brand-new vreg.
1694 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1695 Indices, true, slot, li.reg)) {
1696 unsigned NewVReg = mri_->createVirtualRegister(rc);
1698 vrm.assignVirt2StackSlot(NewVReg, slot);
1700 // create a new register for this spill
1701 LiveInterval &nI = getOrCreateInterval(NewVReg);
1703 // the spill weight is now infinity as it
1704 // cannot be spilled again
1705 nI.weight = HUGE_VALF;
1707 // Rewrite register operands to use the new vreg.
1708 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1709 E = Indices.end(); I != E; ++I) {
1710 MI->getOperand(*I).setReg(NewVReg);
1712 if (MI->getOperand(*I).isUse())
1713 MI->getOperand(*I).setIsKill(true);
1716 // Fill in the new live interval.
1717 SlotIndex index = getInstructionIndex(MI);
1719 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1720 nI.getNextValue(SlotIndex(), 0, false,
1721 getVNInfoAllocator()));
1722 DEBUG(errs() << " +" << LR);
1724 vrm.addRestorePoint(NewVReg, MI);
1727 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1728 nI.getNextValue(SlotIndex(), 0, false,
1729 getVNInfoAllocator()));
1730 DEBUG(errs() << " +" << LR);
1732 vrm.addSpillPoint(NewVReg, true, MI);
1735 added.push_back(&nI);
1738 errs() << "\t\t\t\tadded new interval: ";
// Restart iteration: MI's operands no longer reference li.reg, so the
// use-def chain of li.reg has shrunk.
1745 RI = mri_->reg_begin(li.reg);
// Main spiller entry point: decides remat vs. stack slot per val#, rewrites
// all defs/uses, then places spill and restore points (folding where legal)
// and finalizes the new intervals' kill flags and weights.
// NOTE(review): this excerpt is heavily elided (embedded original line
// numbers skip throughout) — guard bodies, else-branches and declarations
// such as `dummy` and `CloneMIs` are missing from view.
1751 std::vector<LiveInterval*> LiveIntervals::
1752 addIntervalsForSpills(const LiveInterval &li,
1753 SmallVectorImpl<LiveInterval*> &SpillIs,
1754 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1756 if (EnableFastSpilling)
1757 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1759 assert(li.weight != HUGE_VALF &&
1760 "attempt to spill already spilled interval!");
1763 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1764 li.print(errs(), tri_);
1768 // Each bit specify whether a spill is required in the MBB.
1769 BitVector SpillMBBs(mf_->getNumBlockIDs());
1770 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1771 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1772 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1773 DenseMap<unsigned,unsigned> MBBVRegsMap;
1774 std::vector<LiveInterval*> NewLIs;
1775 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# remat bookkeeping, indexed by VNInfo id.
1777 unsigned NumValNums = li.getNumValNums();
1778 SmallVector<MachineInstr*, 4> ReMatDefs;
1779 ReMatDefs.resize(NumValNums, NULL);
1780 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1781 ReMatOrigDefs.resize(NumValNums, NULL);
1782 SmallVector<int, 4> ReMatIds;
1783 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1784 BitVector ReMatDelete(NumValNums);
1785 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1787 // Spilling a split live interval. It cannot be split any further. Also,
1788 // it's also guaranteed to be a single val# / range interval.
1789 if (vrm.getPreSplitReg(li.reg)) {
1790 vrm.setIsSplitFromReg(li.reg, 0);
1791 // Unset the split kill marker on the last use.
1792 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1793 if (KillIdx != SlotIndex()) {
1794 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1795 assert(KillMI && "Last use disappeared?");
1796 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1797 assert(KillOp != -1 && "Last use disappeared?");
1798 KillMI->getOperand(KillOp).setIsKill(false);
1800 vrm.removeKillPoint(li.reg);
1801 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1802 Slot = vrm.getStackSlot(li.reg);
1803 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1804 MachineInstr *ReMatDefMI = DefIsReMat ?
1805 vrm.getReMaterializedMI(li.reg) : NULL;
1807 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1808 bool isLoad = isLoadSS ||
1809 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1810 bool IsFirstRange = true;
1811 for (LiveInterval::Ranges::const_iterator
1812 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1813 // If this is a split live interval with multiple ranges, it means there
1814 // are two-address instructions that re-defined the value. Only the
1815 // first def can be rematerialized!
1817 // Note ReMatOrigDefMI has already been deleted.
1818 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1819 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1820 false, vrm, rc, ReMatIds, loopInfo,
1821 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1822 MBBVRegsMap, NewLIs);
1824 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1825 Slot, 0, false, false, false,
1826 false, vrm, rc, ReMatIds, loopInfo,
1827 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1828 MBBVRegsMap, NewLIs);
1830 IsFirstRange = false;
1833 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Non-split path from here on.
1837 bool TrySplit = !intervalIsInOneMBB(li);
1840 bool NeedStackSlot = false;
1841 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1843 const VNInfo *VNI = *i;
1844 unsigned VN = VNI->id;
1845 if (VNI->isUnused())
1846 continue; // Dead val#.
1847 // Is the def for the val# rematerializable?
1848 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1849 ? getInstructionFromIndex(VNI->def) : 0;
1851 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1852 // Remember how to remat the def of this val#.
1853 ReMatOrigDefs[VN] = ReMatDefMI;
1854 // Original def may be modified so we have to make a copy here.
1855 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1856 CloneMIs.push_back(Clone);
1857 ReMatDefs[VN] = Clone;
1859 bool CanDelete = true;
1860 if (VNI->hasPHIKill()) {
1861 // A kill is a phi node, not all of its uses can be rematerialized.
1862 // It must not be deleted.
1864 // Need a stack slot if there is any live range where uses cannot be
1866 NeedStackSlot = true;
1869 ReMatDelete.set(VN);
1871 // Need a stack slot if there is any live range where uses cannot be
1873 NeedStackSlot = true;
1877 // One stack slot per live interval.
1878 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1879 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1880 Slot = vrm.assignVirt2StackSlot(li.reg);
1882 // This case only occurs when the prealloc splitter has already assigned
1883 // a stack slot to this vreg.
1885 Slot = vrm.getStackSlot(li.reg);
1888 // Create new intervals and rewrite defs and uses.
1889 for (LiveInterval::Ranges::const_iterator
1890 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1891 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1892 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1893 bool DefIsReMat = ReMatDefMI != NULL;
1894 bool CanDelete = ReMatDelete[I->valno->id];
1896 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1897 bool isLoad = isLoadSS ||
1898 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1899 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1900 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1901 CanDelete, vrm, rc, ReMatIds, loopInfo,
1902 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1903 MBBVRegsMap, NewLIs);
1906 // Insert spills / restores if we are splitting.
1908 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1912 SmallPtrSet<LiveInterval*, 4> AddedKill;
1913 SmallVector<unsigned, 2> Ops;
// Spill placement: walk every MBB flagged as needing a spill.
1914 if (NeedStackSlot) {
1915 int Id = SpillMBBs.find_first();
1917 std::vector<SRInfo> &spills = SpillIdxes[Id];
1918 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1919 SlotIndex index = spills[i].index;
1920 unsigned VReg = spills[i].vreg;
1921 LiveInterval &nI = getOrCreateInterval(VReg);
1922 bool isReMat = vrm.isReMaterialized(VReg);
1923 MachineInstr *MI = getInstructionFromIndex(index);
1924 bool CanFold = false;
1925 bool FoundUse = false;
1927 if (spills[i].canFold) {
1929 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1930 MachineOperand &MO = MI->getOperand(j);
1931 if (!MO.isReg() || MO.getReg() != VReg)
1938 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1939 RestoreMBBs, RestoreIdxes))) {
1940 // MI has two-address uses of the same register. If the use
1941 // isn't the first and only use in the BB, then we can't fold
1942 // it. FIXME: Move this to rewriteInstructionsForSpills.
1949 // Fold the store into the def if possible.
1950 bool Folded = false;
1951 if (CanFold && !Ops.empty()) {
1952 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1955 // Also folded uses, do not issue a load.
1956 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1957 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1959 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1963 // Otherwise tell the spiller to issue a spill.
1965 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1966 bool isKill = LR->end == index.getStoreIndex();
1967 if (!MI->registerDefIsDead(nI.reg))
1968 // No need to spill a dead def.
1969 vrm.addSpillPoint(VReg, isKill, MI);
1971 AddedKill.insert(&nI);
1974 Id = SpillMBBs.find_next(Id);
// Restore placement: walk every MBB flagged as needing a reload/remat.
1978 int Id = RestoreMBBs.find_first();
1980 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1981 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1982 SlotIndex index = restores[i].index;
// SlotIndex() is the sentinel written by eraseRestoreInfo.
1983 if (index == SlotIndex())
1985 unsigned VReg = restores[i].vreg;
1986 LiveInterval &nI = getOrCreateInterval(VReg);
1987 bool isReMat = vrm.isReMaterialized(VReg);
1988 MachineInstr *MI = getInstructionFromIndex(index);
1989 bool CanFold = false;
1991 if (restores[i].canFold) {
1993 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1994 MachineOperand &MO = MI->getOperand(j);
1995 if (!MO.isReg() || MO.getReg() != VReg)
1999 // If this restore were to be folded, it would have been folded
2008 // Fold the load into the use if possible.
2009 bool Folded = false;
2010 if (CanFold && !Ops.empty()) {
2012 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2014 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2016 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2017 // If the rematerializable def is a load, also try to fold it.
2018 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2019 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2020 Ops, isLoadSS, LdSlot, VReg);
2022 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2024 // Re-matting an instruction with virtual register use. Add the
2025 // register as an implicit use on the use MI and update the register
2026 // interval's spill weight to HUGE_VALF to prevent it from being
2028 LiveInterval &ImpLi = getInterval(ImpUse);
2029 ImpLi.weight = HUGE_VALF;
2030 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2035 // If folding is not possible / failed, then tell the spiller to issue a
2036 // load / rematerialization for us.
2038 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2040 vrm.addRestorePoint(VReg, MI);
2042 Id = RestoreMBBs.find_next(Id);
2045 // Finalize intervals: add kills, finalize spill weights, and filter out
2047 std::vector<LiveInterval*> RetNewLIs;
2048 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2049 LiveInterval *LI = NewLIs[i];
2051 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2052 if (!AddedKill.count(LI)) {
// Mark the last use of each new interval as a kill unless it is tied to a
// def (two-address), and remember the kill point in the VirtRegMap.
2053 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2054 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2055 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2056 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2057 assert(UseIdx != -1);
2058 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2059 LastUse->getOperand(UseIdx).setIsKill();
2060 vrm.addKillPoint(LI->reg, LastUseIdx);
2063 RetNewLIs.push_back(LI);
2067 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
// NOTE(review): this chunk is a numbered listing with elided lines; the
// statements at listing lines 2076-2079 (the loop's success return and the
// fall-through return) are not visible here — confirm against full source.
2071 /// hasAllocatableSuperReg - Return true if the specified physical register has
2072 /// any super register that's allocatable.
2073 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the target's null-terminated super-register list for Reg.
2074 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
// A super-register qualifies only if it is allocatable on this target
// AND this analysis currently tracks a live interval for it.
2075 if (allocatableRegs_[*AS] && hasInterval(*AS))
// (elided in this listing: presumably `return true;` for a qualifying
// super-reg and `return false;` after the loop — TODO confirm.)
// NOTE(review): listing lines 2088-2094 are elided here; the loop body that
// updates BestReg and the final `return BestReg;` are not visible — confirm
// against the full source before relying on the exact walk order.
2080 /// getRepresentativeReg - Find the largest super register of the specified
2081 /// physical register.
2082 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2083 // Find the largest super-register that is allocatable.
// Default to Reg itself if no better super-register is found.
2084 unsigned BestReg = Reg;
// Null-terminated super-register list from the target register info.
2085 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2086 unsigned SuperReg = *AS;
// Candidate: a super-reg with an interval that has no allocatable
// super-reg of its own, i.e. it is a maximal element of the chain.
2087 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): listing lines 2107-2108 are elided — presumably the
// `++NumConflicts;` (and loop close) live there; confirm against full source.
2095 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2096 /// specified interval that conflicts with the specified physical register.
2097 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2098 unsigned PhysReg) const {
2099 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest allocatable
// super-) register's interval, not PhysReg's own.
2100 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand (use or def) of the virtual register li.reg.
2101 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2102 E = mri_->reg_end(); I != E; ++I) {
2103 MachineOperand &O = I.getOperand();
2104 MachineInstr *MI = O.getParent();
// Slot index of the instruction holding this operand.
2105 SlotIndex Index = getInstructionIndex(MI);
// If the physreg's interval is live across this instruction, the
// operand conflicts (increment elided from this listing).
2106 if (pli.liveAt(Index))
2109 return NumConflicts;
// NOTE(review): several listing lines are elided in this view (e.g. 2118,
// 2125-2126, 2130, 2136-2139, 2146-2147, 2153, 2159-2161, 2168, 2170, 2173,
// 2178-2183) — among them presumably the `continue`s for the early filters,
// the success bookkeeping after removeRange, and the function's final
// return. Confirm control flow against the full source.
2112 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2113 /// around all defs and uses of the specified interval. Return true if it
2114 /// was able to cut its interval.
2115 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2116 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest) register so all aliases are covered.
2117 unsigned SpillReg = getRepresentativeReg(PhysReg);
2119 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2120 // If there are registers which alias PhysReg, but which are not a
2121 // sub-register of the chosen representative super register. Assert
2122 // since we can't handle it yet.
2123 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2124 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg plus every sub-register that has a live interval;
// these are the physregs whose ranges must be cut around li's uses/defs.
2127 SmallVector<unsigned, 4> PRegs;
2128 if (hasInterval(SpillReg))
2129 PRegs.push_back(SpillReg);
2131 SmallSet<unsigned, 4> Added;
2132 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
// Added guards against visiting the same sub-register twice via
// overlapping sub-register lists.
2133 if (Added.insert(*AS) && hasInterval(*AS)) {
2134 PRegs.push_back(*AS);
2135 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Walk every instruction that uses or defines li.reg; SeenMIs dedupes
// instructions that carry multiple operands of li.reg.
2140 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2141 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2142 E = mri_->reg_end(); I != E; ++I) {
2143 MachineOperand &O = I.getOperand();
2144 MachineInstr *MI = O.getParent();
2145 if (SeenMIs.count(MI))
2148 SlotIndex Index = getInstructionIndex(MI);
2149 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2150 unsigned PReg = PRegs[i];
2151 LiveInterval &pli = getInterval(PReg);
// Only physregs actually live across this instruction need cutting.
2152 if (!pli.liveAt(Index))
// Ask the spiller to emergency-spill PReg around this instruction.
2154 vrm.addEmergencySpill(PReg, MI);
// Cut the physreg's range across this instruction's whole slot group
// (load slot of this index through the base of the next index).
2155 SlotIndex StartIdx = Index.getLoadIndex();
2156 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2157 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2158 pli.removeRange(StartIdx, EndIdx);
// Otherwise the slot group straddles live-range boundaries: we cannot
// cut, which means register allocation has genuinely run out — report
// a fatal error (with extra guidance when the culprit is inline asm,
// the common way users over-constrain registers).
2162 raw_string_ostream Msg(msg);
2163 Msg << "Ran out of registers during register allocation!";
2164 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2165 Msg << "\nPlease check your inline asm statement for invalid "
2166 << "constraints:\n";
2167 MI->print(Msg, tm_);
2169 llvm_report_error(Msg.str());
// After cutting PReg, also trim any sub-register intervals that are
// live at this point so they stay consistent with the parent.
2171 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2172 if (!hasInterval(*AS))
2174 LiveInterval &spli = getInterval(*AS);
2175 if (spli.liveAt(Index))
2176 spli.removeRange(Index.getLoadIndex(),
2177 Index.getNextIndex().getBaseIndex());
2184 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2185 MachineInstr* startInst) {
2186 LiveInterval& Interval = getOrCreateInterval(reg);
2187 VNInfo* VN = Interval.getNextValue(
2188 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2189 startInst, true, getVNInfoAllocator());
2190 VN->setHasPHIKill(true);
2191 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2193 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2194 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2195 Interval.addRange(LR);