1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options to help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 static cl::opt<bool> EarlyCoalescing("early-coalescing",
57 cl::init(false), cl::Hidden);
59 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
60 cl::init(-1), cl::Hidden);
62 STATISTIC(numIntervals , "Number of original intervals");
63 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64 STATISTIC(numSplits , "Number of intervals split");
65 STATISTIC(numCoalescing, "Number of early coalescing performed");
67 char LiveIntervals::ID = 0;
68 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addPreserved<LiveVariables>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreservedID(MachineLoopInfoID);
77 AU.addPreservedID(MachineDominatorsID);
80 AU.addPreservedID(PHIEliminationID);
81 AU.addRequiredID(PHIEliminationID);
84 AU.addRequiredID(TwoAddressInstructionPassID);
85 AU.addPreserved<ProcessImplicitDefs>();
86 AU.addRequired<ProcessImplicitDefs>();
87 AU.addPreserved<SlotIndexes>();
88 AU.addRequiredTransitive<SlotIndexes>();
89 MachineFunctionPass::getAnalysisUsage(AU);
92 void LiveIntervals::releaseMemory() {
93 // Free the live intervals themselves.
94 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
95 E = r2iMap_.end(); I != E; ++I)
99 phiJoinCopies.clear();
101 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
102 VNInfoAllocator.Reset();
103 while (!CloneMIs.empty()) {
104 MachineInstr *MI = CloneMIs.back();
106 mf_->DeleteMachineInstr(MI);
110 /// runOnMachineFunction - Compute live intervals for the whole function.
112 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
114 mri_ = &mf_->getRegInfo();
115 tm_ = &fn.getTarget();
116 tri_ = tm_->getRegisterInfo();
117 tii_ = tm_->getInstrInfo();
118 aa_ = &getAnalysis<AliasAnalysis>();
119 lv_ = &getAnalysis<LiveVariables>();
120 indexes_ = &getAnalysis<SlotIndexes>();
121 allocatableRegs_ = tri_->getAllocatableSet(fn);
124 performEarlyCoalescing();
126 numIntervals += getNumIntervals();
132 /// print - Implement the dump method.
133 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
134 OS << "********** INTERVALS **********\n";
135 for (const_iterator I = begin(), E = end(); I != E; ++I) {
136 I->second->print(OS, tri_);
143 void LiveIntervals::printInstrs(raw_ostream &OS) const {
144 OS << "********** MACHINEINSTRS **********\n";
146 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
147 mbbi != mbbe; ++mbbi) {
148 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
149 for (MachineBasicBlock::iterator mii = mbbi->begin(),
150 mie = mbbi->end(); mii != mie; ++mii) {
151 OS << getInstructionIndex(mii) << '\t' << *mii;
156 void LiveIntervals::dumpInstrs() const {
160 /// conflictsWithPhysRegDef - Returns true if the specified register
161 /// is defined during the specified interval.
162 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
163 VirtRegMap &vrm, unsigned reg) {
164 for (LiveInterval::Ranges::const_iterator
165 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
166 for (SlotIndex index = I->start.getBaseIndex(),
167 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
169 index = index.getNextIndex()) {
170 // skip deleted instructions
171 while (index != end && !getInstructionFromIndex(index))
172 index = index.getNextIndex();
173 if (index == end) break;
175 MachineInstr *MI = getInstructionFromIndex(index);
176 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
177 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
178 if (SrcReg == li.reg || DstReg == li.reg)
180 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
181 MachineOperand& mop = MI->getOperand(i);
184 unsigned PhysReg = mop.getReg();
185 if (PhysReg == 0 || PhysReg == li.reg)
187 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
188 if (!vrm.hasPhys(PhysReg))
190 PhysReg = vrm.getPhys(PhysReg);
192 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
201 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
202 /// it can check uses as well.
203 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
204 unsigned Reg, bool CheckUse,
205 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
206 for (LiveInterval::Ranges::const_iterator
207 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
208 for (SlotIndex index = I->start.getBaseIndex(),
209 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
211 index = index.getNextIndex()) {
212 // Skip deleted instructions.
213 MachineInstr *MI = 0;
214 while (index != end) {
215 MI = getInstructionFromIndex(index);
218 index = index.getNextIndex();
220 if (index == end) break;
222 if (JoinedCopies.count(MI))
224 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
225 MachineOperand& MO = MI->getOperand(i);
228 if (MO.isUse() && !CheckUse)
230 unsigned PhysReg = MO.getReg();
231 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
233 if (tri_->isSubRegister(Reg, PhysReg))
243 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
244 if (TargetRegisterInfo::isPhysicalRegister(reg))
245 errs() << tri_->getName(reg);
247 errs() << "%reg" << reg;
251 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
252 MachineBasicBlock::iterator mi,
256 LiveInterval &interval) {
258 errs() << "\t\tregister: ";
259 printRegName(interval.reg, tri_);
262 // Virtual registers may be defined multiple times (due to phi
263 // elimination and 2-addr elimination). Much of what we do only has to be
264 // done once for the vreg. We use an empty interval to detect the first
265 // time we see a vreg.
266 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
267 if (interval.empty()) {
268 // Get the Idx of the defining instruction.
269 SlotIndex defIndex = MIIdx.getDefIndex();
270 // Earlyclobbers move back one, so that they overlap the live range
271 // of inputs.
272 if (MO.isEarlyClobber())
273 defIndex = MIIdx.getUseIndex();
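// For illustration only: SlotIndexes gives each instruction load, use, def
// and store sub-slots (roughly in that order), so a normal def starts its
// range at the def slot, while an early-clobber def is moved back to the use
// slot and therefore interferes with the operands read by the same
// instruction.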
275 MachineInstr *CopyMI = NULL;
276 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
277 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
278 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
279 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
280 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
282 // Earlyclobbers move back one.
283 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
285 assert(ValNo->id == 0 && "First value in interval is not 0?");
287 // Loop over all of the blocks that the vreg is defined in. There are
288 // two cases we have to handle here. The most common case is a vreg
289 // whose lifetime is contained within a basic block. In this case there
290 // will be a single kill, in MBB, which comes after the definition.
291 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
292 // FIXME: what about dead vars?
294 if (vi.Kills[0] != mi)
295 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
297 killIdx = defIndex.getStoreIndex();
299 // If the kill happens after the definition, we have an intra-block
300 // live range.
301 if (killIdx > defIndex) {
302 assert(vi.AliveBlocks.empty() &&
303 "Shouldn't be alive across any blocks!");
304 LiveRange LR(defIndex, killIdx, ValNo);
305 interval.addRange(LR);
306 DEBUG(errs() << " +" << LR << "\n");
307 ValNo->addKill(killIdx);
312 // The other case we handle is when a virtual register lives to the end
313 // of the defining block, potentially live across some blocks, then is
314 // live into some number of blocks, but gets killed. Start by adding a
315 // range that goes from this definition to the end of the defining block.
316 LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
318 DEBUG(errs() << " +" << NewLR);
319 interval.addRange(NewLR);
321 // Iterate over all of the blocks that the variable is completely
322 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
323 // live interval.
324 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
325 E = vi.AliveBlocks.end(); I != E; ++I) {
327 getMBBStartIdx(mf_->getBlockNumbered(*I)),
328 getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
330 interval.addRange(LR);
331 DEBUG(errs() << " +" << LR);
334 // Finally, this virtual register is live from the start of any killing
335 // block to the 'use' slot of the killing instruction.
336 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
337 MachineInstr *Kill = vi.Kills[i];
339 getInstructionIndex(Kill).getDefIndex();
340 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
341 interval.addRange(LR);
342 ValNo->addKill(killIdx);
343 DEBUG(errs() << " +" << LR);
347 // If this is the second time we see a virtual register definition, it
348 // must be due to phi elimination or two addr elimination. If this is
349 // the result of two address elimination, then the vreg is one of the
350 // def-and-use register operands.
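// For illustration (hypothetical vregs): after two-address conversion an
// instruction such as
//   %reg1025<def> = ADD32rr %reg1025<use,tied>, %reg1026
// both uses and redefines %reg1025, so its interval must carry two value
// numbers: the one reaching the ADD and the one the ADD defines.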
351 if (mi->isRegTiedToUseOperand(MOIdx)) {
352 // If this is a two-address definition, then we have already processed
353 // the live range. The only problem is that we didn't realize there
354 // are actually two values in the live interval. Because of this we
355 // need to take the live range that defines this register and split it
356 // into two values.
357 assert(interval.containsOneValue());
358 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
359 SlotIndex RedefIndex = MIIdx.getDefIndex();
360 if (MO.isEarlyClobber())
361 RedefIndex = MIIdx.getUseIndex();
363 const LiveRange *OldLR =
364 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
365 VNInfo *OldValNo = OldLR->valno;
367 // Delete the initial value, which should be short and continuous,
368 // because the 2-addr copy must be in the same MBB as the redef.
369 interval.removeRange(DefIndex, RedefIndex);
371 // Two-address vregs should always only be redefined once. This means
372 // that at this point, there should be exactly one value number in it.
373 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
375 // The new value number (#1) is defined by the instruction we claimed
376 // defined value #0.
377 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
378 false, // update at *
379 VNInfoAllocator);
380 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
382 // Value#0 is now defined by the 2-addr instruction.
383 OldValNo->def = RedefIndex;
384 OldValNo->setCopy(0);
385 if (MO.isEarlyClobber())
386 OldValNo->setHasRedefByEC(true);
388 // Add the new live interval which replaces the range for the input copy.
389 LiveRange LR(DefIndex, RedefIndex, ValNo);
390 DEBUG(errs() << " replace range with " << LR);
391 interval.addRange(LR);
392 ValNo->addKill(RedefIndex);
394 // If this redefinition is dead, we need to add a dummy unit live
395 // range covering the def slot.
397 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
401 errs() << " RESULT: ";
402 interval.print(errs(), tri_);
405 // Otherwise, this must be because of phi elimination. If this is the
406 // first redefinition of the vreg that we have seen, go back and change
407 // the live range in the PHI block to be a different value number.
408 if (interval.containsOneValue()) {
409 // Remove the old range that we now know has an incorrect number.
410 VNInfo *VNI = interval.getValNumInfo(0);
411 MachineInstr *Killer = vi.Kills[0];
412 phiJoinCopies.push_back(Killer);
413 SlotIndex Start = getMBBStartIdx(Killer->getParent());
414 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
416 errs() << " Removing [" << Start << "," << End << "] from: ";
417 interval.print(errs(), tri_);
420 interval.removeRange(Start, End);
421 assert(interval.ranges.size() == 1 &&
422 "Newly discovered PHI interval has >1 ranges.");
423 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
424 VNI->addKill(indexes_->getTerminatorGap(killMBB));
425 VNI->setHasPHIKill(true);
427 errs() << " RESULT: ";
428 interval.print(errs(), tri_);
431 // Replace the interval with one using a NEW value number. Note that this
432 // value number isn't actually defined by an instruction, weird huh? :)
433 LiveRange LR(Start, End,
434 interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
435 0, false, VNInfoAllocator));
436 LR.valno->setIsPHIDef(true);
437 DEBUG(errs() << " replace range with " << LR);
438 interval.addRange(LR);
439 LR.valno->addKill(End);
441 errs() << " RESULT: ";
442 interval.print(errs(), tri_);
446 // In the case of PHI elimination, each variable definition is only
447 // live until the end of the block. We've already taken care of the
448 // rest of the live range.
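// For illustration (hypothetical): if this def of the vreg only feeds a PHI
// in a successor block, it is given a range to the end of this block and
// marked as having a PHI kill; the successor's portion of the live range was
// already created when the first definition was processed.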
449 SlotIndex defIndex = MIIdx.getDefIndex();
450 if (MO.isEarlyClobber())
451 defIndex = MIIdx.getUseIndex();
454 MachineInstr *CopyMI = NULL;
455 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
456 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
457 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
458 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
459 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
461 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
463 SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
464 LiveRange LR(defIndex, killIndex, ValNo);
465 interval.addRange(LR);
466 ValNo->addKill(indexes_->getTerminatorGap(mbb));
467 ValNo->setHasPHIKill(true);
468 DEBUG(errs() << " +" << LR);
472 DEBUG(errs() << '\n');
475 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
476 MachineBasicBlock::iterator mi,
479 LiveInterval &interval,
480 MachineInstr *CopyMI) {
481 // A physical register cannot be live across basic blocks, so its
482 // lifetime must end somewhere in its defining basic block.
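// For illustration: a physical register defined here (for example a register
// holding a call result) is normally copied into a virtual register or
// otherwise consumed before the end of this block, so the range built below
// never extends past the block.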
484 errs() << "\t\tregister: ";
485 printRegName(interval.reg, tri_);
488 SlotIndex baseIndex = MIIdx;
489 SlotIndex start = baseIndex.getDefIndex();
490 // Earlyclobbers move back one.
491 if (MO.isEarlyClobber())
492 start = MIIdx.getUseIndex();
493 SlotIndex end = start;
495 // If it is not used after definition, it is considered dead at
496 // the instruction defining it. Hence its interval is:
497 // [defSlot(def), defSlot(def)+1)
498 // For earlyclobbers, the defSlot was pushed back one; the extra
499 // advance below compensates.
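// For illustration: with the load/use/def/store sub-slots that SlotIndexes
// assigns to each instruction, a dead def's range covers only its own def
// slot, i.e. roughly [def, store) of the defining instruction.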
501 DEBUG(errs() << " dead");
502 end = start.getStoreIndex();
506 // If it is not dead on definition, it must be killed by a
507 // subsequent instruction. Hence its interval is:
508 // [defSlot(def), useSlot(kill)+1)
509 baseIndex = baseIndex.getNextIndex();
510 while (++mi != MBB->end()) {
512 if (getInstructionFromIndex(baseIndex) == 0)
513 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
515 if (mi->killsRegister(interval.reg, tri_)) {
516 DEBUG(errs() << " killed");
517 end = baseIndex.getDefIndex();
520 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
522 if (mi->isRegTiedToUseOperand(DefIdx)) {
523 // Two-address instruction.
524 end = baseIndex.getDefIndex();
525 assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
526 "Two address instruction is an early clobber?");
528 // Another instruction redefines the register before it is ever read.
529 // Then the register is essentially dead at the instruction that defines
530 // it. Hence its interval is:
531 // [defSlot(def), defSlot(def)+1)
532 DEBUG(errs() << " dead");
533 end = start.getStoreIndex();
539 baseIndex = baseIndex.getNextIndex();
542 // The only case in which we should have a dead physreg here, without a
543 // killing instruction or any other indication that it is dead, is if it is
544 // live-in to the function and never used. Another possible case is that the
545 // implicit use of the physical register was deleted by the two-address pass.
546 end = start.getStoreIndex();
549 assert(start < end && "did not find end of interval?");
551 // Already exists? Extend old live interval.
552 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
553 bool Extend = OldLR != interval.end();
554 VNInfo *ValNo = Extend
555 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
556 if (MO.isEarlyClobber() && Extend)
557 ValNo->setHasRedefByEC(true);
558 LiveRange LR(start, end, ValNo);
559 interval.addRange(LR);
560 LR.valno->addKill(end);
561 DEBUG(errs() << " +" << LR << '\n');
564 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
565 MachineBasicBlock::iterator MI,
569 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
570 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
571 getOrCreateInterval(MO.getReg()));
572 else if (allocatableRegs_[MO.getReg()]) {
573 MachineInstr *CopyMI = NULL;
574 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
575 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
576 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
577 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
578 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
580 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
581 getOrCreateInterval(MO.getReg()), CopyMI);
582 // Def of a register also defines its sub-registers.
583 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
584 // If MI also modifies the sub-register explicitly, avoid processing it
585 // more than once. Do not pass in TRI here so it checks for exact match.
586 if (!MI->modifiesRegister(*AS))
587 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
588 getOrCreateInterval(*AS), 0);
592 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
594 LiveInterval &interval, bool isAlias) {
596 errs() << "\t\tlivein register: ";
597 printRegName(interval.reg, tri_);
600 // Look for kills. If the register reaches a def before it's killed, then it
601 // shouldn't be considered a live-in.
602 MachineBasicBlock::iterator mi = MBB->begin();
603 SlotIndex baseIndex = MIIdx;
604 SlotIndex start = baseIndex;
605 if (getInstructionFromIndex(baseIndex) == 0)
606 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
608 SlotIndex end = baseIndex;
609 bool SeenDefUse = false;
611 while (mi != MBB->end()) {
612 if (mi->killsRegister(interval.reg, tri_)) {
613 DEBUG(errs() << " killed");
614 end = baseIndex.getDefIndex();
617 } else if (mi->modifiesRegister(interval.reg, tri_)) {
618 // Another instruction redefines the register before it is ever read.
619 // Then the register is essentially dead at the instruction that defines
620 // it. Hence its interval is:
621 // [defSlot(def), defSlot(def)+1)
622 DEBUG(errs() << " dead");
623 end = start.getStoreIndex();
629 if (mi != MBB->end()) {
630 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
634 // Live-in register might not be used at all.
637 DEBUG(errs() << " dead");
638 end = MIIdx.getStoreIndex();
640 DEBUG(errs() << " live through");
646 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
647 0, false, VNInfoAllocator);
648 vni->setIsPHIDef(true);
649 LiveRange LR(start, end, vni);
651 interval.addRange(LR);
652 LR.valno->addKill(end);
653 DEBUG(errs() << " +" << LR << '\n');
657 isSafeAndProfitableToCoalesce(LiveInterval &DstInt,
658 LiveInterval &SrcInt,
659 SmallVector<MachineInstr*,16> &IdentCopies,
660 SmallVector<MachineInstr*,16> &OtherCopies) {
661 unsigned NumIdent = 0;
662 for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
663 re = mri_->def_end(); ri != re; ++ri) {
664 MachineInstr *MI = &*ri;
665 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
666 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
668 if (SrcReg != DstInt.reg) {
669 // Non-identity copy - we cannot handle overlapping intervals
670 if (DstInt.liveAt(getInstructionIndex(MI)))
672 OtherCopies.push_back(MI);
674 IdentCopies.push_back(MI);
679 return IdentCopies.size() > OtherCopies.size();
682 void LiveIntervals::performEarlyCoalescing() {
683 if (!EarlyCoalescing)
686 /// Perform early coalescing: eliminate copies which feed into phi joins
687 /// and whose sources are defined by the phi joins.
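// For illustration (hypothetical vregs): given a phi join copy
//   %reg1030<def> = MOV %reg1029
// where, in the simplest case, every definition of %reg1029 is itself a copy
// of %reg1030, the copies and the join are redundant and %reg1029's ranges
// can be folded into %reg1030's interval.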
688 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
689 MachineInstr *Join = phiJoinCopies[i];
690 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
693 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
694 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
696 assert(isMove && "PHI join instruction must be a move!");
701 LiveInterval &DstInt = getInterval(PHIDst);
702 LiveInterval &SrcInt = getInterval(PHISrc);
703 SmallVector<MachineInstr*, 16> IdentCopies;
704 SmallVector<MachineInstr*, 16> OtherCopies;
705 if (!isSafeAndProfitableToCoalesce(DstInt, SrcInt,
706 IdentCopies, OtherCopies))
709 DEBUG(errs() << "PHI Join: " << *Join);
710 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
711 assert(std::distance(mri_->use_begin(PHISrc), mri_->use_end()) == 1 &&
712 "PHI join src should not be used elsewhere");
713 VNInfo *VNI = DstInt.getValNumInfo(0);
715 // Change the non-identity copies to directly target the phi destination.
716 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
717 MachineInstr *PHICopy = OtherCopies[i];
718 SlotIndex MIIndex = getInstructionIndex(PHICopy);
719 DEBUG(errs() << "Moving: " << MIIndex << ' ' << *PHICopy);
720 SlotIndex DefIndex = MIIndex.getDefIndex();
721 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
722 SlotIndex StartIndex = SLR->start;
723 SlotIndex EndIndex = SLR->end;
725 // Delete val# defined by the now identity copy and add the range from
726 // beginning of the mbb to the end of the range.
727 SrcInt.removeValNo(SLR->valno);
728 DEBUG(errs() << " added range [" << StartIndex << ','
729 << EndIndex << "] to reg" << DstInt.reg << '\n');
730 assert (!DstInt.liveAt(StartIndex) && "Cannot coalesce when dst live!");
731 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
733 NewVNI->setHasPHIKill(true);
734 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
735 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
736 MachineOperand &MO = PHICopy->getOperand(j);
737 if (!MO.isReg() || MO.getReg() != PHISrc)
743 // Now let's eliminate all the would-be identity copies.
744 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
745 MachineInstr *PHICopy = IdentCopies[i];
746 DEBUG(errs() << "Coalescing: " << *PHICopy);
748 SlotIndex MIIndex = getInstructionIndex(PHICopy);
749 SlotIndex DefIndex = MIIndex.getDefIndex();
750 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
751 SlotIndex StartIndex = SLR->start;
752 SlotIndex EndIndex = SLR->end;
754 // Delete val# defined by the now identity copy and add the range from
755 // beginning of the mbb to the end of the range.
756 SrcInt.removeValNo(SLR->valno);
757 RemoveMachineInstrFromMaps(PHICopy);
758 PHICopy->eraseFromParent();
759 DEBUG(errs() << " added range [" << StartIndex << ','
760 << EndIndex << "] to reg" << DstInt.reg << '\n');
761 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
764 // Remove the phi join and update the phi block liveness.
765 SlotIndex MIIndex = getInstructionIndex(Join);
766 SlotIndex UseIndex = MIIndex.getUseIndex();
767 SlotIndex DefIndex = MIIndex.getDefIndex();
768 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
769 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
770 DLR->valno->setCopy(0);
771 DLR->valno->setIsDefAccurate(false);
772 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
773 SrcInt.removeRange(SLR->start, SLR->end);
774 assert(SrcInt.empty());
775 removeInterval(PHISrc);
776 RemoveMachineInstrFromMaps(Join);
777 Join->eraseFromParent();
783 /// computeIntervals - Computes the live intervals for virtual
784 /// registers. For some ordering of the machine instructions [1,N], a
785 /// live interval is an interval [i, j) where 1 <= i <= j < N for
786 /// which a variable is live.
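/// For illustration only (hypothetical numbering): a vreg defined at
/// instruction 4 and last used at instruction 12 gets, roughly, the half-open
/// interval [4, 12).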
787 void LiveIntervals::computeIntervals() {
788 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
789 << "********** Function: "
790 << ((Value*)mf_->getFunction())->getName() << '\n');
792 SmallVector<unsigned, 8> UndefUses;
793 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
795 MachineBasicBlock *MBB = MBBI;
796 // Track the index of the current machine instr.
797 SlotIndex MIIndex = getMBBStartIdx(MBB);
798 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
800 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
802 // Create intervals for live-ins to this BB first.
803 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
804 LE = MBB->livein_end(); LI != LE; ++LI) {
805 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
806 // Multiple live-ins can alias the same register.
807 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
808 if (!hasInterval(*AS))
809 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
813 // Skip over empty initial indices.
814 if (getInstructionFromIndex(MIIndex) == 0)
815 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
817 for (; MI != miEnd; ++MI) {
818 DEBUG(errs() << MIIndex << "\t" << *MI);
821 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
822 MachineOperand &MO = MI->getOperand(i);
823 if (!MO.isReg() || !MO.getReg())
826 // handle register defs - build intervals
828 handleRegisterDef(MBB, MI, MIIndex, MO, i);
829 else if (MO.isUndef())
830 UndefUses.push_back(MO.getReg());
833 // Move to the next instr slot.
834 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
838 // Create empty intervals for registers defined by implicit_def's (except
839 // for those implicit_defs that define values which are live out of their
840 // blocks).
841 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
842 unsigned UndefReg = UndefUses[i];
843 (void)getOrCreateInterval(UndefReg);
847 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
848 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
849 return new LiveInterval(reg, Weight);
852 /// dupInterval - Duplicate a live interval. The caller is responsible for
853 /// managing the allocated memory.
854 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
855 LiveInterval *NewLI = createInterval(li->reg);
856 NewLI->Copy(*li, mri_, getVNInfoAllocator());
860 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
861 /// copy field and returns the source register that defines it.
862 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
866 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
867 // If it's extracting out of a physical register, return the sub-register.
868 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
869 if (TargetRegisterInfo::isPhysicalRegister(Reg))
870 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
872 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
873 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
874 return VNI->getCopy()->getOperand(2).getReg();
876 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
877 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
879 llvm_unreachable("Unrecognized copy instruction!");
883 //===----------------------------------------------------------------------===//
884 // Register allocator hooks.
887 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
888 /// allow one) virtual register operand, then its uses are implicitly using
889 /// the register. Returns the virtual register.
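/// For illustration (hypothetical): a rematerializable constant-pool load
/// whose address is computed from a PIC base held in %reg1025 can only be
/// re-executed where %reg1025 is live, so %reg1025 is the register this
/// helper would return.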
890 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
891 MachineInstr *MI) const {
893 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
894 MachineOperand &MO = MI->getOperand(i);
895 if (!MO.isReg() || !MO.isUse())
897 unsigned Reg = MO.getReg();
898 if (Reg == 0 || Reg == li.reg)
901 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
902 !allocatableRegs_[Reg])
904 // FIXME: For now, only remat MI with at most one register operand.
906 "Can't rematerialize instruction with multiple register operand!");
915 /// isValNoAvailableAt - Return true if the val# of the specified interval
916 /// which reaches the given instruction also reaches the specified use index.
917 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
918 SlotIndex UseIdx) const {
919 SlotIndex Index = getInstructionIndex(MI);
920 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
921 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
922 return UI != li.end() && UI->valno == ValNo;
925 /// isReMaterializable - Returns true if the definition MI of the specified
926 /// val# of the specified interval is re-materializable.
927 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
928 const VNInfo *ValNo, MachineInstr *MI,
929 SmallVectorImpl<LiveInterval*> &SpillIs,
934 if (!tii_->isTriviallyReMaterializable(MI, aa_))
937 // Target-specific code can mark an instruction as being rematerializable
938 // if it has one virtual reg use, though it had better be something like
939 // a PIC base register which is likely to be live everywhere.
940 unsigned ImpUse = getReMatImplicitUse(li, MI);
942 const LiveInterval &ImpLi = getInterval(ImpUse);
943 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
944 re = mri_->use_end(); ri != re; ++ri) {
945 MachineInstr *UseMI = &*ri;
946 SlotIndex UseIdx = getInstructionIndex(UseMI);
947 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
949 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
953 // If a register operand of the re-materialized instruction is going to
954 // be spilled next, then it's not legal to re-materialize this instruction.
955 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
956 if (ImpUse == SpillIs[i]->reg)
962 /// isReMaterializable - Returns true if the definition MI of the specified
963 /// val# of the specified interval is re-materializable.
964 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
965 const VNInfo *ValNo, MachineInstr *MI) {
966 SmallVector<LiveInterval*, 4> Dummy1;
968 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
971 /// isReMaterializable - Returns true if the definition MI of every
972 /// val# of the specified interval is re-materializable.
973 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
974 SmallVectorImpl<LiveInterval*> &SpillIs,
977 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
979 const VNInfo *VNI = *i;
981 continue; // Dead val#.
982 // Is the def for the val# rematerializable?
983 if (!VNI->isDefAccurate())
985 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
986 bool DefIsLoad = false;
988 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
995 /// FilterFoldedOps - Filter out two-address use operands. Return
996 /// true if it finds any issue with the operands that ought to prevent
997 /// folding.
998 static bool FilterFoldedOps(MachineInstr *MI,
999 SmallVector<unsigned, 2> &Ops,
1001 SmallVector<unsigned, 2> &FoldOps) {
1003 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1004 unsigned OpIdx = Ops[i];
1005 MachineOperand &MO = MI->getOperand(OpIdx);
1006 // FIXME: fold subreg use.
1010 MRInfo |= (unsigned)VirtRegMap::isMod;
1012 // Filter out two-address use operand(s).
1013 if (MI->isRegTiedToDefOperand(OpIdx)) {
1014 MRInfo = VirtRegMap::isModRef;
1017 MRInfo |= (unsigned)VirtRegMap::isRef;
1019 FoldOps.push_back(OpIdx);
1025 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1026 /// slot / to reg or any rematerialized load into ith operand of specified
1027 /// MI. If it is successful, MI is updated with the newly created MI and
1028 /// returns true.
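/// For illustration (schematic x86-style example with hypothetical vregs):
///   %reg1026 = MOV32rm <fi#3>            ; reload from the spill slot
///   %reg1027 = ADD32rr %reg1025, %reg1026
/// may be folded into the single memory-operand instruction
///   %reg1027 = ADD32rm %reg1025, <fi#3>
/// so that no separate reload is needed.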
1029 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1030 VirtRegMap &vrm, MachineInstr *DefMI,
1032 SmallVector<unsigned, 2> &Ops,
1033 bool isSS, int Slot, unsigned Reg) {
1034 // If it is an implicit def instruction, just delete it.
1035 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1036 RemoveMachineInstrFromMaps(MI);
1037 vrm.RemoveMachineInstrFromMaps(MI);
1038 MI->eraseFromParent();
1043 // Filter the list of operand indexes that are to be folded. Abort if
1044 // any operand will prevent folding.
1045 unsigned MRInfo = 0;
1046 SmallVector<unsigned, 2> FoldOps;
1047 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1050 // The only time it's safe to fold into a two-address instruction is when
1051 // it's folding a reload or spill from / into a spill stack slot.
1052 if (DefMI && (MRInfo & VirtRegMap::isMod))
1055 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1056 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1058 // Remember this instruction uses the spill slot.
1059 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1061 // Attempt to fold the memory reference into the instruction. If
1062 // we can do this, we don't need to insert spill code.
1063 MachineBasicBlock &MBB = *MI->getParent();
1064 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1065 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1066 vrm.transferSpillPts(MI, fmi);
1067 vrm.transferRestorePts(MI, fmi);
1068 vrm.transferEmergencySpills(MI, fmi);
1069 ReplaceMachineInstrInMaps(MI, fmi);
1070 MI = MBB.insert(MBB.erase(MI), fmi);
1077 /// canFoldMemoryOperand - Returns true if the specified load / store
1078 /// folding is possible.
1079 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1080 SmallVector<unsigned, 2> &Ops,
1082 // Filter the list of operand indexes that are to be folded. Abort if
1083 // any operand will prevent folding.
1084 unsigned MRInfo = 0;
1085 SmallVector<unsigned, 2> FoldOps;
1086 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1089 // It's only legal to remat for a use, not a def.
1090 if (ReMat && (MRInfo & VirtRegMap::isMod))
1093 return tii_->canFoldMemoryOperand(MI, FoldOps);
1096 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1097 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1099 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1104 for (++itr; itr != li.ranges.end(); ++itr) {
1105 MachineBasicBlock *mbb2 =
1106 indexes_->getMBBCoveringRange(itr->start, itr->end);
1115 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1116 /// interval on to-be re-materialized operands of MI) with new register.
1117 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1118 MachineInstr *MI, unsigned NewVReg,
1120 // There is an implicit use. That means one of the other operands is
1121 // being remat'ed and the remat'ed instruction has li.reg as a
1122 // use operand. Make sure we rewrite that as well.
1123 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1124 MachineOperand &MO = MI->getOperand(i);
1127 unsigned Reg = MO.getReg();
1128 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1130 if (!vrm.isReMaterialized(Reg))
1132 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1133 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1135 UseMO->setReg(NewVReg);
1139 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1140 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1141 bool LiveIntervals::
1142 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1143 bool TrySplit, SlotIndex index, SlotIndex end,
1145 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1146 unsigned Slot, int LdSlot,
1147 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1149 const TargetRegisterClass* rc,
1150 SmallVector<int, 4> &ReMatIds,
1151 const MachineLoopInfo *loopInfo,
1152 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1153 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1154 std::vector<LiveInterval*> &NewLIs) {
1155 bool CanFold = false;
1157 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1158 MachineOperand& mop = MI->getOperand(i);
1161 unsigned Reg = mop.getReg();
1162 unsigned RegI = Reg;
1163 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1168 bool TryFold = !DefIsReMat;
1169 bool FoldSS = true; // Default behavior unless it's a remat.
1170 int FoldSlot = Slot;
1172 // If this is the rematerializable definition MI itself and
1173 // all of its uses are rematerialized, simply delete it.
1174 if (MI == ReMatOrigDefMI && CanDelete) {
1175 DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1177 RemoveMachineInstrFromMaps(MI);
1178 vrm.RemoveMachineInstrFromMaps(MI);
1179 MI->eraseFromParent();
1183 // If def for this use can't be rematerialized, then try folding.
1184 // If def is rematerializable and it's a load, also try folding.
1185 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1187 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1193 // Scan all of the operands of this instruction rewriting operands
1194 // to use NewVReg instead of li.reg as appropriate. We do this for
1195 // two reasons:
1196 //
1197 // 1. If the instr reads the same spilled vreg multiple times, we
1198 // want to reuse the NewVReg.
1199 // 2. If the instr is a two-addr instruction, we are required to
1200 // keep the src/dst regs pinned.
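// For illustration (hypothetical): an instruction that reads the spilled
// vreg twice, e.g.
//   %reg1026 = ADD32rr %reg1024, %reg1024
// has both operands rewritten to the same NewVReg so that a single reload
// feeds both uses.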
1202 // Keep track of whether we replace a use and/or def so that we can
1203 // create the spill interval with the appropriate range.
1205 HasUse = mop.isUse();
1206 HasDef = mop.isDef();
1207 SmallVector<unsigned, 2> Ops;
1209 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1210 const MachineOperand &MOj = MI->getOperand(j);
1213 unsigned RegJ = MOj.getReg();
1214 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1218 if (!MOj.isUndef()) {
1219 HasUse |= MOj.isUse();
1220 HasDef |= MOj.isDef();
1225 // Create a new virtual register for the spill interval.
1226 // Create the new register now so we can map the fold instruction
1227 // to the new register so when it is unfolded we get the correct
1228 // value.
1229 bool CreatedNewVReg = false;
1231 NewVReg = mri_->createVirtualRegister(rc);
1233 CreatedNewVReg = true;
1239 // Do not fold load / store here if we are splitting. We'll find an
1240 // optimal point to insert a load / store later.
1242 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1243 Ops, FoldSS, FoldSlot, NewVReg)) {
1244 // Folding the load/store can completely change the instruction in
1245 // unpredictable ways, rescan it from the beginning.
1248 // We need to give the new vreg the same stack slot as the
1249 // spilled interval.
1250 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1256 if (isNotInMIMap(MI))
1258 goto RestartInstruction;
1261 // We'll try to fold it later if it's profitable.
1262 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1266 mop.setReg(NewVReg);
1267 if (mop.isImplicit())
1268 rewriteImplicitOps(li, MI, NewVReg, vrm);
1270 // Reuse NewVReg for other reads.
1271 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1272 MachineOperand &mopj = MI->getOperand(Ops[j]);
1273 mopj.setReg(NewVReg);
1274 if (mopj.isImplicit())
1275 rewriteImplicitOps(li, MI, NewVReg, vrm);
1278 if (CreatedNewVReg) {
1280 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1281 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1282 // Each valnum may have its own remat id.
1283 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1285 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1287 if (!CanDelete || (HasUse && HasDef)) {
1288 // If this is a two-addr instruction then its use operands are
1289 // rematerializable but its def is not. It should be assigned a
1291 vrm.assignVirt2StackSlot(NewVReg, Slot);
1294 vrm.assignVirt2StackSlot(NewVReg, Slot);
1296 } else if (HasUse && HasDef &&
1297 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1298 // If this interval hasn't been assigned a stack slot (because earlier
1299 // def is a deleted remat def), do it now.
1300 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1301 vrm.assignVirt2StackSlot(NewVReg, Slot);
1304 // Re-matting an instruction with virtual register use. Add the
1305 // register as an implicit use on the use MI.
1306 if (DefIsReMat && ImpUse)
1307 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1309 // Create a new register interval for this spill / remat.
1310 LiveInterval &nI = getOrCreateInterval(NewVReg);
1311 if (CreatedNewVReg) {
1312 NewLIs.push_back(&nI);
1313 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1315 vrm.setIsSplitFromReg(NewVReg, li.reg);
1319 if (CreatedNewVReg) {
1320 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1321 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1322 DEBUG(errs() << " +" << LR);
1325 // Extend the split live interval to this def / use.
1326 SlotIndex End = index.getDefIndex();
1327 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1328 nI.getValNumInfo(nI.getNumValNums()-1));
1329 DEBUG(errs() << " +" << LR);
1334 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1335 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1336 DEBUG(errs() << " +" << LR);
1341 errs() << "\t\t\t\tAdded new interval: ";
1342 nI.print(errs(), tri_);
1348 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1350 MachineBasicBlock *MBB,
1351 SlotIndex Idx) const {
1352 SlotIndex End = getMBBEndIdx(MBB);
1353 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1354 if (VNI->kills[j].isPHI())
1357 SlotIndex KillIdx = VNI->kills[j];
1358 if (KillIdx > Idx && KillIdx < End)
1364 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1365 /// during spilling.
1367 struct RewriteInfo {
1372 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1373 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1376 struct RewriteInfoCompare {
1377 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1378 return LHS.Index < RHS.Index;
1383 void LiveIntervals::
1384 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1385 LiveInterval::Ranges::const_iterator &I,
1386 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1387 unsigned Slot, int LdSlot,
1388 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1390 const TargetRegisterClass* rc,
1391 SmallVector<int, 4> &ReMatIds,
1392 const MachineLoopInfo *loopInfo,
1393 BitVector &SpillMBBs,
1394 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1395 BitVector &RestoreMBBs,
1396 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1397 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1398 std::vector<LiveInterval*> &NewLIs) {
1399 bool AllCanFold = true;
1400 unsigned NewVReg = 0;
1401 SlotIndex start = I->start.getBaseIndex();
1402 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1404 // First collect all the defs / uses in this live range that will be rewritten.
1405 // Make sure they are sorted according to instruction index.
1406 std::vector<RewriteInfo> RewriteMIs;
1407 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1408 re = mri_->reg_end(); ri != re; ) {
1409 MachineInstr *MI = &*ri;
1410 MachineOperand &O = ri.getOperand();
1412 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1413 SlotIndex index = getInstructionIndex(MI);
1414 if (index < start || index >= end)
1418 // Must be defined by an implicit def. It should not be spilled. Note,
1419 // this is for correctness reasons, e.g.
1420 // 8 %reg1024<def> = IMPLICIT_DEF
1421 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1422 // The live range [12, 14) is not part of the r1024 live interval since
1423 // it's defined by an implicit def. It will not conflict with the live
1424 // interval of r1025. Now suppose both registers are spilled; you can
1425 // easily see a situation where both registers are reloaded before
1426 // the INSERT_SUBREG and both target registers would overlap.
1428 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1430 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1432 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1433 // Now rewrite the defs and uses.
1434 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1435 RewriteInfo &rwi = RewriteMIs[i];
1437 SlotIndex index = rwi.Index;
1438 bool MIHasUse = rwi.HasUse;
1439 bool MIHasDef = rwi.HasDef;
1440 MachineInstr *MI = rwi.MI;
1441 // If MI defs and/or uses the same register multiple times, then there
1442 // are multiple entries.
1443 unsigned NumUses = MIHasUse;
1444 while (i != e && RewriteMIs[i].MI == MI) {
1445 assert(RewriteMIs[i].Index == index);
1446 bool isUse = RewriteMIs[i].HasUse;
1447 if (isUse) ++NumUses;
1449 MIHasDef |= RewriteMIs[i].HasDef;
1452 MachineBasicBlock *MBB = MI->getParent();
1454 if (ImpUse && MI != ReMatDefMI) {
1455 // Re-matting an instruction with virtual register use. Update the
1456 // register interval's spill weight to HUGE_VALF to prevent it from
1457 // being spilled.
1458 LiveInterval &ImpLi = getInterval(ImpUse);
1459 ImpLi.weight = HUGE_VALF;
1462 unsigned MBBId = MBB->getNumber();
1463 unsigned ThisVReg = 0;
1465 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1466 if (NVI != MBBVRegsMap.end()) {
1467 ThisVReg = NVI->second;
1474 // It's better to start a new interval to avoid artificially
1475 // extending the new interval.
1476 if (MIHasDef && !MIHasUse) {
1477 MBBVRegsMap.erase(MBB->getNumber());
1483 bool IsNew = ThisVReg == 0;
1485 // This ends the previous live interval. If all of its defs / uses
1486 // can be folded, give it a low spill weight.
1487 if (NewVReg && TrySplit && AllCanFold) {
1488 LiveInterval &nI = getOrCreateInterval(NewVReg);
1495 bool HasDef = false;
1496 bool HasUse = false;
1497 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1498 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1499 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1500 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1501 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1502 if (!HasDef && !HasUse)
1505 AllCanFold &= CanFold;
1507 // Update weight of spill interval.
1508 LiveInterval &nI = getOrCreateInterval(NewVReg);
1510 // The spill weight is now infinity as it cannot be spilled again.
1511 nI.weight = HUGE_VALF;
1515 // Keep track of the last def and first use in each MBB.
1517 if (MI != ReMatOrigDefMI || !CanDelete) {
1518 bool HasKill = false;
1520 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1522 // If this is two-address code, then this index starts a new VNInfo.
1523 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1525 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1527 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1528 SpillIdxes.find(MBBId);
1530 if (SII == SpillIdxes.end()) {
1531 std::vector<SRInfo> S;
1532 S.push_back(SRInfo(index, NewVReg, true));
1533 SpillIdxes.insert(std::make_pair(MBBId, S));
1534 } else if (SII->second.back().vreg != NewVReg) {
1535 SII->second.push_back(SRInfo(index, NewVReg, true));
1536 } else if (index > SII->second.back().index) {
1537 // If there is an earlier def and this is a two-address
1538 // instruction, then it's not possible to fold the store (which
1539 // would also fold the load).
1540 SRInfo &Info = SII->second.back();
1542 Info.canFold = !HasUse;
1544 SpillMBBs.set(MBBId);
1545 } else if (SII != SpillIdxes.end() &&
1546 SII->second.back().vreg == NewVReg &&
1547 index > SII->second.back().index) {
1548 // There is an earlier def that's not killed (must be two-address).
1549 // The spill is no longer needed.
1550 SII->second.pop_back();
1551 if (SII->second.empty()) {
1552 SpillIdxes.erase(MBBId);
1553 SpillMBBs.reset(MBBId);
1560 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1561 SpillIdxes.find(MBBId);
1562 if (SII != SpillIdxes.end() &&
1563 SII->second.back().vreg == NewVReg &&
1564 index > SII->second.back().index)
1565 // Use(s) follow the last def; it's not safe to fold the spill.
1566 SII->second.back().canFold = false;
1567 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1568 RestoreIdxes.find(MBBId);
1569 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1570 // If we are splitting live intervals, only fold if it's the first
1571 // use and there isn't another use later in the MBB.
1572 RII->second.back().canFold = false;
1574 // Only need a reload if there isn't an earlier def / use.
1575 if (RII == RestoreIdxes.end()) {
1576 std::vector<SRInfo> Infos;
1577 Infos.push_back(SRInfo(index, NewVReg, true));
1578 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1580 RII->second.push_back(SRInfo(index, NewVReg, true));
1582 RestoreMBBs.set(MBBId);
1586 // Update spill weight.
1587 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1588 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1591 if (NewVReg && TrySplit && AllCanFold) {
1592 // If all of its def / use can be folded, give it a low spill weight.
1593 LiveInterval &nI = getOrCreateInterval(NewVReg);
1598 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1599 unsigned vr, BitVector &RestoreMBBs,
1600 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1601 if (!RestoreMBBs[Id])
1603 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1604 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1605 if (Restores[i].index == index &&
1606 Restores[i].vreg == vr &&
1607 Restores[i].canFold)
1612 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1613 unsigned vr, BitVector &RestoreMBBs,
1614 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1615 if (!RestoreMBBs[Id])
1617 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1618 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1619 if (Restores[i].index == index && Restores[i].vreg)
1620 Restores[i].index = SlotIndex();
1623 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1624 /// spilled and create empty intervals for their uses.
1626 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1627 const TargetRegisterClass* rc,
1628 std::vector<LiveInterval*> &NewLIs) {
1629 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1630 re = mri_->reg_end(); ri != re; ) {
1631 MachineOperand &O = ri.getOperand();
1632 MachineInstr *MI = &*ri;
1635 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1636 "Register def was not rewritten?");
1637 RemoveMachineInstrFromMaps(MI);
1638 vrm.RemoveMachineInstrFromMaps(MI);
1639 MI->eraseFromParent();
1641 // This must be a use of an implicit_def so it's not part of the live
1642 // interval. Create a new empty live interval for it.
1643 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1644 unsigned NewVReg = mri_->createVirtualRegister(rc);
1646 vrm.setIsImplicitlyDefined(NewVReg);
1647 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1648 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1649 MachineOperand &MO = MI->getOperand(i);
1650 if (MO.isReg() && MO.getReg() == li.reg) {
1659 std::vector<LiveInterval*> LiveIntervals::
1660 addIntervalsForSpillsFast(const LiveInterval &li,
1661 const MachineLoopInfo *loopInfo,
1663 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1665 std::vector<LiveInterval*> added;
1667 assert(li.weight != HUGE_VALF &&
1668 "attempt to spill already spilled interval!");
1671 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1676 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1678 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1679 while (RI != mri_->reg_end()) {
1680 MachineInstr* MI = &*RI;
1682 SmallVector<unsigned, 2> Indices;
1683 bool HasUse = false;
1684 bool HasDef = false;
1686 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1687 MachineOperand& mop = MI->getOperand(i);
1688 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1690 HasUse |= MI->getOperand(i).isUse();
1691 HasDef |= MI->getOperand(i).isDef();
1693 Indices.push_back(i);
1696 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1697 Indices, true, slot, li.reg)) {
1698 unsigned NewVReg = mri_->createVirtualRegister(rc);
1700 vrm.assignVirt2StackSlot(NewVReg, slot);
1702 // create a new register for this spill
1703 LiveInterval &nI = getOrCreateInterval(NewVReg);
1705 // the spill weight is now infinity as it
1706 // cannot be spilled again
1707 nI.weight = HUGE_VALF;
1709 // Rewrite register operands to use the new vreg.
1710 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1711 E = Indices.end(); I != E; ++I) {
1712 MI->getOperand(*I).setReg(NewVReg);
1714 if (MI->getOperand(*I).isUse())
1715 MI->getOperand(*I).setIsKill(true);
1718 // Fill in the new live interval.
1719 SlotIndex index = getInstructionIndex(MI);
1721 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1722 nI.getNextValue(SlotIndex(), 0, false,
1723 getVNInfoAllocator()));
1724 DEBUG(errs() << " +" << LR);
1726 vrm.addRestorePoint(NewVReg, MI);
1729 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1730 nI.getNextValue(SlotIndex(), 0, false,
1731 getVNInfoAllocator()));
1732 DEBUG(errs() << " +" << LR);
1734 vrm.addSpillPoint(NewVReg, true, MI);
1737 added.push_back(&nI);
1740 errs() << "\t\t\t\tadded new interval: ";
1747 RI = mri_->reg_begin(li.reg);
1753 std::vector<LiveInterval*> LiveIntervals::
1754 addIntervalsForSpills(const LiveInterval &li,
1755 SmallVectorImpl<LiveInterval*> &SpillIs,
1756 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1758 if (EnableFastSpilling)
1759 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1761 assert(li.weight != HUGE_VALF &&
1762 "attempt to spill already spilled interval!");
1765 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1766 li.print(errs(), tri_);
1770 // Each bit specifies whether a spill is required in the MBB.
1771 BitVector SpillMBBs(mf_->getNumBlockIDs());
1772 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1773 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1774 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1775 DenseMap<unsigned,unsigned> MBBVRegsMap;
1776 std::vector<LiveInterval*> NewLIs;
1777 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1779 unsigned NumValNums = li.getNumValNums();
1780 SmallVector<MachineInstr*, 4> ReMatDefs;
1781 ReMatDefs.resize(NumValNums, NULL);
1782 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1783 ReMatOrigDefs.resize(NumValNums, NULL);
1784 SmallVector<int, 4> ReMatIds;
1785 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1786 BitVector ReMatDelete(NumValNums);
1787 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
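
  // The interval was not produced by the pre-allocation splitter. Decide,
  // per value number, whether its def can be rematerialized at the use
  // points; only values that cannot be rematerialized everywhere force the
  // interval to take a stack slot.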
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
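
  // The interval spans multiple blocks and is being split. Walk the per-block
  // spill records collected above and either fold each store into the
  // defining instruction or hand it to the spiller as an explicit spill
  // point.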
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
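
  // Now place the restores. Each record is either folded into its use as a
  // load (or as a rematerialized def), or reported to the spiller as an
  // explicit restore point.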
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register, assert,
    // since we can't handle that yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
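
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value for the register and extend its live interval from
/// that definition to the end of the enclosing basic block.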
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
  Interval.addRange(LR);