1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// When set, spilling never tries to rematerialize a def; the value is always
// reloaded from the stack slot instead.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// When set, spill-weight/interval splitting work is skipped in favor of a
// simpler, faster spilling path.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the pass manager.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the analyses this pass requires and preserves.
// NOTE(review): the embedded line numbering skips 64, 71-72, 75-76 and 83
// here, so some conditional requirements (and the closing brace) are elided
// from this view.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
// Alias analysis is consumed by the rematerialization legality checks.
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
// Liveness information from LiveVariables is the raw input used to build
// the intervals.
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must run before this pass.
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
// SlotIndexes must stay alive as long as the intervals that reference its
// indices, hence the transitive requirement.
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state: the LiveInterval objects in
// r2iMap_, the VNInfo allocator's memory, and any cloned instructions.
// NOTE(review): the loop body that frees each interval (original line ~89)
// is elided from this view.
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
// Delete clone instructions that were created during splitting and are no
// longer referenced by the function.
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
// Caches pointers to the target hooks and sibling analyses, then computes
// the intervals for this function.
// NOTE(review): the assignment of mf_ (original line ~105) and the call to
// computeIntervals() are elided from this view; mri_ is read through mf_
// below, so mf_ must have been set from `fn` first.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
// Bit vector of physical registers the allocator may use on this target.
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update the -stats counter with the intervals just built.
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
// Prints every interval in the r2i map followed by the function's
// instructions (via printInstrs, elided from this view).
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
// printInstrs - Print every machine instruction in the function, prefixed
// with its slot index, one basic block at a time.
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
// Each line: "<slot index>\t<instruction>".
143 OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug helper; body elided from this view (presumably forwards
// to printInstrs on the error stream — TODO confirm against full source).
148 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Check whether assigning physical register `reg` to
// the (single-range) interval `li` would conflict with any instruction in
// that range. Copies to/from li.reg are tolerated; any other operand whose
// assigned physreg overlaps `reg` is a conflict.
// NOTE(review): several `return` statements and closing braces are elided
// from this view (numbering gaps at 156, 166-169, 176-179, 183-184, ...).
152 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
153 VirtRegMap &vrm, unsigned reg) {
154 // We don't handle fancy stuff crossing basic block boundaries
155 if (li.ranges.size() != 1)
157 const LiveRange &range = li.ranges.front();
158 SlotIndex idx = range.start.getBaseIndex();
159 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
161 // Skip deleted instructions
// Walk forward until the first index in the range that still maps to a
// live instruction.
162 MachineInstr *firstMI = getInstructionFromIndex(idx);
163 while (!firstMI && idx != end) {
164 idx = idx.getNextIndex();
165 firstMI = getInstructionFromIndex(idx);
170 // Find last instruction in range
// Walk backward from the end, likewise skipping deleted instructions.
171 SlotIndex lastIdx = end.getPrevIndex();
172 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
173 while (!lastMI && lastIdx != idx) {
174 lastIdx = lastIdx.getPrevIndex();
175 lastMI = getInstructionFromIndex(lastIdx);
180 // Range cannot cross basic block boundaries or terminators
181 MachineBasicBlock *MBB = firstMI->getParent();
182 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
// Scan every instruction in [firstMI, lastMI) for conflicting operands.
185 MachineBasicBlock::const_iterator E = lastMI;
187 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
188 const MachineInstr &MI = *I;
190 // Allow copies to and from li.reg
191 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
192 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
193 if (SrcReg == li.reg || DstReg == li.reg)
196 // Check for operands using reg
197 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
198 const MachineOperand& mop = MI.getOperand(i);
201 unsigned PhysReg = mop.getReg();
202 if (PhysReg == 0 || PhysReg == li.reg)
// For virtual operands, translate to the physreg assigned by the
// register map (if any) before checking overlap.
204 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
205 if (!vrm.hasPhys(PhysReg))
207 PhysReg = vrm.getPhys(PhysReg);
209 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
214 // No conflicts found.
218 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
219 /// it can check use as well.
// Scans every live instruction covered by li's ranges (skipping coalesced
// copies in JoinedCopies) for an operand on a sub-register of Reg; uses are
// only considered when CheckUse is set.
// NOTE(review): `return` statements and some guard lines are elided from
// this view (numbering gaps at 227, 230, 232, 234, 237-238, 240, 243, ...).
220 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
221 unsigned Reg, bool CheckUse,
222 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
223 for (LiveInterval::Ranges::const_iterator
224 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Step through each base index covered by this range.
225 for (SlotIndex index = I->start.getBaseIndex(),
226 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
228 index = index.getNextIndex()) {
229 MachineInstr *MI = getInstructionFromIndex(index);
231 continue; // skip deleted instructions
// Instructions already scheduled for deletion by coalescing don't count.
233 if (JoinedCopies.count(MI))
235 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
236 MachineOperand& MO = MI->getOperand(i);
239 if (MO.isUse() && !CheckUse)
241 unsigned PhysReg = MO.getReg();
242 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
244 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Debug helper: print a physical register by name, or a
// virtual register as "%reg<N>", to the error stream.
254 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
255 if (TargetRegisterInfo::isPhysicalRegister(reg))
256 errs() << tri_->getName(reg);
258 errs() << "%reg" << reg;
// handleVirtualRegisterDef - Extend `interval` for a definition of a virtual
// register by the instruction `mi` (operand MO at index MOIdx, at slot
// MIIdx). Handles three cases: the first def of the vreg, a two-address
// redefinition, and a phi-elimination redefinition.
// NOTE(review): the embedded numbering has many gaps in this function, so
// several statements (declarations, else branches, closing braces) are
// elided from this view.
262 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
263 MachineBasicBlock::iterator mi,
267 LiveInterval &interval) {
269 errs() << "\t\tregister: ";
270 printRegName(interval.reg, tri_);
273 // Virtual registers may be defined multiple times (due to phi
274 // elimination and 2-addr elimination). Much of what we do only has to be
275 // done once for the vreg. We use an empty interval to detect the first
276 // time we see a vreg.
277 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
278 if (interval.empty()) {
279 // Get the Idx of the defining instructions.
280 SlotIndex defIndex = MIIdx.getDefIndex();
281 // Earlyclobbers move back one, so that they overlap the live range
283 if (MO.isEarlyClobber())
284 defIndex = MIIdx.getUseIndex();
// Record the copy instruction (subreg ops count as copies) so the first
// value number remembers what defined it.
286 MachineInstr *CopyMI = NULL;
287 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
288 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
289 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
290 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
291 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
293 // Earlyclobbers move back one.
294 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
296 assert(ValNo->id == 0 && "First value in interval is not 0?");
298 // Loop over all of the blocks that the vreg is defined in. There are
299 // two cases we have to handle here. The most common case is a vreg
300 // whose lifetime is contained within a basic block. In this case there
301 // will be a single kill, in MBB, which comes after the definition.
302 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
303 // FIXME: what about dead vars?
305 if (vi.Kills[0] != mi)
306 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
308 killIdx = defIndex.getStoreIndex();
310 // If the kill happens after the definition, we have an intra-block
312 if (killIdx > defIndex) {
313 assert(vi.AliveBlocks.empty() &&
314 "Shouldn't be alive across any blocks!");
315 LiveRange LR(defIndex, killIdx, ValNo);
316 interval.addRange(LR);
317 DEBUG(errs() << " +" << LR << "\n");
318 ValNo->addKill(killIdx);
323 // The other case we handle is when a virtual register lives to the end
324 // of the defining block, potentially live across some blocks, then is
325 // live into some number of blocks, but gets killed. Start by adding a
326 // range that goes from this definition to the end of the defining block.
327 LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
329 DEBUG(errs() << " +" << NewLR);
330 interval.addRange(NewLR);
332 // Iterate over all of the blocks that the variable is completely
333 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval for each fully-live block.
335 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
336 E = vi.AliveBlocks.end(); I != E; ++I) {
338 getMBBStartIdx(mf_->getBlockNumbered(*I)),
339 getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
341 interval.addRange(LR);
342 DEBUG(errs() << " +" << LR);
345 // Finally, this virtual register is live from the start of any killing
346 // block to the 'use' slot of the killing instruction.
347 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
348 MachineInstr *Kill = vi.Kills[i];
350 getInstructionIndex(Kill).getDefIndex();
351 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
352 interval.addRange(LR);
353 ValNo->addKill(killIdx);
354 DEBUG(errs() << " +" << LR);
358 // If this is the second time we see a virtual register definition, it
359 // must be due to phi elimination or two addr elimination. If this is
360 // the result of two address elimination, then the vreg is one of the
361 // def-and-use register operand.
362 if (mi->isRegTiedToUseOperand(MOIdx)) {
363 // If this is a two-address definition, then we have already processed
364 // the live range. The only problem is that we didn't realize there
365 // are actually two values in the live interval. Because of this we
366 // need to take the LiveRegion that defines this register and split it
368 assert(interval.containsOneValue());
369 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
370 SlotIndex RedefIndex = MIIdx.getDefIndex();
371 if (MO.isEarlyClobber())
372 RedefIndex = MIIdx.getUseIndex();
374 const LiveRange *OldLR =
375 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
376 VNInfo *OldValNo = OldLR->valno;
378 // Delete the initial value, which should be short and continuous,
379 // because the 2-addr copy must be in the same MBB as the redef.
380 interval.removeRange(DefIndex, RedefIndex);
382 // Two-address vregs should always only be redefined once. This means
383 // that at this point, there should be exactly one value number in it.
384 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
386 // The new value number (#1) is defined by the instruction we claimed
// defined value #0, and inherits the old value number's flags.
388 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
389 false, // update at *
391 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
393 // Value#0 is now defined by the 2-addr instruction.
394 OldValNo->def = RedefIndex;
395 OldValNo->setCopy(0);
397 // Add the new live interval which replaces the range for the input copy.
398 LiveRange LR(DefIndex, RedefIndex, ValNo);
399 DEBUG(errs() << " replace range with " << LR);
400 interval.addRange(LR);
401 ValNo->addKill(RedefIndex);
403 // If this redefinition is dead, we need to add a dummy unit live
404 // range covering the def slot.
406 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
410 errs() << " RESULT: ";
411 interval.print(errs(), tri_);
414 // Otherwise, this must be because of phi elimination. If this is the
415 // first redefinition of the vreg that we have seen, go back and change
416 // the live range in the PHI block to be a different value number.
417 if (interval.containsOneValue()) {
419 VNInfo *VNI = interval.getValNumInfo(0);
420 // Phi elimination may have reused the register for multiple identical
421 // phi nodes. There will be a kill per phi. Remove the old ranges that
422 // we now know have an incorrect number.
423 for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
424 MachineInstr *Killer = vi.Kills[ki];
425 SlotIndex Start = getMBBStartIdx(Killer->getParent());
426 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
428 errs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
429 interval.print(errs(), tri_);
431 interval.removeRange(Start, End);
433 // Replace the interval with one of a NEW value number. Note that
434 // this value number isn't actually defined by an instruction, weird
// huh? :) — it's a block-entry (PHI) value.
436 LiveRange LR(Start, End,
437 interval.getNextValue(SlotIndex(Start, true),
438 0, false, VNInfoAllocator));
439 LR.valno->setIsPHIDef(true);
440 interval.addRange(LR);
441 LR.valno->addKill(End);
// The original value is now killed at the end of its defining block,
// where the PHI copy picks it up.
444 MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
445 VNI->addKill(indexes_->getTerminatorGap(killMBB));
446 VNI->setHasPHIKill(true);
448 errs() << " RESULT: ";
449 interval.print(errs(), tri_);
453 // In the case of PHI elimination, each variable definition is only
454 // live until the end of the block. We've already taken care of the
455 // rest of the live range.
456 SlotIndex defIndex = MIIdx.getDefIndex();
457 if (MO.isEarlyClobber())
458 defIndex = MIIdx.getUseIndex();
461 MachineInstr *CopyMI = NULL;
462 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
463 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
464 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
465 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
466 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
468 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// This def lives only to the end of its block, where the PHI consumes it.
470 SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
471 LiveRange LR(defIndex, killIndex, ValNo);
472 interval.addRange(LR);
473 ValNo->addKill(indexes_->getTerminatorGap(mbb));
474 ValNo->setHasPHIKill(true);
475 DEBUG(errs() << " +" << LR);
479 DEBUG(errs() << '\n');
// handlePhysicalRegisterDef - Add a live range for a definition of a
// physical register (operand MO of `mi` at slot MIIdx). Physregs are only
// live within one basic block, so the range ends at the first kill,
// redefinition, or — failing both — immediately after the def (dead).
// NOTE(review): numbering gaps show several statements (loop exits, else
// branches, closing braces) are elided from this view.
482 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
483 MachineBasicBlock::iterator mi,
486 LiveInterval &interval,
487 MachineInstr *CopyMI) {
488 // A physical register cannot be live across basic block, so its
489 // lifetime must end somewhere in its defining basic block.
491 errs() << "\t\tregister: ";
492 printRegName(interval.reg, tri_);
495 SlotIndex baseIndex = MIIdx;
496 SlotIndex start = baseIndex.getDefIndex();
497 // Earlyclobbers move back one.
498 if (MO.isEarlyClobber())
499 start = MIIdx.getUseIndex();
500 SlotIndex end = start;
502 // If it is not used after definition, it is considered dead at
503 // the instruction defining it. Hence its interval is:
504 // [defSlot(def), defSlot(def)+1)
505 // For earlyclobbers, the defSlot was pushed back one; the extra
506 // advance below compensates.
508 DEBUG(errs() << " dead");
509 end = start.getStoreIndex();
513 // If it is not dead on definition, it must be killed by a
514 // subsequent instruction. Hence its interval is:
515 // [defSlot(def), useSlot(kill)+1)
516 baseIndex = baseIndex.getNextIndex();
// Scan forward through the block looking for the kill or redef.
517 while (++mi != MBB->end()) {
519 if (getInstructionFromIndex(baseIndex) == 0)
520 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
522 if (mi->killsRegister(interval.reg, tri_)) {
523 DEBUG(errs() << " killed");
524 end = baseIndex.getDefIndex();
// A redefining instruction also terminates the range.
527 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
529 if (mi->isRegTiedToUseOperand(DefIdx)) {
530 // Two-address instruction.
531 end = baseIndex.getDefIndex();
533 // Another instruction redefines the register before it is ever read.
534 // Then the register is essentially dead at the instruction that defines
535 // it. Hence its interval is:
536 // [defSlot(def), defSlot(def)+1)
537 DEBUG(errs() << " dead");
538 end = start.getStoreIndex();
544 baseIndex = baseIndex.getNextIndex();
547 // The only case we should have a dead physreg here without a killing or
548 // instruction where we know it's dead is if it is live-in to the function
549 // and never used. Another possible case is the implicit use of the
550 // physical register has been deleted by two-address pass.
551 end = start.getStoreIndex();
// All paths above must have produced a non-empty [start, end) range.
554 assert(start < end && "did not find end of interval?");
556 // Already exists? Extend old live interval.
557 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
558 bool Extend = OldLR != interval.end();
559 VNInfo *ValNo = Extend
560 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
561 if (MO.isEarlyClobber() && Extend)
562 ValNo->setHasRedefByEC(true);
563 LiveRange LR(start, end, ValNo);
564 interval.addRange(LR);
565 LR.valno->addKill(end);
566 DEBUG(errs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register definition to the virtual- or
// physical-register handler; for allocatable physregs, also extend the
// intervals of all sub-registers the def clobbers.
// NOTE(review): the MIIdx/MO/MOIdx parameter lines (original 571-573) and
// some brace/assignment lines are elided from this view.
569 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
570 MachineBasicBlock::iterator MI,
574 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
575 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
576 getOrCreateInterval(MO.getReg()));
577 else if (allocatableRegs_[MO.getReg()]) {
// Treat subreg operations and register moves as copies so the value
// number can remember its source.
578 MachineInstr *CopyMI = NULL;
579 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
580 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
581 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
582 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
583 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
585 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
586 getOrCreateInterval(MO.getReg()), CopyMI);
587 // Def of a register also defines its sub-registers.
588 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
589 // If MI also modifies the sub-register explicitly, avoid processing it
590 // more than once. Do not pass in TRI here so it checks for exact match.
591 if (!MI->modifiesRegister(*AS))
592 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
593 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a live range for a register that is live
// into MBB (function argument or callee-saved reg at entry, or an alias of
// one when isAlias is set). The value number is marked as a PHI-style
// block-entry def since no instruction defines it.
// NOTE(review): numbering gaps show several lines (break statements, else
// branches, the `vni` declaration) are elided from this view.
597 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
599 LiveInterval &interval, bool isAlias) {
601 errs() << "\t\tlivein register: ";
602 printRegName(interval.reg, tri_);
605 // Look for kills, if it reaches a def before it's killed, then it shouldn't
606 // be considered a livein.
607 MachineBasicBlock::iterator mi = MBB->begin();
608 SlotIndex baseIndex = MIIdx;
609 SlotIndex start = baseIndex;
610 if (getInstructionFromIndex(baseIndex) == 0)
611 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
613 SlotIndex end = baseIndex;
614 bool SeenDefUse = false;
// Scan forward for the first instruction that kills or redefines the reg.
616 while (mi != MBB->end()) {
617 if (mi->killsRegister(interval.reg, tri_)) {
618 DEBUG(errs() << " killed");
619 end = baseIndex.getDefIndex();
622 } else if (mi->modifiesRegister(interval.reg, tri_)) {
623 // Another instruction redefines the register before it is ever read.
624 // Then the register is essentially dead at the instruction that defines
625 // it. Hence its interval is:
626 // [defSlot(def), defSlot(def)+1)
627 DEBUG(errs() << " dead");
628 end = start.getStoreIndex();
634 if (mi != MBB->end()) {
635 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
639 // Live-in register might not be used at all.
// With no kill and no redef, it is either dead on entry (alias case) or
// live through the whole block.
642 DEBUG(errs() << " dead");
643 end = MIIdx.getStoreIndex();
645 DEBUG(errs() << " live through");
// The value is defined "at" the block start with no defining instruction.
651 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
652 0, false, VNInfoAllocator);
653 vni->setIsPHIDef(true);
654 LiveRange LR(start, end, vni);
656 interval.addRange(LR);
657 LR.valno->addKill(end);
658 DEBUG(errs() << " +" << LR << '\n');
661 /// computeIntervals - computes the live intervals for virtual
662 /// registers. for some ordering of the machine instructions [1,N] a
663 /// live interval is an interval [i, j) where 1 <= i <= j < N for
664 /// which a variable is live
// Walks every block: first materializes intervals for the block's live-in
// physregs (and their unseen sub-registers), then visits each instruction's
// register operands, building intervals for defs and collecting undef uses.
// NOTE(review): numbering gaps show some lines (loop increments, continue
// statements, closing braces) are elided from this view.
665 void LiveIntervals::computeIntervals() {
666 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
667 << "********** Function: "
668 << ((Value*)mf_->getFunction())->getName() << '\n');
670 SmallVector<unsigned, 8> UndefUses;
671 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
673 MachineBasicBlock *MBB = MBBI;
674 // Track the index of the current machine instr.
675 SlotIndex MIIndex = getMBBStartIdx(MBB);
676 DEBUG(errs() << MBB->getName() << ":\n");
678 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
680 // Create intervals for live-ins to this BB first.
681 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
682 LE = MBB->livein_end(); LI != LE; ++LI) {
683 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
684 // Multiple live-ins can alias the same register.
685 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
686 if (!hasInterval(*AS))
687 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
691 // Skip over empty initial indices.
692 if (getInstructionFromIndex(MIIndex) == 0)
693 MIIndex = indexes_->getNextNonNullIndex(MIIndex)
695 for (; MI != miEnd; ++MI) {
696 DEBUG(errs() << MIIndex << "\t" << *MI);
// Visit operands in reverse so defs are seen after their tied uses.
699 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
700 MachineOperand &MO = MI->getOperand(i);
701 if (!MO.isReg() || !MO.getReg())
704 // handle register defs - build intervals
706 handleRegisterDef(MBB, MI, MIIndex, MO, i);
707 else if (MO.isUndef())
708 UndefUses.push_back(MO.getReg());
711 // Move to the next instr slot.
712 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
716 // Create empty intervals for registers defined by implicit_def's (except
717 // for those implicit_def that define values which are liveout of their
719 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
720 unsigned UndefReg = UndefUses[i];
721 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a fresh LiveInterval for `reg`. Physical
// registers get an effectively-infinite spill weight (HUGE_VALF) so the
// allocator never tries to spill them; virtual registers start at 0.
725 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
726 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
727 return new LiveInterval(reg, Weight);
730 /// dupInterval - Duplicate a live interval. The caller is responsible for
731 /// managing the allocated memory.
// Deep-copies li (ranges and value numbers) into a newly created interval
// with the same register and weight.
732 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
733 LiveInterval *NewLI = createInterval(li->reg);
734 NewLI->Copy(*li, mri_, getVNInfoAllocator());
738 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
739 /// copy field and returns the source register that defines it.
// Handles EXTRACT_SUBREG (possibly narrowing a physreg to the named
// sub-register), INSERT_SUBREG/SUBREG_TO_REG (source is operand 2), and
// plain target move instructions.
// NOTE(review): several `return` statements are elided from this view
// (numbering gaps at 741-743, 753, 756-757, 761, 764).
740 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
744 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
745 // If it's extracting out of a physical register, return the sub-register.
746 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
747 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
748 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
749 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
750 if (SrcSubReg == DstSubReg)
751 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
752 // reg1034 can still be coalesced to EDX.
754 assert(DstSubReg == 0);
755 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
758 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
759 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
760 return VNI->getCopy()->getOperand(2).getReg();
762 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
763 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
765 llvm_unreachable("Unrecognized copy instruction!");
769 //===----------------------------------------------------------------------===//
770 // Register allocator hooks.
773 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
774 /// allow one) virtual register operand, then its uses are implicitly using
775 /// the register. Returns the virtual register.
// NOTE(review): the `RegOp` accumulator declaration, `continue` statements
// and final return are elided from this view (numbering gaps).
776 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
777 MachineInstr *MI) const {
779 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
780 MachineOperand &MO = MI->getOperand(i);
// Only register *uses* matter; skip the interval's own register.
781 if (!MO.isReg() || !MO.isUse())
783 unsigned Reg = MO.getReg();
784 if (Reg == 0 || Reg == li.reg)
// Unallocatable physreg uses (e.g. reserved registers) don't block remat.
787 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
788 !allocatableRegs_[Reg])
790 // FIXME: For now, only remat MI with at most one register operand.
792 "Can't rematerialize instruction with multiple register operand!");
801 /// isValNoAvailableAt - Return true if the val# of the specified interval
802 /// which reaches the given instruction also reaches the specified use index.
803 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
804 SlotIndex UseIdx) const {
// Find the value live at MI, then check the same value covers UseIdx.
805 SlotIndex Index = getInstructionIndex(MI);
806 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
807 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
808 return UI != li.end() && UI->valno == ValNo;
811 /// isReMaterializable - Returns true if the definition MI of the specified
812 /// val# of the specified interval is re-materializable.
// Beyond the target's triviality check, verifies that any implicit virtual
// register use of MI is available at every use of li, and that the implicit
// use is not itself about to be spilled.
// NOTE(review): `return` statements and some parameters/guards are elided
// from this view (numbering gaps at 816-819, 821-822, 827, 834, 836-838, 843+).
813 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
814 const VNInfo *ValNo, MachineInstr *MI,
815 SmallVectorImpl<LiveInterval*> &SpillIs,
820 if (!tii_->isTriviallyReMaterializable(MI, aa_))
823 // Target-specific code can mark an instruction as being rematerializable
824 // if it has one virtual reg use, though it had better be something like
825 // a PIC base register which is likely to be live everywhere.
826 unsigned ImpUse = getReMatImplicitUse(li, MI);
828 const LiveInterval &ImpLi = getInterval(ImpUse);
// Every use of li reached by ValNo must also see the implicit use's value.
829 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
830 re = mri_->use_end(); ri != re; ++ri) {
831 MachineInstr *UseMI = &*ri;
832 SlotIndex UseIdx = getInstructionIndex(UseMI);
833 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
835 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
839 // If a register operand of the re-materialized instruction is going to
840 // be spilled next, then it's not legal to re-materialize this instruction.
841 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
842 if (ImpUse == SpillIs[i]->reg)
848 /// isReMaterializable - Returns true if the definition MI of the specified
849 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the full version with empty/dummy
// spill-list and load-flag arguments (Dummy2's declaration is elided from
// this view — numbering gap at 853).
850 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
851 const VNInfo *ValNo, MachineInstr *MI) {
852 SmallVector<LiveInterval*, 4> Dummy1;
854 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
857 /// isReMaterializable - Returns true if every definition of MI of every
858 /// val# of the specified interval is re-materializable.
// Also reports (via the elided out-parameter) whether any def is a load.
// NOTE(review): several lines (the isLoad parameter, guards, returns) are
// elided from this view (numbering gaps at 861-862, 864, 866, 870, 873, 875+).
859 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
860 SmallVectorImpl<LiveInterval*> &SpillIs,
863 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
865 const VNInfo *VNI = *i;
867 continue; // Dead val#.
868 // Is the def for the val# rematerializable?
// A value whose def index is inexact cannot be located, so it cannot
// be rematerialized.
869 if (!VNI->isDefAccurate())
871 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
872 bool DefIsLoad = false;
874 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
881 /// FilterFoldedOps - Filter out two-address use operands. Return
882 /// true if it finds any issue with the operands that ought to prevent
// folding; the surviving operand indexes are collected into FoldOps and
// the mod/ref kind accumulated into the (elided) MRInfo out-parameter.
884 static bool FilterFoldedOps(MachineInstr *MI,
885 SmallVector<unsigned, 2> &Ops,
887 SmallVector<unsigned, 2> &FoldOps) {
889 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
890 unsigned OpIdx = Ops[i];
891 MachineOperand &MO = MI->getOperand(OpIdx);
892 // FIXME: fold subreg use.
// A def operand marks the access as a modification.
896 MRInfo |= (unsigned)VirtRegMap::isMod;
898 // Filter out two-address use operand(s).
// A use tied to a def is both modified and referenced, but must not be
// added to FoldOps.
899 if (MI->isRegTiedToDefOperand(OpIdx)) {
900 MRInfo = VirtRegMap::isModRef;
903 MRInfo |= (unsigned)VirtRegMap::isRef;
905 FoldOps.push_back(OpIdx);
911 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
912 /// slot / to reg or any rematerialized load into ith operand of specified
913 /// MI. If it is successful, MI is updated with the newly created MI and
// all the bookkeeping maps (slot uses, spill/restore points, slot indexes)
// are transferred from the old instruction to the folded one.
// NOTE(review): `return` statements and the MRInfo declaration are elided
// from this view (numbering gaps at 925-928, 931, 934-935, 939-940, 943, 957+).
915 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
916 VirtRegMap &vrm, MachineInstr *DefMI,
918 SmallVector<unsigned, 2> &Ops,
919 bool isSS, int Slot, unsigned Reg) {
920 // If it is an implicit def instruction, just delete it.
921 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
922 RemoveMachineInstrFromMaps(MI);
923 vrm.RemoveMachineInstrFromMaps(MI);
924 MI->eraseFromParent();
929 // Filter the list of operand indexes that are to be folded. Abort if
930 // any operand will prevent folding.
932 SmallVector<unsigned, 2> FoldOps;
933 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
936 // The only time it's safe to fold into a two address instruction is when
937 // it's folding reload and spill from / into a spill stack slot.
938 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (spill/reload) or the remat def instruction.
941 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
942 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
944 // Remember this instruction uses the spill slot.
945 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
947 // Attempt to fold the memory reference into the instruction. If
948 // we can do this, we don't need to insert spill code.
949 MachineBasicBlock &MBB = *MI->getParent();
950 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
951 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all spiller bookkeeping from the old instruction to fmi, then
// splice fmi into the block in place of MI.
952 vrm.transferSpillPts(MI, fmi);
953 vrm.transferRestorePts(MI, fmi);
954 vrm.transferEmergencySpills(MI, fmi);
955 ReplaceMachineInstrInMaps(MI, fmi);
956 MI = MBB.insert(MBB.erase(MI), fmi);
963 /// canFoldMemoryOperand - Returns true if the specified load / store
964 /// folding is possible.
// Dry-run counterpart of tryFoldMemoryOperand: filters the operands, then
// asks the target whether the fold would succeed, without mutating MI.
// NOTE(review): the MRInfo declaration, the ReMat parameter, and two
// `return false` lines are elided from this view (numbering gaps).
965 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
966 SmallVector<unsigned, 2> &Ops,
968 // Filter the list of operand indexes that are to be folded. Abort if
969 // any operand will prevent folding.
971 SmallVector<unsigned, 2> FoldOps;
972 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
975 // It's only legal to remat for a use, not a def.
976 if (ReMat && (MRInfo & VirtRegMap::isMod))
979 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of li is covered by the
// same single basic block (used to decide whether local spilling tricks
// apply). The comparison of mbb2 against mbb and the returns are elided
// from this view (numbering gaps at 984, 986-989, 993+).
982 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
983 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
985 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
990 for (++itr; itr != li.ranges.end(); ++itr) {
991 MachineBasicBlock *mbb2 =
992 indexes_->getMBBCoveringRange(itr->start, itr->end);
1001 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1002 /// interval on to-be re-materialized operands of MI) with new register.
// For each rematerialized virtual register operand of MI, patch the use of
// li.reg inside the rematerialized defining instruction to NewVReg.
// NOTE(review): the VirtRegMap parameter line and some guard/continue lines
// are elided from this view (numbering gaps at 1005, 1011-1012, 1015, 1017, 1020).
1003 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1004 MachineInstr *MI, unsigned NewVReg,
1006 // There is an implicit use. That means one of the other operand is
1007 // being remat'ed and the remat'ed instruction has li.reg as an
1008 // use operand. Make sure we rewrite that as well.
1009 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1010 MachineOperand &MO = MI->getOperand(i);
1013 unsigned Reg = MO.getReg();
1014 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1016 if (!vrm.isReMaterialized(Reg))
1018 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1019 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1021 UseMO->setReg(NewVReg);
1025 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1026 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites a single instruction's operands that reference the spilled
// interval li: it may delete a fully-rematerialized def, fold a load/store
// into the instruction, or rewrite the operands to a fresh virtual register
// (NewVReg) and record the new interval's ranges and stack-slot/remat state
// in the VirtRegMap. Returns whether the folding is still a candidate
// (CanFold) for a later, profitability-driven fold.
// NOTE(review): many original lines are elided from this listing (gaps in
// the embedded numbering), including the MachineInstr *MI / VirtRegMap &vrm
// parameters, the RestartInstruction label, several guard bodies and
// closing braces. Code kept verbatim.
1027 bool LiveIntervals::
1028 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1029 bool TrySplit, SlotIndex index, SlotIndex end,
1031 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1032 unsigned Slot, int LdSlot,
1033 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1035 const TargetRegisterClass* rc,
1036 SmallVector<int, 4> &ReMatIds,
1037 const MachineLoopInfo *loopInfo,
1038 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1039 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1040 std::vector<LiveInterval*> &NewLIs) {
1041 bool CanFold = false;
1043 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1044 MachineOperand& mop = MI->getOperand(i);
1047 unsigned Reg = mop.getReg();
1048 unsigned RegI = Reg;
1049 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1054 bool TryFold = !DefIsReMat;
1055 bool FoldSS = true; // Default behavior unless it's a remat.
1056 int FoldSlot = Slot;
1058     // If this is the rematerializable definition MI itself and
1059     // all of its uses are rematerialized, simply delete it.
1060 if (MI == ReMatOrigDefMI && CanDelete) {
1061 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
1063 RemoveMachineInstrFromMaps(MI);
1064 vrm.RemoveMachineInstrFromMaps(MI);
1065 MI->eraseFromParent();
1069     // If def for this use can't be rematerialized, then try folding.
1070     // If def is rematerializable and it's a load, also try folding.
1071 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1073       // Try fold loads (from stack slot, constant pool, etc.) into uses.
1079     // Scan all of the operands of this instruction rewriting operands
1080     // to use NewVReg instead of li.reg as appropriate.  We do this for
1083     // 1. If the instr reads the same spilled vreg multiple times, we
1084     //    want to reuse the NewVReg.
1085     // 2. If the instr is a two-addr instruction, we are required to
1086     //    keep the src/dst regs pinned.
1088     // Keep track of whether we replace a use and/or def so that we can
1089     // create the spill interval with the appropriate range.
1091 HasUse = mop.isUse();
1092 HasDef = mop.isDef();
1093 SmallVector<unsigned, 2> Ops;
// Collect all later operands of this MI that read/write the same register
// so they are rewritten (and potentially folded) together.
1095 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1096 const MachineOperand &MOj = MI->getOperand(j);
1099 unsigned RegJ = MOj.getReg();
1100 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1104 if (!MOj.isUndef()) {
1105 HasUse |= MOj.isUse();
1106 HasDef |= MOj.isDef();
1111     // Create a new virtual register for the spill interval.
1112     // Create the new register now so we can map the fold instruction
1113     // to the new register so when it is unfolded we get the correct
1115 bool CreatedNewVReg = false;
1117 NewVReg = mri_->createVirtualRegister(rc);
1119 CreatedNewVReg = true;
1121       // The new virtual register should get the same allocation hints as the
1123 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1124 if (Hint.first || Hint.second)
1125 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1131       // Do not fold load / store here if we are splitting. We'll find an
1132       // optimal point to insert a load / store later.
1134 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1135 Ops, FoldSS, FoldSlot, NewVReg)) {
1136           // Folding the load/store can completely change the instruction in
1137           // unpredictable ways, rescan it from the beginning.
1140           // We need to give the new vreg the same stack slot as the
1141           // spilled interval.
1142 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding removed the instruction from the index maps, this MI is gone;
// otherwise restart the operand scan on the (rewritten) instruction.
1148 if (isNotInMIMap(MI))
1150 goto RestartInstruction;
1153         // We'll try to fold it later if it's profitable.
1154 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, then all the sibling operands gathered in Ops.
1158 mop.setReg(NewVReg);
1159 if (mop.isImplicit())
1160 rewriteImplicitOps(li, MI, NewVReg, vrm);
1162     // Reuse NewVReg for other reads.
1163 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1164 MachineOperand &mopj = MI->getOperand(Ops[j]);
1165 mopj.setReg(NewVReg);
1166 if (mopj.isImplicit())
1167 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record remat / stack-slot bookkeeping for a newly created vreg.
1170 if (CreatedNewVReg) {
1172 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1173 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1174           // Each valnum may have its own remat id.
1175 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1177 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1179 if (!CanDelete || (HasUse && HasDef)) {
1180           // If this is a two-addr instruction then its use operands are
1181           // rematerializable but its def is not. It should be assigned a
1183 vrm.assignVirt2StackSlot(NewVReg, Slot);
1186 vrm.assignVirt2StackSlot(NewVReg, Slot);
1188 } else if (HasUse && HasDef &&
1189 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1190       // If this interval hasn't been assigned a stack slot (because earlier
1191       // def is a deleted remat def), do it now.
1192 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1193 vrm.assignVirt2StackSlot(NewVReg, Slot);
1196     // Re-matting an instruction with virtual register use. Add the
1197     // register as an implicit use on the use MI.
1198 if (DefIsReMat && ImpUse)
1199 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1201     // Create a new register interval for this spill / remat.
1202 LiveInterval &nI = getOrCreateInterval(NewVReg);
1203 if (CreatedNewVReg) {
1204 NewLIs.push_back(&nI);
1205 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1207 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval ranges covering this use and/or def. The surrounding
// HasUse/HasDef conditions are elided in this listing.
1211 if (CreatedNewVReg) {
1212 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1213 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1214 DEBUG(errs() << " +" << LR);
1217         // Extend the split live interval to this def / use.
1218 SlotIndex End = index.getDefIndex();
1219 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1220 nI.getValNumInfo(nI.getNumValNums()-1));
1221 DEBUG(errs() << " +" << LR);
1226 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1227 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1228 DEBUG(errs() << " +" << LR);
1233 errs() << "\t\t\t\tAdded new interval: ";
1234 nI.print(errs(), tri_);
// anyKillInMBBAfterIdx - Return true if the value numbered by VNI has a
// (non-PHI) kill inside MBB that comes strictly after Idx. Used to decide
// whether a spilled value is still read later in the same block.
// NOTE(review): the `const VNInfo *VNI` parameter line, the `continue` for
// PHI kills, and the return statements are elided from this listing.
1240 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1242 MachineBasicBlock *MBB,
1243 SlotIndex Idx) const {
1244 SlotIndex End = getMBBEndIdx(MBB);
1245 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1246 if (VNI->kills[j].isPHI())
// A kill between Idx and the block's end index means the value lives on.
1249 SlotIndex KillIdx = VNI->kills[j];
1250 if (KillIdx > Idx && KillIdx < End)
1256 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1257 /// during spilling.
// NOTE(review): the data members (Index, MI, HasUse, HasDef — evident from
// the constructor's init list) and the structs' closing braces are elided
// from this listing.
1259   struct RewriteInfo {
1264 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1265 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by slot index, used to sort RewriteInfo records so
// rewriting proceeds in instruction order.
1268   struct RewriteInfoCompare {
1269 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1270 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def/use of li inside the live
// range [I->start, I->end): collect the referencing instructions, sort them
// by slot index, rewrite each via rewriteInstructionForSpills, and record
// per-MBB spill points (SpillMBBs/SpillIdxes) and restore points
// (RestoreMBBs/RestoreIdxes) for the spiller, updating spill weights.
// NOTE(review): many original lines are elided from this listing (gaps in
// the embedded numbering), including the VirtRegMap &vrm parameter, loop
// increments, several condition lines and closing braces. Code kept verbatim.
1275 void LiveIntervals::
1276 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1277 LiveInterval::Ranges::const_iterator &I,
1278 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1279 unsigned Slot, int LdSlot,
1280 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1282 const TargetRegisterClass* rc,
1283 SmallVector<int, 4> &ReMatIds,
1284 const MachineLoopInfo *loopInfo,
1285 BitVector &SpillMBBs,
1286 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1287 BitVector &RestoreMBBs,
1288 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1289 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1290 std::vector<LiveInterval*> &NewLIs) {
1291 bool AllCanFold = true;
1292 unsigned NewVReg = 0;
1293 SlotIndex start = I->start.getBaseIndex();
1294 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1296   // First collect all the def / use in this live range that will be rewritten.
1297   // Make sure they are sorted according to instruction index.
1298 std::vector<RewriteInfo> RewriteMIs;
1299 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1300 re = mri_->reg_end(); ri != re; ) {
1301 MachineInstr *MI = &*ri;
1302 MachineOperand &O = ri.getOperand();
1304 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
// Skip operands outside the range being rewritten.
1305 SlotIndex index = getInstructionIndex(MI);
1306 if (index < start || index >= end)
1310     // Must be defined by an implicit def. It should not be spilled. Note,
1311     // this is for correctness reason. e.g.
1312     // 8   %reg1024<def> = IMPLICIT_DEF
1313     // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1314     // The live range [12, 14) are not part of the r1024 live interval since
1315     // it's defined by an implicit def. It will not conflicts with live
1316     // interval of r1025. Now suppose both registers are spilled, you can
1317     // easily see a situation where both registers are reloaded before
1318     // the INSERT_SUBREG and both target registers that would overlap.
1320 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1322 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1324 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1325   // Now rewrite the defs and uses.
1326 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1327 RewriteInfo &rwi = RewriteMIs[i];
1329 SlotIndex index = rwi.Index;
1330 bool MIHasUse = rwi.HasUse;
1331 bool MIHasDef = rwi.HasDef;
1332 MachineInstr *MI = rwi.MI;
1333     // If MI def and/or use the same register multiple times, then there
1334     // are multiple entries.
1335 unsigned NumUses = MIHasUse;
// Coalesce all RewriteInfo entries that refer to the same instruction.
1336 while (i != e && RewriteMIs[i].MI == MI) {
1337 assert(RewriteMIs[i].Index == index);
1338 bool isUse = RewriteMIs[i].HasUse;
1339 if (isUse) ++NumUses;
1341 MIHasDef |= RewriteMIs[i].HasDef;
1344 MachineBasicBlock *MBB = MI->getParent();
1346 if (ImpUse && MI != ReMatDefMI) {
1347       // Re-matting an instruction with virtual register use. Update the
1348       // register interval's spill weight to HUGE_VALF to prevent it from
1350 LiveInterval &ImpLi = getInterval(ImpUse);
1351 ImpLi.weight = HUGE_VALF;
// Decide whether to reuse this MBB's existing replacement vreg or start a
// fresh one (per-MBB mapping kept in MBBVRegsMap).
1354 unsigned MBBId = MBB->getNumber();
1355 unsigned ThisVReg = 0;
1357 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1358 if (NVI != MBBVRegsMap.end()) {
1359 ThisVReg = NVI->second;
1366           // It's better to start a new interval to avoid artifically
1367           // extend the new interval.
1368 if (MIHasDef && !MIHasUse) {
1369 MBBVRegsMap.erase(MBB->getNumber());
1375 bool IsNew = ThisVReg == 0;
1377       // This ends the previous live interval. If all of its def / use
1378       // can be folded, give it a low spill weight.
1379 if (NewVReg && TrySplit && AllCanFold) {
1380 LiveInterval &nI = getOrCreateInterval(NewVReg);
1387 bool HasDef = false;
1388 bool HasUse = false;
1389 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1390 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1391 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1392 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1393 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1394 if (!HasDef && !HasUse)
1397 AllCanFold &= CanFold;
1399     // Update weight of spill interval.
1400 LiveInterval &nI = getOrCreateInterval(NewVReg);
1402       // The spill weight is now infinity as it cannot be spilled again.
1403 nI.weight = HUGE_VALF;
1407     // Keep track of the last def and first use in each MBB.
1409 if (MI != ReMatOrigDefMI || !CanDelete) {
1410 bool HasKill = false;
1412 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1414           // If this is a two-address code, then this index starts a new VNInfo.
1415 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1417 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1419 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1420 SpillIdxes.find(MBBId);
// Record (or update) the spill point for this MBB.
1422 if (SII == SpillIdxes.end()) {
1423 std::vector<SRInfo> S;
1424 S.push_back(SRInfo(index, NewVReg, true));
1425 SpillIdxes.insert(std::make_pair(MBBId, S));
1426 } else if (SII->second.back().vreg != NewVReg) {
1427 SII->second.push_back(SRInfo(index, NewVReg, true));
1428 } else if (index > SII->second.back().index) {
1429             // If there is an earlier def and this is a two-address
1430             // instruction, then it's not possible to fold the store (which
1431             // would also fold the load).
1432 SRInfo &Info = SII->second.back();
1434 Info.canFold = !HasUse;
1436 SpillMBBs.set(MBBId);
1437 } else if (SII != SpillIdxes.end() &&
1438 SII->second.back().vreg == NewVReg &&
1439 index > SII->second.back().index) {
1440           // There is an earlier def that's not killed (must be two-address).
1441           // The spill is no longer needed.
1442 SII->second.pop_back();
1443 if (SII->second.empty()) {
1444 SpillIdxes.erase(MBBId);
1445 SpillMBBs.reset(MBBId);
// Record (or update) the restore point for this MBB.
1452 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1453 SpillIdxes.find(MBBId);
1454 if (SII != SpillIdxes.end() &&
1455 SII->second.back().vreg == NewVReg &&
1456 index > SII->second.back().index)
1457           // Use(s) following the last def, it's not safe to fold the spill.
1458 SII->second.back().canFold = false;
1459 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1460 RestoreIdxes.find(MBBId);
1461 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1462           // If we are splitting live intervals, only fold if it's the first
1463           // use and there isn't another use later in the MBB.
1464 RII->second.back().canFold = false;
1466           // Only need a reload if there isn't an earlier def / use.
1467 if (RII == RestoreIdxes.end()) {
1468 std::vector<SRInfo> Infos;
1469 Infos.push_back(SRInfo(index, NewVReg, true));
1470 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1472 RII->second.push_back(SRInfo(index, NewVReg, true));
1474 RestoreMBBs.set(MBBId);
1478     // Update spill weight.
1479 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1480 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Give the final interval a low spill weight if everything folded.
1483 if (NewVReg && TrySplit && AllCanFold) {
1484     // If all of its def / use can be folded, give it a low spill weight.
1485 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if there is a recorded, foldable restore
// of vreg vr at slot `index` in the MBB numbered Id.
// NOTE(review): the early `return false;` and final returns are elided from
// this listing (gaps in the embedded numbering).
1490 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1491 unsigned vr, BitVector &RestoreMBBs,
1492 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1493 if (!RestoreMBBs[Id])
1495 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1496 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1497 if (Restores[i].index == index &&
1498 Restores[i].vreg == vr &&
1499 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the recorded restore of vreg vr at slot
// `index` in MBB Id by clearing its index (a default SlotIndex marks the
// entry as dead; consumers skip such entries).
1504 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1505 unsigned vr, BitVector &RestoreMBBs,
1506 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1507 if (!RestoreMBBs[Id])
1509 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1510 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the vreg test below looks truncated in this listing —
// presumably `Restores[i].vreg == vr` (compare alsoFoldARestore above);
// verify against the upstream source.
1511 if (Restores[i].index == index && Restores[i].vreg)
1512 Restores[i].index = SlotIndex();
1515 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1516 /// spilled and create empty intervals for their uses.
// NOTE(review): the return type line, the def/use dispatch conditions, the
// operand rewrite to NewVReg, and the loop-closing braces are elided from
// this listing (gaps in the embedded numbering). Code kept verbatim.
1518 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1519 const TargetRegisterClass* rc,
1520 std::vector<LiveInterval*> &NewLIs) {
1521 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1522 re = mri_->reg_end(); ri != re; ) {
1523 MachineOperand &O = ri.getOperand();
1524 MachineInstr *MI = &*ri;
// Defs left at this point must be IMPLICIT_DEFs: erase them entirely.
1527 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1528 "Register def was not rewritten?");
1529 RemoveMachineInstrFromMaps(MI);
1530 vrm.RemoveMachineInstrFromMaps(MI);
1531 MI->eraseFromParent();
1533       // This must be an use of an implicit_def so it's not part of the live
1534       // interval. Create a new empty live interval for it.
1535       // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1536 unsigned NewVReg = mri_->createVirtualRegister(rc);
1538 vrm.setIsImplicitlyDefined(NewVReg);
1539 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1540 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1541 MachineOperand &MO = MI->getOperand(i);
1542 if (MO.isReg() && MO.getReg() == li.reg) {
// addIntervalsForSpillsFast - Simple one-pass spiller used when -fast-spill
// is enabled: assigns li one stack slot, then for each referencing
// instruction either folds the memory access or rewrites the operands to a
// fresh vreg with a tiny interval around the instruction, recording
// restore/spill points in the VirtRegMap. Returns the new intervals.
// NOTE(review): the VirtRegMap &vrm parameter, the iterator advance, the
// HasUse/HasDef branch structure and closing braces are elided from this
// listing (gaps in the embedded numbering). Code kept verbatim.
1551 std::vector<LiveInterval*> LiveIntervals::
1552 addIntervalsForSpillsFast(const LiveInterval &li,
1553 const MachineLoopInfo *loopInfo,
1555 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1557 std::vector<LiveInterval*> added;
1559 assert(li.weight != HUGE_VALF &&
1560 "attempt to spill already spilled interval!");
1563 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1568 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1570 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1571 while (RI != mri_->reg_end()) {
1572 MachineInstr* MI = &*RI;
// Gather every operand of MI that refers to li.reg.
1574 SmallVector<unsigned, 2> Indices;
1575 bool HasUse = false;
1576 bool HasDef = false;
1578 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1579 MachineOperand& mop = MI->getOperand(i);
1580 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1582 HasUse |= MI->getOperand(i).isUse();
1583 HasDef |= MI->getOperand(i).isDef();
1585 Indices.push_back(i);
// If the target cannot fold the stack access into MI, rewrite to a new vreg.
1588 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1589 Indices, true, slot, li.reg)) {
1590 unsigned NewVReg = mri_->createVirtualRegister(rc);
1592 vrm.assignVirt2StackSlot(NewVReg, slot);
1594       // create a new register for this spill
1595 LiveInterval &nI = getOrCreateInterval(NewVReg);
1597       // the spill weight is now infinity as it
1598       // cannot be spilled again
1599 nI.weight = HUGE_VALF;
1601       // Rewrite register operands to use the new vreg.
1602 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1603 E = Indices.end(); I != E; ++I) {
1604 MI->getOperand(*I).setReg(NewVReg);
1606 if (MI->getOperand(*I).isUse())
1607 MI->getOperand(*I).setIsKill(true);
1610       // Fill in the new live interval.
1611 SlotIndex index = getInstructionIndex(MI);
// Use range: reload before the instruction, kill at the use.
1613 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1614 nI.getNextValue(SlotIndex(), 0, false,
1615 getVNInfoAllocator()));
1616 DEBUG(errs() << " +" << LR);
1618 vrm.addRestorePoint(NewVReg, MI);
// Def range: value live from the def until the store after the instruction.
1621 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1622 nI.getNextValue(SlotIndex(), 0, false,
1623 getVNInfoAllocator()));
1624 DEBUG(errs() << " +" << LR);
1626 vrm.addSpillPoint(NewVReg, true, MI);
1629 added.push_back(&nI);
1632 errs() << "\t\t\t\tadded new interval: ";
// Rewriting may have invalidated the iterator; restart the reg_iterator.
1639 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spiller entry point for the linear-scan
// allocator. Computes, for the interval li: which val#s are rematerializable
// (and deletable), whether a stack slot is needed, then rewrites all
// defs/uses per live range, inserts spill/restore points, folds memory
// operands where possible, and finalizes kill flags and spill weights on the
// resulting new intervals, which are returned.
// NOTE(review): a substantial number of original lines are elided from this
// listing (gaps in the embedded numbering): DEBUG braces, loop increments,
// several guard/else bodies, CloneMIs declaration, dummy/LdSlot
// declarations, return statements and closing braces. Code kept verbatim.
1645 std::vector<LiveInterval*> LiveIntervals::
1646 addIntervalsForSpills(const LiveInterval &li,
1647 SmallVectorImpl<LiveInterval*> &SpillIs,
1648 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1650 if (EnableFastSpilling)
1651 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1653 assert(li.weight != HUGE_VALF &&
1654 "attempt to spill already spilled interval!");
1657 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1658 li.print(errs(), tri_);
1662   // Each bit specify whether a spill is required in the MBB.
1663 BitVector SpillMBBs(mf_->getNumBlockIDs());
1664 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1665 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1666 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1667 DenseMap<unsigned,unsigned> MBBVRegsMap;
1668 std::vector<LiveInterval*> NewLIs;
1669 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization state, indexed by VNInfo id.
1671 unsigned NumValNums = li.getNumValNums();
1672 SmallVector<MachineInstr*, 4> ReMatDefs;
1673 ReMatDefs.resize(NumValNums, NULL);
1674 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1675 ReMatOrigDefs.resize(NumValNums, NULL);
1676 SmallVector<int, 4> ReMatIds;
1677 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1678 BitVector ReMatDelete(NumValNums);
1679 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1681   // Spilling a split live interval. It cannot be split any further. Also,
1682   // it's also guaranteed to be a single val# / range interval.
1683 if (vrm.getPreSplitReg(li.reg)) {
1684 vrm.setIsSplitFromReg(li.reg, 0);
1685     // Unset the split kill marker on the last use.
1686 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1687 if (KillIdx != SlotIndex()) {
1688 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1689 assert(KillMI && "Last use disappeared?");
1690 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1691 assert(KillOp != -1 && "Last use disappeared?");
1692 KillMI->getOperand(KillOp).setIsKill(false);
1694 vrm.removeKillPoint(li.reg);
1695 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1696 Slot = vrm.getStackSlot(li.reg);
1697 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1698 MachineInstr *ReMatDefMI = DefIsReMat ?
1699 vrm.getReMaterializedMI(li.reg) : NULL;
1701 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1702 bool isLoad = isLoadSS ||
1703 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1704 bool IsFirstRange = true;
1705 for (LiveInterval::Ranges::const_iterator
1706 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1707       // If this is a split live interval with multiple ranges, it means there
1708       // are two-address instructions that re-defined the value. Only the
1709       // first def can be rematerialized!
1711         // Note ReMatOrigDefMI has already been deleted.
1712 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1713 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1714 false, vrm, rc, ReMatIds, loopInfo,
1715 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1716 MBBVRegsMap, NewLIs);
1718 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1719 Slot, 0, false, false, false,
1720 false, vrm, rc, ReMatIds, loopInfo,
1721 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1722 MBBVRegsMap, NewLIs);
1724 IsFirstRange = false;
1727 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Non-pre-split path: decide rematerializability per val# first.
1731 bool TrySplit = !intervalIsInOneMBB(li);
1734 bool NeedStackSlot = false;
1735 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1737 const VNInfo *VNI = *i;
1738 unsigned VN = VNI->id;
1739 if (VNI->isUnused())
1740 continue; // Dead val#.
1741     // Is the def for the val# rematerializable?
1742 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1743 ? getInstructionFromIndex(VNI->def) : 0;
1745 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1746       // Remember how to remat the def of this val#.
1747 ReMatOrigDefs[VN] = ReMatDefMI;
1748       // Original def may be modified so we have to make a copy here.
1749 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1750 CloneMIs.push_back(Clone);
1751 ReMatDefs[VN] = Clone;
1753 bool CanDelete = true;
1754 if (VNI->hasPHIKill()) {
1755         // A kill is a phi node, not all of its uses can be rematerialized.
1756         // It must not be deleted.
1758         // Need a stack slot if there is any live range where uses cannot be
1760 NeedStackSlot = true;
1763 ReMatDelete.set(VN);
1765       // Need a stack slot if there is any live range where uses cannot be
1767 NeedStackSlot = true;
1771   // One stack slot per live interval.
1772 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1773 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1774 Slot = vrm.assignVirt2StackSlot(li.reg);
1776     // This case only occurs when the prealloc splitter has already assigned
1777     // a stack slot to this vreg.
1779 Slot = vrm.getStackSlot(li.reg);
1782   // Create new intervals and rewrite defs and uses.
1783 for (LiveInterval::Ranges::const_iterator
1784 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1785 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1786 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1787 bool DefIsReMat = ReMatDefMI != NULL;
1788 bool CanDelete = ReMatDelete[I->valno->id];
1790 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1791 bool isLoad = isLoadSS ||
1792 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1793 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1794 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1795 CanDelete, vrm, rc, ReMatIds, loopInfo,
1796 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1797 MBBVRegsMap, NewLIs);
1800   // Insert spills / restores if we are splitting.
1802 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Process recorded spill points: fold stores into defs where profitable,
// otherwise tell the spiller (VirtRegMap) to emit a spill.
1806 SmallPtrSet<LiveInterval*, 4> AddedKill;
1807 SmallVector<unsigned, 2> Ops;
1808 if (NeedStackSlot) {
1809 int Id = SpillMBBs.find_first();
1811 std::vector<SRInfo> &spills = SpillIdxes[Id];
1812 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1813 SlotIndex index = spills[i].index;
1814 unsigned VReg = spills[i].vreg;
1815 LiveInterval &nI = getOrCreateInterval(VReg);
1816 bool isReMat = vrm.isReMaterialized(VReg);
1817 MachineInstr *MI = getInstructionFromIndex(index);
1818 bool CanFold = false;
1819 bool FoundUse = false;
1821 if (spills[i].canFold) {
1823 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1824 MachineOperand &MO = MI->getOperand(j);
1825 if (!MO.isReg() || MO.getReg() != VReg)
1832 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1833 RestoreMBBs, RestoreIdxes))) {
1834             // MI has two-address uses of the same register. If the use
1835             // isn't the first and only use in the BB, then we can't fold
1836             // it. FIXME: Move this to rewriteInstructionsForSpills.
1843       // Fold the store into the def if possible.
1844 bool Folded = false;
1845 if (CanFold && !Ops.empty()) {
1846 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1849             // Also folded uses, do not issue a load.
1850 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1851 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1853 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1857       // Otherwise tell the spiller to issue a spill.
1859 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1860 bool isKill = LR->end == index.getStoreIndex();
1861 if (!MI->registerDefIsDead(nI.reg))
1862           // No need to spill a dead def.
1863 vrm.addSpillPoint(VReg, isKill, MI);
1865 AddedKill.insert(&nI);
1868 Id = SpillMBBs.find_next(Id);
// Process recorded restore points: fold loads (or remat defs) into uses
// where possible, otherwise emit a restore/remat via the VirtRegMap.
1872 int Id = RestoreMBBs.find_first();
1874 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1875 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1876 SlotIndex index = restores[i].index;
1877 if (index == SlotIndex())
1879 unsigned VReg = restores[i].vreg;
1880 LiveInterval &nI = getOrCreateInterval(VReg);
1881 bool isReMat = vrm.isReMaterialized(VReg);
1882 MachineInstr *MI = getInstructionFromIndex(index);
1883 bool CanFold = false;
1885 if (restores[i].canFold) {
1887 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1888 MachineOperand &MO = MI->getOperand(j);
1889 if (!MO.isReg() || MO.getReg() != VReg)
1893           // If this restore were to be folded, it would have been folded
1902       // Fold the load into the use if possible.
1903 bool Folded = false;
1904 if (CanFold && !Ops.empty()) {
1906 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1908 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1910 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1911           // If the rematerializable def is a load, also try to fold it.
1912 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1913 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1914 Ops, isLoadSS, LdSlot, VReg);
1916 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1918             // Re-matting an instruction with virtual register use. Add the
1919             // register as an implicit use on the use MI and update the register
1920             // interval's spill weight to HUGE_VALF to prevent it from being
1922 LiveInterval &ImpLi = getInterval(ImpUse);
1923 ImpLi.weight = HUGE_VALF;
1924 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1929       // If folding is not possible / failed, then tell the spiller to issue a
1930       // load / rematerialization for us.
1932 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1934 vrm.addRestorePoint(VReg, MI);
1936 Id = RestoreMBBs.find_next(Id);
1939   // Finalize intervals: add kills, finalize spill weights, and filter out
1941 std::vector<LiveInterval*> RetNewLIs;
1942 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1943 LiveInterval *LI = NewLIs[i];
1945 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1946 if (!AddedKill.count(LI)) {
1947 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1948 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1949 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1950 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1951 assert(UseIdx != -1);
1952 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1953 LastUse->getOperand(UseIdx).setIsKill();
1954 vrm.addKillPoint(LI->reg, LastUseIdx);
1957 RetNewLIs.push_back(LI);
1961 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1965 /// hasAllocatableSuperReg - Return true if the specified physical register has
1966 /// any super register that's allocatable.
// NOTE(review): the `return true;` / `return false;` lines and the closing
// brace are elided from this listing.
1967 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1968 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1969 if (allocatableRegs_[*AS] && hasInterval(*AS))
1974 /// getRepresentativeReg - Find the largest super register of the specified
1975 /// physical register.
// Walks the super-register chain: picks a super-register with an interval
// that has no allocatable super-register of its own, i.e. the top of the
// allocatable hierarchy. (Update of BestReg / return are elided here.)
1976 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1977   // Find the largest super-register that is allocatable.
1978 unsigned BestReg = Reg;
1979 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1980 unsigned SuperReg = *AS;
1981 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1989 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1990 /// specified interval that conflicts with the specified physical register.
// NOTE(review): the NumConflicts increment under the liveAt test and the
// closing brace are elided from this listing.
1991 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1992 unsigned PhysReg) const {
1993 unsigned NumConflicts = 0;
// Compare against the representative (largest allocatable) super-register's
// interval so aliasing sub/super registers are accounted for.
1994 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1995 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1996 E = mri_->reg_end(); I != E; ++I) {
1997 MachineOperand &O = I.getOperand();
1998 MachineInstr *MI = O.getParent();
1999 SlotIndex Index = getInstructionIndex(MI);
2000 if (pli.liveAt(Index))
2003 return NumConflicts;
2006 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2007 /// around all defs and uses of the specified interval. Return true if it
2008 /// was able to cut its interval.
// NOTE(review): several original lines are elided from this listing (gaps
// in the embedded numbering): the Cut flag declaration, some loop bodies
// (inner sub-register push), `continue`s, the `else` introducing the
// out-of-registers diagnostic, and the return. Code kept verbatim.
2009 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2010 unsigned PhysReg, VirtRegMap &vrm) {
2011 unsigned SpillReg = getRepresentativeReg(PhysReg);
2013 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2014     // If there are registers which alias PhysReg, but which are not a
2015     // sub-register of the chosen representative super register. Assert
2016     // since we can't handle it yet.
2017 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2018 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and all its sub-registers that have intervals.
2021 SmallVector<unsigned, 4> PRegs;
2022 if (hasInterval(SpillReg))
2023 PRegs.push_back(SpillReg);
2025 SmallSet<unsigned, 4> Added;
2026 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2027 if (Added.insert(*AS) && hasInterval(*AS)) {
2028 PRegs.push_back(*AS);
2029 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// For each instruction touching li, punch a hole in every conflicting
// physreg interval around it and record an emergency spill; each MI is
// visited only once (SeenMIs guards against multiple operands).
2034 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2035 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2036 E = mri_->reg_end(); I != E; ++I) {
2037 MachineOperand &O = I.getOperand();
2038 MachineInstr *MI = O.getParent();
2039 if (SeenMIs.count(MI))
2042 SlotIndex Index = getInstructionIndex(MI);
2043 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2044 unsigned PReg = PRegs[i];
2045 LiveInterval &pli = getInterval(PReg);
2046 if (!pli.liveAt(Index))
2048 vrm.addEmergencySpill(PReg, MI);
2049 SlotIndex StartIdx = Index.getLoadIndex();
2050 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2051 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2052 pli.removeRange(StartIdx, EndIdx);
// The interval cannot be cut here: register allocation has truly failed.
2056 raw_string_ostream Msg(msg);
2057 Msg << "Ran out of registers during register allocation!";
2058 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2059 Msg << "\nPlease check your inline asm statement for invalid "
2060 << "constraints:\n";
2061 MI->print(Msg, tm_);
2063 llvm_report_error(Msg.str());
// Also trim any live sub-register intervals around this instruction.
2065 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2066 if (!hasInterval(*AS))
2068 LiveInterval &spli = getInterval(*AS);
2069 if (spli.liveAt(Index))
2070 spli.removeRange(Index.getLoadIndex(),
2071 Index.getNextIndex().getBaseIndex());
2078 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2079 MachineInstr* startInst) {
2080 LiveInterval& Interval = getOrCreateInterval(reg);
2081 VNInfo* VN = Interval.getNextValue(
2082 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2083 startInst, true, getVNInfoAllocator());
2084 VN->setHasPHIKill(true);
2085 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2087 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2088 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2089 Interval.addRange(LR);