1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "regalloc"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "llvm/Value.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/Passes.h"
26 #include "llvm/Target/TargetRegisterInfo.h"
27 #include "llvm/Target/TargetInstrInfo.h"
28 #include "llvm/Target/TargetMachine.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/raw_ostream.h"
33 #include "llvm/ADT/DenseSet.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/STLExtras.h"
41 // Hidden options for help debugging.
// -disable-rematerialization: when set, the pass will not try to
// rematerialize values (see isReMaterializable below).
42 static cl::opt<bool> DisableReMat("disable-rematerialization",
43 cl::init(false), cl::Hidden);
// Statistic counter: number of intervals as originally computed, before
// later passes split or coalesce them.
45 STATISTIC(numIntervals , "Number of original intervals");
47 char LiveIntervals::ID = 0;
// Register the pass with the PassRegistry and declare the analyses it
// depends on: AliasAnalysis (as an analysis group), LiveVariables,
// MachineDominatorTree, and SlotIndexes.
48 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
49 "Live Interval Analysis", false, false)
50 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
51 INITIALIZE_PASS_DEPENDENCY(LiveVariables)
52 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
53 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
54 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
55 "Live Interval Analysis", false, false)
// getAnalysisUsage - Declare the analyses required and preserved by this
// pass. SlotIndexes is addRequiredTransitive so it stays valid for as
// long as LiveIntervals itself is kept around.
57 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
59 AU.addRequired<AliasAnalysis>();
60 AU.addPreserved<AliasAnalysis>();
61 AU.addRequired<LiveVariables>();
62 AU.addPreserved<LiveVariables>();
63 AU.addPreservedID(MachineLoopInfoID);
64 AU.addPreservedID(MachineDominatorsID);
65 AU.addPreserved<SlotIndexes>();
66 AU.addRequiredTransitive<SlotIndexes>();
67 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state so the pass can be re-run
// on another function without leaking.
70 void LiveIntervals::releaseMemory() {
71 // Free the live intervals themselves.
72 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
73 E = r2iMap_.end(); I != E; ++I)
// NOTE(review): the loop body and map clearing are elided in this
// listing; presumably each I->second is deleted here -- confirm.
79 RegMaskBlocks.clear();
81 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
82 VNInfoAllocator.Reset();
85 /// runOnMachineFunction - Register allocate the whole function
87 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache the analyses and target hooks used throughout the pass.
// NOTE(review): the assignment of mf_ itself and the call that computes
// the intervals are elided from this listing -- mri_ is read through mf_
// below, so mf_ must be set from `fn` in an elided line.
89 mri_ = &mf_->getRegInfo();
90 tm_ = &fn.getTarget();
91 tri_ = tm_->getRegisterInfo();
92 tii_ = tm_->getInstrInfo();
93 aa_ = &getAnalysis<AliasAnalysis>();
94 lv_ = &getAnalysis<LiveVariables>();
95 indexes_ = &getAnalysis<SlotIndexes>();
96 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update the statistic with however many intervals were built.
100 numIntervals += getNumIntervals();
106 /// print - Implement the dump method.
107 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
108 OS << "********** INTERVALS **********\n";
// Print each computed interval, using tri_ to render register names.
109 for (const_iterator I = begin(), E = end(); I != E; ++I) {
110 I->second->print(OS, tri_);
// printInstrs - Dump the machine instructions of the current function,
// with slot indexes interleaved by MachineFunction::print.
117 void LiveIntervals::printInstrs(raw_ostream &OS) const {
118 OS << "********** MACHINEINSTRS **********\n";
119 mf_->print(OS, indexes_);
// dumpInstrs - Debug helper; body elided in this listing (presumably
// forwards to printInstrs on the debug stream -- confirm).
122 void LiveIntervals::dumpInstrs() const {
// MultipleDefsBySameMI - Return true if MI has another def operand, after
// MOIdx, of the same register. The assert documents the only expected
// shape: distinct subregister defs (or an implicit full def) of one reg.
// NOTE(review): the return statements are elided from this listing.
127 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
128 unsigned Reg = MI.getOperand(MOIdx).getReg();
// Scan only the operands after MOIdx, so each pair is reported once.
129 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
130 const MachineOperand &MO = MI.getOperand(i);
133 if (MO.getReg() == Reg && MO.isDef()) {
134 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
135 MI.getOperand(MOIdx).getSubReg() &&
136 (MO.getSubReg() || MO.isImplicit()));
143 /// isPartialRedef - Return true if the specified def at the specific index is
144 /// partially re-defining the specified live interval. A common case of this is
145 /// a definition of the sub-register.
146 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
147 LiveInterval &interval) {
// A full def, or an early-clobber def, is never a partial redef.
// (The early-return for this guard is elided in this listing.)
148 if (!MO.getSubReg() || MO.isEarlyClobber())
// Look up the live range that reaches the slot just before this def and
// check whether its defining instruction also defined this register.
151 SlotIndex RedefIndex = MIIdx.getRegSlot();
152 const LiveRange *OldLR =
153 interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
154 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
156 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Build (or extend) the live interval for the
// virtual register defined by operand MOIdx of instruction mi at MIIdx.
// The first def of a vreg creates value number 0 and ranges reaching its
// kills; later defs handle partial redefs, two-address redefs, and
// phi-join revivals. NOTE(review): several statements (continue/early
// returns, some closing braces) are elided from this listing.
161 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
162 MachineBasicBlock::iterator mi,
166 LiveInterval &interval) {
167 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
169 // Virtual registers may be defined multiple times (due to phi
170 // elimination and 2-addr elimination). Much of what we do only has to be
171 // done once for the vreg. We use an empty interval to detect the first
172 // time we see a vreg.
173 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
174 if (interval.empty()) {
175 // Get the Idx of the defining instructions.
176 SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
178 // Make sure the first definition is not a partial redefinition. Add an
179 // <imp-def> of the full register.
180 // FIXME: LiveIntervals shouldn't modify the code like this. Whoever
181 // created the machine instruction should annotate it with <undef> flags
182 // as needed. Then we can simply assert here. The REG_SEQUENCE lowering
183 // is the main suspect.
184 if (MO.getSubReg()) {
185 mi->addRegisterDefined(interval.reg);
186 // Mark all defs of interval.reg on this instruction as reading <undef>.
187 for (unsigned i = MOIdx, e = mi->getNumOperands(); i != e; ++i) {
188 MachineOperand &MO2 = mi->getOperand(i);
189 if (MO2.isReg() && MO2.getReg() == interval.reg && MO2.getSubReg())
194 VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
195 assert(ValNo->id == 0 && "First value in interval is not 0?");
197 // Loop over all of the blocks that the vreg is defined in. There are
198 // two cases we have to handle here. The most common case is a vreg
199 // whose lifetime is contained within a basic block. In this case there
200 // will be a single kill, in MBB, which comes after the definition.
201 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
202 // FIXME: what about dead vars?
// Dead defs (kill == the defining instr) end at the dead slot.
204 if (vi.Kills[0] != mi)
205 killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
207 killIdx = defIndex.getDeadSlot();
209 // If the kill happens after the definition, we have an intra-block
211 if (killIdx > defIndex) {
212 assert(vi.AliveBlocks.empty() &&
213 "Shouldn't be alive across any blocks!");
214 LiveRange LR(defIndex, killIdx, ValNo);
215 interval.addRange(LR);
216 DEBUG(dbgs() << " +" << LR << "\n");
221 // The other case we handle is when a virtual register lives to the end
222 // of the defining block, potentially live across some blocks, then is
223 // live into some number of blocks, but gets killed. Start by adding a
224 // range that goes from this definition to the end of the defining block.
225 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
226 DEBUG(dbgs() << " +" << NewLR);
227 interval.addRange(NewLR);
229 bool PHIJoin = lv_->isPHIJoin(interval.reg);
232 // A phi join register is killed at the end of the MBB and revived as a new
233 // valno in the killing blocks.
234 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
235 DEBUG(dbgs() << " phi-join");
236 ValNo->setHasPHIKill(true);
238 // Iterate over all of the blocks that the variable is completely
239 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
241 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
242 E = vi.AliveBlocks.end(); I != E; ++I) {
243 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
244 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
245 interval.addRange(LR);
246 DEBUG(dbgs() << " +" << LR);
250 // Finally, this virtual register is live from the start of any killing
251 // block to the 'use' slot of the killing instruction.
252 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
253 MachineInstr *Kill = vi.Kills[i];
254 SlotIndex Start = getMBBStartIdx(Kill->getParent());
255 SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();
257 // Create interval with one of a NEW value number. Note that this value
258 // number isn't actually defined by an instruction, weird huh? :)
260 assert(getInstructionFromIndex(Start) == 0 &&
261 "PHI def index points at actual instruction.");
262 ValNo = interval.getNextValue(Start, VNInfoAllocator);
263 ValNo->setIsPHIDef(true);
265 LiveRange LR(Start, killIdx, ValNo);
266 interval.addRange(LR);
267 DEBUG(dbgs() << " +" << LR);
// ----- interval is non-empty: this is a repeated def of the vreg -----
271 if (MultipleDefsBySameMI(*mi, MOIdx))
272 // Multiple defs of the same virtual register by the same instruction.
273 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
274 // This is likely due to elimination of REG_SEQUENCE instructions. Return
275 // here since there is nothing to do.
278 // If this is the second time we see a virtual register definition, it
279 // must be due to phi elimination or two addr elimination. If this is
280 // the result of two address elimination, then the vreg is one of the
281 // def-and-use register operand.
283 // It may also be partial redef like this:
284 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
285 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
286 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
287 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
288 // If this is a two-address definition, then we have already processed
289 // the live range. The only problem is that we didn't realize there
290 // are actually two values in the live interval. Because of this we
291 // need to take the LiveRegion that defines this register and split it
293 SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
295 const LiveRange *OldLR =
296 interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
297 VNInfo *OldValNo = OldLR->valno;
298 SlotIndex DefIndex = OldValNo->def.getRegSlot();
300 // Delete the previous value, which should be short and continuous,
301 // because the 2-addr copy must be in the same MBB as the redef.
302 interval.removeRange(DefIndex, RedefIndex);
304 // The new value number (#1) is defined by the instruction we claimed
306 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
308 // Value#0 is now defined by the 2-addr instruction.
309 OldValNo->def = RedefIndex;
311 // Add the new live interval which replaces the range for the input copy.
312 LiveRange LR(DefIndex, RedefIndex, ValNo);
313 DEBUG(dbgs() << " replace range with " << LR);
314 interval.addRange(LR);
316 // If this redefinition is dead, we need to add a dummy unit live
317 // range covering the def slot.
319 interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
323 dbgs() << " RESULT: ";
324 interval.print(dbgs(), tri_);
326 } else if (lv_->isPHIJoin(interval.reg)) {
327 // In the case of PHI elimination, each variable definition is only
328 // live until the end of the block. We've already taken care of the
329 // rest of the live range.
331 SlotIndex defIndex = MIIdx.getRegSlot();
332 if (MO.isEarlyClobber())
333 defIndex = MIIdx.getRegSlot(true);
335 VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
337 SlotIndex killIndex = getMBBEndIdx(mbb);
338 LiveRange LR(defIndex, killIndex, ValNo);
339 interval.addRange(LR);
340 ValNo->setHasPHIKill(true);
341 DEBUG(dbgs() << " phi-join +" << LR);
// Any other repeated def is a verifier-level error.
343 llvm_unreachable("Multiply defined register");
347 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Build the live range for a physical
// register def at MIIdx. Physregs are block-local here, so the scan for
// the range end never leaves MBB. NOTE(review): several statements
// (the dead-def guard, loop continues, breaks) are elided in this listing.
350 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
351 MachineBasicBlock::iterator mi,
354 LiveInterval &interval) {
355 // A physical register cannot be live across basic block, so its
356 // lifetime must end somewhere in its defining basic block.
357 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
359 SlotIndex baseIndex = MIIdx;
360 SlotIndex start = baseIndex.getRegSlot(MO.isEarlyClobber());
361 SlotIndex end = start;
363 // If it is not used after definition, it is considered dead at
364 // the instruction defining it. Hence its interval is:
365 // [defSlot(def), defSlot(def)+1)
366 // For earlyclobbers, the defSlot was pushed back one; the extra
367 // advance below compensates.
369 DEBUG(dbgs() << " dead");
370 end = start.getDeadSlot();
374 // If it is not dead on definition, it must be killed by a
375 // subsequent instruction. Hence its interval is:
376 // [defSlot(def), useSlot(kill)+1)
377 baseIndex = baseIndex.getNextIndex();
// Walk forward through the block looking for the kill or a redef.
378 while (++mi != MBB->end()) {
380 if (mi->isDebugValue())
382 if (getInstructionFromIndex(baseIndex) == 0)
383 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
385 if (mi->killsRegister(interval.reg, tri_)) {
386 DEBUG(dbgs() << " killed");
387 end = baseIndex.getRegSlot();
390 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
392 if (mi->isRegTiedToUseOperand(DefIdx)) {
393 // Two-address instruction.
394 end = baseIndex.getRegSlot(mi->getOperand(DefIdx).isEarlyClobber());
396 // Another instruction redefines the register before it is ever read.
397 // Then the register is essentially dead at the instruction that
398 // defines it. Hence its interval is:
399 // [defSlot(def), defSlot(def)+1)
400 DEBUG(dbgs() << " dead");
401 end = start.getDeadSlot();
407 baseIndex = baseIndex.getNextIndex();
410 // The only case we should have a dead physreg here without a killing or
411 // instruction where we know it's dead is if it is live-in to the function
412 // and never used. Another possible case is the implicit use of the
413 // physical register has been deleted by two-address pass.
414 end = start.getDeadSlot();
417 assert(start < end && "did not find end of interval?");
419 // Already exists? Extend old live interval.
420 VNInfo *ValNo = interval.getVNInfoAt(start);
421 bool Extend = ValNo != 0;
423 ValNo = interval.getNextValue(start, VNInfoAllocator);
424 LiveRange LR(start, end, ValNo);
425 interval.addRange(LR);
426 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def operand to the virtual- or
// physical-register handler, creating the interval on first sight.
429 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
430 MachineBasicBlock::iterator MI,
434 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
435 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
436 getOrCreateInterval(MO.getReg()));
438 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
439 getOrCreateInterval(MO.getReg()));
// handleLiveInRegister - Build the range for a register that is live-in
// to MBB: it gets a PHI-def value number at the block start and extends
// to its kill, redef, or the block end if unused. NOTE(review): several
// statements (loop header at 468, breaks/returns) are elided here.
442 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
444 LiveInterval &interval) {
445 DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
447 // Look for kills, if it reaches a def before it's killed, then it shouldn't
448 // be considered a livein.
449 MachineBasicBlock::iterator mi = MBB->begin();
450 MachineBasicBlock::iterator E = MBB->end();
451 // Skip over DBG_VALUE at the start of the MBB.
452 if (mi != E && mi->isDebugValue()) {
453 while (++mi != E && mi->isDebugValue())
456 // MBB is empty except for DBG_VALUE's.
460 SlotIndex baseIndex = MIIdx;
461 SlotIndex start = baseIndex;
462 if (getInstructionFromIndex(baseIndex) == 0)
463 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
465 SlotIndex end = baseIndex;
466 bool SeenDefUse = false;
// Scan the block for the first kill or redef of the register.
469 if (mi->killsRegister(interval.reg, tri_)) {
470 DEBUG(dbgs() << " killed");
471 end = baseIndex.getRegSlot();
474 } else if (mi->definesRegister(interval.reg, tri_)) {
475 // Another instruction redefines the register before it is ever read.
476 // Then the register is essentially dead at the instruction that defines
477 // it. Hence its interval is:
478 // [defSlot(def), defSlot(def)+1)
479 DEBUG(dbgs() << " dead");
480 end = start.getDeadSlot();
485 while (++mi != E && mi->isDebugValue())
486 // Skip over DBG_VALUE.
489 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
492 // Live-in register might not be used at all.
494 DEBUG(dbgs() << " live through");
495 end = getMBBEndIdx(MBB);
// Live-in values are defined "at" the block start by a PHI-def valno.
498 SlotIndex defIdx = getMBBStartIdx(MBB);
499 assert(getInstructionFromIndex(defIdx) == 0 &&
500 "PHI def index points at actual instruction.");
501 VNInfo *vni = interval.getNextValue(defIdx, VNInfoAllocator);
502 vni->setIsPHIDef(true);
503 LiveRange LR(start, end, vni);
505 interval.addRange(LR);
506 DEBUG(dbgs() << " +" << LR << '\n');
509 /// computeIntervals - computes the live intervals for virtual
510 /// registers. for some ordering of the machine instructions [1,N] a
511 /// live interval is an interval [i, j) where 1 <= i <= j < N for
512 /// which a variable is live
513 void LiveIntervals::computeIntervals() {
514 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
515 << "********** Function: "
516 << ((Value*)mf_->getFunction())->getName() << '\n');
// One (first-slot, count) pair of register-mask data per basic block.
518 RegMaskBlocks.resize(mf_->getNumBlockIDs());
520 SmallVector<unsigned, 8> UndefUses;
521 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
523 MachineBasicBlock *MBB = MBBI;
524 RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size();
529 // Track the index of the current machine instr.
530 SlotIndex MIIndex = getMBBStartIdx(MBB);
531 DEBUG(dbgs() << "BB#" << MBB->getNumber()
532 << ":\t\t# derived from " << MBB->getName() << "\n");
534 // Create intervals for live-ins to this BB first.
535 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
536 LE = MBB->livein_end(); LI != LE; ++LI) {
537 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
540 // Skip over empty initial indices.
541 if (getInstructionFromIndex(MIIndex) == 0)
542 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
544 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
546 DEBUG(dbgs() << MIIndex << "\t" << *MI);
547 if (MI->isDebugValue())
549 assert(indexes_->getInstructionFromIndex(MIIndex) == MI &&
550 "Lost SlotIndex synchronization");
// Walk operands in reverse so defs (conventionally first) are seen last.
553 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
554 MachineOperand &MO = MI->getOperand(i);
556 // Collect register masks.
557 if (MO.isRegMask()) {
558 RegMaskSlots.push_back(MIIndex.getRegSlot());
559 RegMaskBits.push_back(MO.getRegMask());
563 if (!MO.isReg() || !MO.getReg())
566 // handle register defs - build intervals
568 handleRegisterDef(MBB, MI, MIIndex, MO, i);
569 else if (MO.isUndef())
570 UndefUses.push_back(MO.getReg());
573 // Move to the next instr slot.
574 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
577 // Compute the number of register mask instructions in this block.
578 std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
579 RMB.second = RegMaskSlots.size() - RMB.first;
582 // Create empty intervals for registers defined by implicit_def's (except
583 // for those implicit_def that define values which are liveout of their
585 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
586 unsigned UndefReg = UndefUses[i];
587 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a fresh LiveInterval for `reg`. Physical
// registers get an infinite spill weight so they are never spilled.
591 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
592 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
593 return new LiveInterval(reg, Weight);
596 /// dupInterval - Duplicate a live interval. The caller is responsible for
597 /// managing the allocated memory.
598 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
599 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers into the new interval; the
// `return NewLI;` is elided in this listing.
600 NewLI->Copy(*li, mri_, getVNInfoAllocator());
604 /// shrinkToUses - After removing some uses of a register, shrink its live
605 /// range to just the remaining uses. This method does not compute reaching
606 /// defs for new uses, and it doesn't remove dead defs.
// Worklist algorithm: seed with every remaining read, then extend minimal
// per-def segments backwards block-by-block until each use is covered.
// NOTE(review): several continue/return statements and loop increments
// are elided in this listing.
607 bool LiveIntervals::shrinkToUses(LiveInterval *li,
608 SmallVectorImpl<MachineInstr*> *dead) {
609 DEBUG(dbgs() << "Shrink: " << *li << '\n');
610 assert(TargetRegisterInfo::isVirtualRegister(li->reg)
611 && "Can only shrink virtual registers");
612 // Find all the values used, including PHI kills.
613 SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
615 // Blocks that have already been added to WorkList as live-out.
616 SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
618 // Visit all instructions reading li->reg.
619 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
620 MachineInstr *UseMI = I.skipInstruction();) {
621 if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
623 SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
624 // Note: This intentionally picks up the wrong VNI in case of an EC redef.
626 VNInfo *VNI = li->getVNInfoBefore(Idx);
628 // This shouldn't happen: readsVirtualRegister returns true, but there is
629 // no live value. It is likely caused by a target getting <undef> flags
631 DEBUG(dbgs() << Idx << '\t' << *UseMI
632 << "Warning: Instr claims to read non-existent value in "
636 // Special case: An early-clobber tied operand reads and writes the
637 // register one slot early. The getVNInfoBefore call above would have
638 // picked up the value defined by UseMI. Adjust the kill slot and value.
639 if (SlotIndex::isSameInstr(VNI->def, Idx)) {
641 VNI = li->getVNInfoBefore(Idx);
642 assert(VNI && "Early-clobber tied value not available");
644 WorkList.push_back(std::make_pair(Idx, VNI));
647 // Create a new live interval with only minimal live segments per def.
648 LiveInterval NewLI(li->reg, 0);
649 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
654 NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
657 // Keep track of the PHIs that are in use.
658 SmallPtrSet<VNInfo*, 8> UsedPHIs;
660 // Extend intervals to reach all uses in WorkList.
661 while (!WorkList.empty()) {
662 SlotIndex Idx = WorkList.back().first;
663 VNInfo *VNI = WorkList.back().second;
665 const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
666 SlotIndex BlockStart = getMBBStartIdx(MBB);
668 // Extend the live range for VNI to be live at Idx.
669 if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
671 assert(ExtVNI == VNI && "Unexpected existing value number");
672 // Is this a PHIDef we haven't seen before?
673 if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
675 // The PHI is live, make sure the predecessors are live-out.
676 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
677 PE = MBB->pred_end(); PI != PE; ++PI) {
678 if (!LiveOut.insert(*PI))
680 SlotIndex Stop = getMBBEndIdx(*PI);
681 // A predecessor is not required to have a live-out value for a PHI.
682 if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
683 WorkList.push_back(std::make_pair(Stop, PVNI));
688 // VNI is live-in to MBB.
689 DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
690 NewLI.addRange(LiveRange(BlockStart, Idx, VNI));
692 // Make sure VNI is live-out from the predecessors.
693 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
694 PE = MBB->pred_end(); PI != PE; ++PI) {
695 if (!LiveOut.insert(*PI))
697 SlotIndex Stop = getMBBEndIdx(*PI);
698 assert(li->getVNInfoBefore(Stop) == VNI &&
699 "Wrong value out of predecessor");
700 WorkList.push_back(std::make_pair(Stop, VNI));
704 // Handle dead values.
705 bool CanSeparate = false;
706 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
711 LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
712 assert(LII != NewLI.end() && "Missing live range for PHI");
713 if (LII->end != VNI->def.getDeadSlot())
715 if (VNI->isPHIDef()) {
716 // This is a dead PHI. Remove it.
717 VNI->setIsUnused(true);
718 NewLI.removeRange(*LII);
719 DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
722 // This is a dead def. Make sure the instruction knows.
723 MachineInstr *MI = getInstructionFromIndex(VNI->def);
724 assert(MI && "No instruction defining live value");
725 MI->addRegisterDead(li->reg, tri_);
726 if (dead && MI->allDefsAreDead()) {
727 DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
733 // Move the trimmed ranges back.
734 li->ranges.swap(NewLI.ranges);
735 DEBUG(dbgs() << "Shrunk: " << *li << '\n');
740 //===----------------------------------------------------------------------===//
741 // Register allocator hooks.
// addKillFlags - Mark the instruction ending each live range of every
// virtual register with a kill flag. Physical registers and registers
// with no non-debug uses are skipped (the `continue`s are elided here).
744 void LiveIntervals::addKillFlags() {
745 for (iterator I = begin(), E = end(); I != E; ++I) {
746 unsigned Reg = I->first;
747 if (TargetRegisterInfo::isPhysicalRegister(Reg))
749 if (mri_->reg_nodbg_empty(Reg))
751 LiveInterval *LI = I->second;
753 // Every instruction that kills Reg corresponds to a live range end point.
754 for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
756 // A block index indicates an MBB edge.
757 if (RI->end.isBlock())
759 MachineInstr *MI = getInstructionFromIndex(RI->end);
762 MI->addRegisterKilled(Reg, NULL);
// intervalRangesSane - Sanity check used by the handleMove* helpers:
// ranges must be non-empty (start < end) and sorted without overlap.
// NOTE(review): the empty-interval guard, lastEnd update, and returns
// are elided in this listing.
768 static bool intervalRangesSane(const LiveInterval& li) {
773 SlotIndex lastEnd = li.begin()->start;
774 for (LiveInterval::const_iterator lrItr = li.begin(), lrEnd = li.end();
775 lrItr != lrEnd; ++lrItr) {
776 const LiveRange& lr = *lrItr;
777 if (lastEnd > lr.start || lr.start >= lr.end)
// handleMoveDefs - After an instruction is moved from origIdx to miIdx,
// retarget the start (and defining valno) of each live range it defines.
786 template <typename DefSetT>
787 static void handleMoveDefs(LiveIntervals& lis, SlotIndex origIdx,
788 SlotIndex miIdx, const DefSetT& defs) {
789 for (typename DefSetT::const_iterator defItr = defs.begin(),
791 defItr != defEnd; ++defItr) {
792 unsigned def = *defItr;
793 LiveInterval& li = lis.getInterval(def);
794 LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot());
795 assert(lr != 0 && "No range for def?");
// The range start and valno def move in place; the end is unchanged
// because the value remains live to its original kill.
796 lr->start = miIdx.getRegSlot();
797 lr->valno->def = miIdx.getRegSlot();
798 assert(intervalRangesSane(li) && "Broke live interval moving def.");
// handleMoveDeadDefs - Like handleMoveDefs, but for dead defs: the whole
// one-slot range [regSlot, deadSlot) is rebuilt at the new index. The
// range is copied into a temporary `t` (its declaration is elided in
// this listing), rewritten, and re-inserted.
802 template <typename DeadDefSetT>
803 static void handleMoveDeadDefs(LiveIntervals& lis, SlotIndex origIdx,
804 SlotIndex miIdx, const DeadDefSetT& deadDefs) {
805 for (typename DeadDefSetT::const_iterator deadDefItr = deadDefs.begin(),
806 deadDefEnd = deadDefs.end();
807 deadDefItr != deadDefEnd; ++deadDefItr) {
808 unsigned deadDef = *deadDefItr;
809 LiveInterval& li = lis.getInterval(deadDef);
810 LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot());
811 assert(lr != 0 && "No range for dead def?");
812 assert(lr->start == origIdx.getRegSlot() && "Bad dead range start?");
813 assert(lr->end == origIdx.getDeadSlot() && "Bad dead range end?");
814 assert(lr->valno->def == origIdx.getRegSlot() && "Bad dead valno def.");
816 t.start = miIdx.getRegSlot();
817 t.valno->def = miIdx.getRegSlot();
818 t.end = miIdx.getDeadSlot();
821 assert(intervalRangesSane(li) && "Broke live interval moving dead def.");
// handleMoveECs - Move the one-instruction early-clobber ranges
// [regSlot(EC), regSlot) of each early-clobbered register to the new
// index. Mirrors handleMoveDeadDefs, with EC (one-early) slots; the
// temporary `t` declaration and re-insert are elided in this listing.
825 template <typename ECSetT>
826 static void handleMoveECs(LiveIntervals& lis, SlotIndex origIdx,
827 SlotIndex miIdx, const ECSetT& ecs) {
828 for (typename ECSetT::const_iterator ecItr = ecs.begin(), ecEnd = ecs.end();
829 ecItr != ecEnd; ++ecItr) {
830 unsigned ec = *ecItr;
831 LiveInterval& li = lis.getInterval(ec);
832 LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot(true));
833 assert(lr != 0 && "No range for early clobber?");
834 assert(lr->start == origIdx.getRegSlot(true) && "Bad EC range start?");
835 assert(lr->end == origIdx.getRegSlot() && "Bad EC range end.");
836 assert(lr->valno->def == origIdx.getRegSlot(true) && "Bad EC valno def.");
838 t.start = miIdx.getRegSlot(true);
839 t.valno->def = miIdx.getRegSlot(true);
840 t.end = miIdx.getRegSlot();
843 assert(intervalRangesSane(li) && "Broke live interval moving EC.");
// moveKillFlags - Transfer the kill flag for `reg` from the instruction
// at oldIdx to the instruction at newIdx. (A parameter line between 847
// and 849 is elided in this listing.)
847 static void moveKillFlags(unsigned reg, SlotIndex oldIdx, SlotIndex newIdx,
849 const TargetRegisterInfo& tri) {
850 MachineInstr* oldKillMI = lis.getInstructionFromIndex(oldIdx);
851 MachineInstr* newKillMI = lis.getInstructionFromIndex(newIdx);
852 assert(oldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
853 assert(!newKillMI->killsRegister(reg) && "New kill instr is already a kill.");
854 oldKillMI->clearRegisterKills(reg, &tri);
855 newKillMI->addRegisterKilled(reg, &tri);
// handleMoveUses - Fix the live-range endpoints of registers *used* by a
// moved instruction. Moving up may shrink a range to the last remaining
// use; moving down may extend it (and migrate kill flags). Uses without
// intervals and reserved physregs are skipped (their `continue`s are
// elided in this listing, as are some branch/closing lines).
858 template <typename UseSetT>
859 static void handleMoveUses(const MachineBasicBlock *mbb,
860 const MachineRegisterInfo& mri,
861 const TargetRegisterInfo& tri,
862 const BitVector& reservedRegs, LiveIntervals &lis,
863 SlotIndex origIdx, SlotIndex miIdx,
864 const UseSetT &uses) {
865 bool movingUp = miIdx < origIdx;
866 for (typename UseSetT::const_iterator usesItr = uses.begin(),
867 usesEnd = uses.end();
868 usesItr != usesEnd; ++usesItr) {
869 unsigned use = *usesItr;
870 if (!lis.hasInterval(use))
872 if (TargetRegisterInfo::isPhysicalRegister(use) && reservedRegs.test(use))
874 LiveInterval& li = lis.getInterval(use);
875 LiveRange* lr = li.getLiveRangeBefore(origIdx.getRegSlot());
876 assert(lr != 0 && "No range for use?");
877 bool liveThrough = lr->end > origIdx.getRegSlot();
880 // If moving up and liveThrough - nothing to do.
881 // If not live through we need to extend the range to the last use
882 // between the old location and the new one.
884 SlotIndex lastUseInRange = miIdx.getRegSlot();
// Scan every remaining use of the register and keep the latest one
// that still falls between the new and old positions.
885 for (MachineRegisterInfo::use_iterator useI = mri.use_begin(use),
886 useE = mri.use_end();
887 useI != useE; ++useI) {
888 const MachineInstr* mopI = &*useI;
889 const MachineOperand& mop = useI.getOperand();
890 SlotIndex instSlot = lis.getSlotIndexes()->getInstructionIndex(mopI);
891 SlotIndex opSlot = instSlot.getRegSlot(mop.isEarlyClobber());
892 if (opSlot > lastUseInRange && opSlot < origIdx)
893 lastUseInRange = opSlot;
896 // If we found a new instr endpoint update the kill flags.
897 if (lastUseInRange != miIdx.getRegSlot())
898 moveKillFlags(use, miIdx, lastUseInRange, lis, tri);
900 // Fix up the range end.
901 lr->end = lastUseInRange;
904 // Moving down is easy - the existing live range end tells us where
907 // Easy fix - just update the range endpoint.
908 lr->end = miIdx.getRegSlot();
910 bool liveOut = lr->end >= lis.getSlotIndexes()->getMBBEndIdx(mbb);
911 if (!liveOut && miIdx.getRegSlot() > lr->end) {
912 moveKillFlags(use, lr->end, miIdx, lis, tri);
913 lr->end = miIdx.getRegSlot();
917 assert(intervalRangesSane(li) && "Broke live interval moving use.");
// moveInstr - Move instruction `mi` (parameter line elided between 921
// and 923) to insertPt within the same basic block and update all live
// intervals touched by its operands. Ordering matters: when moving up,
// uses are fixed before defs; when moving down, the reverse.
921 void LiveIntervals::moveInstr(MachineBasicBlock::iterator insertPt,
923 MachineBasicBlock* mbb = mi->getParent();
924 assert((insertPt == mbb->end() || insertPt->getParent() == mbb) &&
925 "Cannot handle moves across basic block boundaries.");
926 assert(&*insertPt != mi && "No-op move requested?");
927 assert(!mi->isBundled() && "Can't handle bundled instructions yet.");
929 // Grab the original instruction index.
930 SlotIndex origIdx = indexes_->getInstructionIndex(mi);
932 // Move the machine instr and obtain its new index.
933 indexes_->removeMachineInstrFromMaps(mi);
934 mbb->splice(insertPt, mbb, mi);
935 SlotIndex miIdx = indexes_->insertMachineInstrInMaps(mi);
937 // Pick the direction.
938 bool movingUp = miIdx < origIdx;
940 // Collect the operands.
941 DenseSet<unsigned> uses, defs, deadDefs, ecs;
942 for (MachineInstr::mop_iterator mopItr = mi->operands_begin(),
943 mopEnd = mi->operands_end();
944 mopItr != mopEnd; ++mopItr) {
945 const MachineOperand& mop = *mopItr;
947 if (!mop.isReg() || mop.getReg() == 0)
949 unsigned reg = mop.getReg();
// Classify each register operand into exactly one of the four sets.
// NOTE(review): some classification branches are elided in this listing.
951 if (mop.readsReg() && !ecs.count(reg)) {
956 assert(!defs.count(reg) && "Can't mix defs with dead-defs.");
957 deadDefs.insert(reg);
958 } else if (mop.isEarlyClobber()) {
962 assert(!deadDefs.count(reg) && "Can't mix defs with dead-defs.");
968 BitVector reservedRegs(tri_->getReservedRegs(*mbb->getParent()));
// Moving up: fix uses first, then ECs, dead defs, defs...
971 handleMoveUses(mbb, *mri_, *tri_, reservedRegs, *this, origIdx, miIdx, uses);
972 handleMoveECs(*this, origIdx, miIdx, ecs);
973 handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
974 handleMoveDefs(*this, origIdx, miIdx, defs);
// ...moving down: the same fix-ups in the opposite order.
976 handleMoveDefs(*this, origIdx, miIdx, defs);
977 handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
978 handleMoveECs(*this, origIdx, miIdx, ecs);
979 handleMoveUses(mbb, *mri_, *tri_, reservedRegs, *this, origIdx, miIdx, uses);
983 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
984 /// allow one) virtual register operand, then its uses are implicitly using
985 /// the register. Returns the virtual register.
// Scans MI's operands for a register use other than li.reg itself;
// unallocatable physical registers are skipped. The first qualifying
// operand is recorded in RegOp and the scan stops.
// NOTE(review): RegOp's declaration, the loop's 'continue' statements and
// the final 'return' fall in gaps of this excerpt.
986 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
987 MachineInstr *MI) const {
989 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
990 MachineOperand &MO = MI->getOperand(i);
// Only register uses are interesting.
991 if (!MO.isReg() || !MO.isUse())
993 unsigned Reg = MO.getReg();
// Ignore the null register and the register being rematerialized.
994 if (Reg == 0 || Reg == li.reg)
// Skip physical registers the allocator cannot assign.
997 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
998 !allocatableRegs_[Reg])
1000 RegOp = MO.getReg();
1001 break; // Found vreg operand - leave the loop.
1006 /// isValNoAvailableAt - Return true if the val# of the specified interval
1007 /// which reaches the given instruction also reaches the specified use index.
1008 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1009 SlotIndex UseIdx) const {
// Look up the value number live at the use index and require that it is
// the same (non-null) value number that is live at MI's own index.
1010 VNInfo *UValNo = li.getVNInfoAt(UseIdx);
1011 return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
1014 /// isReMaterializable - Returns true if the definition MI of the specified
1015 /// val# of the specified interval is re-materializable.
// NOTE(review): lines are missing from this excerpt (e.g. the early
// 'return false' statements and the final 'return true'); the comments
// below describe only what is visible.
1017 LiveIntervals::isReMaterializable(const LiveInterval &li,
1018 const VNInfo *ValNo, MachineInstr *MI,
1019 const SmallVectorImpl<LiveInterval*> *SpillIs,
// First ask the target whether MI is trivially rematerializable at all.
1024 if (!tii_->isTriviallyReMaterializable(MI, aa_))
1027 // Target-specific code can mark an instruction as being rematerializable
1028 // if it has one virtual reg use, though it had better be something like
1029 // a PIC base register which is likely to be live everywhere.
1030 unsigned ImpUse = getReMatImplicitUse(li, MI);
1032 const LiveInterval &ImpLi = getInterval(ImpUse);
// Every (non-debug) use of li reached by ValNo must also be reached by
// the implicit operand's value that is live at MI, otherwise the
// rematerialized copy would read a different value at that use.
1033 for (MachineRegisterInfo::use_nodbg_iterator
1034 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
1036 MachineInstr *UseMI = &*ri;
1037 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Uses reached by a different value number are irrelevant here.
1038 if (li.getVNInfoAt(UseIdx) != ValNo)
1040 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1044 // If a register operand of the re-materialized instruction is going to
1045 // be spilled next, then it's not legal to re-materialize this instruction.
1047 for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
1048 if (ImpUse == (*SpillIs)[i]->reg)
1054 /// isReMaterializable - Returns true if every definition of MI of every
1055 /// val# of the specified interval is re-materializable.
// NOTE(review): this excerpt omits some lines of the function (loop
// increment, early returns, the final return, and the handling when
// ReMatDefMI is null).
1057 LiveIntervals::isReMaterializable(const LiveInterval &li,
1058 const SmallVectorImpl<LiveInterval*> *SpillIs,
// Walk every value number of the interval; each live definition must
// itself be rematerializable for the whole interval to qualify.
1061 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1063 const VNInfo *VNI = *i;
1064 if (VNI->isUnused())
1065 continue; // Dead val#.
1066 // Is the def for the val# rematerializable?
1067 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1070 bool DefIsLoad = false;
1072 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any of the rematerializable defs is a load.
1074 isLoad |= DefIsLoad;
// Return the single basic block that fully contains LI, or NULL if the
// interval spans more than one block (or touches a block boundary).
1080 LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
1081 // A local live range must be fully contained inside the block, meaning it is
1082 // defined and killed at instructions, not at block boundaries. It is not
1083 // live in or out of any block.
1085 // It is technically possible to have a PHI-defined live range identical to a
1086 // single block, but we are going to return false in that case.
// A range beginning at a block boundary is live-in somewhere, so it
// cannot be local to a single block.
1088 SlotIndex Start = LI.beginIndex();
1089 if (Start.isBlock())
1092 SlotIndex Stop = LI.endIndex();
1096 // getMBBFromIndex doesn't need to search the MBB table when both indexes
1097 // belong to proper instructions.
1098 MachineBasicBlock *MBB1 = indexes_->getMBBFromIndex(Start);
1099 MachineBasicBlock *MBB2 = indexes_->getMBBFromIndex(Stop);
// Same block at both ends => the interval is local to that block.
1100 return MBB1 == MBB2 ? MBB1 : NULL;
// Compute the spill-weight contribution of a single def and/or use at the
// given loop depth; deeper loops weigh (roughly exponentially) more.
1104 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1105 // Limit the loop depth ridiculousness.
1106 if (loopDepth > 200)
1109 // The loop depth is used to roughly estimate the number of times the
1110 // instruction is executed. Something like 10^d is simple, but will quickly
1111 // overflow a float. This expression behaves like 10^d for small d, but is
1112 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1113 // headroom before overflow.
1114 // Note that powf() might be unavailable on some platforms, so for
1115 // portability we use the pow(double, double) overload instead.
1116 float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
// isDef/isUse are bools, so this yields 0, 1, or 2 times the depth factor.
1118 return (isDef + isUse) * lc;
// Create a live range for 'reg' stretching from startInst's def slot to
// the end of startInst's basic block, add it to reg's interval, and
// return it. The new value is marked as killed by a PHI.
1121 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1122 MachineInstr* startInst) {
1123 LiveInterval& Interval = getOrCreateInterval(reg);
// New value number defined at startInst's register slot.
1124 VNInfo* VN = Interval.getNextValue(
1125 SlotIndex(getInstructionIndex(startInst).getRegSlot()),
1126 getVNInfoAllocator());
// The value is expected to be consumed by a PHI at the block boundary.
1127 VN->setHasPHIKill(true);
// NOTE(review): the 'LiveRange LR(' opener and the final 'return LR;'
// appear to fall in gaps of this excerpt.
1129 SlotIndex(getInstructionIndex(startInst).getRegSlot()),
1130 getMBBEndIdx(startInst->getParent()), VN);
1131 Interval.addRange(LR);
1137 //===----------------------------------------------------------------------===//
1138 // Register mask functions
1139 //===----------------------------------------------------------------------===//
1141 bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
1142 BitVector &UsableRegs) {
1145 LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();
1147 // Use smaller arrays for local live ranges.
1148 ArrayRef<SlotIndex> Slots;
1149 ArrayRef<const uint32_t*> Bits;
1150 if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
1151 Slots = getRegMaskSlotsInBlock(MBB->getNumber());
1152 Bits = getRegMaskBitsInBlock(MBB->getNumber());
1154 Slots = getRegMaskSlots();
1155 Bits = getRegMaskBits();
1158 // We are going to enumerate all the register mask slots contained in LI.
1159 // Start with a binary search of RegMaskSlots to find a starting point.
1160 ArrayRef<SlotIndex>::iterator SlotI =
1161 std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
1162 ArrayRef<SlotIndex>::iterator SlotE = Slots.end();
1164 // No slots in range, LI begins after the last call.
1170 assert(*SlotI >= LiveI->start);
1171 // Loop over all slots overlapping this segment.
1172 while (*SlotI < LiveI->end) {
1173 // *SlotI overlaps LI. Collect mask bits.
1175 // This is the first overlap. Initialize UsableRegs to all ones.
1177 UsableRegs.resize(tri_->getNumRegs(), true);
1180 // Remove usable registers clobbered by this mask.
1181 UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
1182 if (++SlotI == SlotE)
1185 // *SlotI is beyond the current LI segment.
1186 LiveI = LI.advanceTo(LiveI, *SlotI);
1189 // Advance SlotI until it overlaps.
1190 while (*SlotI < LiveI->start)
1191 if (++SlotI == SlotE)