1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/CalcSpillWeights.h"
24 #include "llvm/CodeGen/LiveVariables.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineLoopInfo.h"
29 #include "llvm/CodeGen/MachineMemOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/Passes.h"
32 #include "llvm/CodeGen/ProcessImplicitDefs.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetInstrInfo.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetOptions.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/ADT/DepthFirstIterator.h"
42 #include "llvm/ADT/SmallSet.h"
43 #include "llvm/ADT/Statistic.h"
44 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
// -disable-rematerialization: turn off rematerialization entirely, useful
// for triaging miscompiles suspected to come from remat decisions.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
// Pass statistics, reported when LLVM is run with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration. The INITIALIZE_* macros declare the
// analyses this pass depends on so the pass manager schedules them first.
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
/// getAnalysisUsage - Declare required and preserved analyses. The
/// required/preserved pairs mirror the INITIALIZE_PASS dependencies above.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: later passes that use LiveIntervals also need SlotIndexes.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the computed intervals in
/// r2iMap_, the VNInfo allocator, and any cloned machine instructions.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    // Return the clone's storage to the MachineFunction's allocator.
    mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Register allocate the whole function
/// Caches pointers to the analyses and target hooks used throughout the
/// pass, then computes live intervals and updates the -stats counter.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  // Record how many intervals were built for -stats reporting.
  numIntervals += getNumIntervals();
/// print - Implement the dump method.
/// Prints every computed interval to OS, using tri_ for register names.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
/// printInstrs - Print the function's machine instructions to OS, with
/// slot indexes supplied by the SlotIndexes analysis.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
/// dumpInstrs - Debug convenience wrapper around printInstrs.
void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Return true if assigning 'li' to physical
/// register 'reg' would conflict with an existing use or def of 'reg' (or
/// an overlapping physreg) inside li's single live range. Intervals with
/// more than one range are not handled (they may cross block boundaries).
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())

  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.getOperand(0).getReg() == li.reg ||
        MI.getOperand(1).getReg() == li.reg)

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        // Virtual operand: map it to its assigned physreg via the VirtRegMap.
        if (!vrm.hasPhys(PhysReg))
        PhysReg = vrm.getPhys(PhysReg);
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))

  // No conflicts found.
/// conflictsWithAliasRef - Return true if any instruction inside li's live
/// ranges (other than deleted instructions and JoinedCopies) has an
/// explicit physical register operand that overlaps 'Reg'.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Walk every instruction index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue; // skip deleted instructions
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        unsigned PhysReg = MO.getReg();
        // Only explicit physreg operands other than Reg itself matter here.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->regsOverlap(Reg, PhysReg))
/// MultipleDefsBySameMI - Return true if MI defines the register of operand
/// MOIdx again in a later operand, e.g. two sub-register defs of the same
/// vreg produced by one instruction.
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  // Scan the operands after MOIdx for another def of the same register.
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.getReg() == Reg && MO.isDef()) {
      // Repeated defs are only expected for distinct sub-registers (or an
      // implicit def of the full register).
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // A full-register def (no subreg index) or an early-clobber def does not
  // count as a partial redefinition.
  if (!MO.getSubReg() || MO.isEarlyClobber())

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  // Partial redef only if the previous value was also defined by an
  // instruction that defines this register.
  return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Update 'interval' for the virtual register
/// defined by instruction 'mi'. Handles the first def of a vreg, intra-block
/// and cross-block lifetimes, PHI-join values, two-address redefinitions and
/// partial (sub-register) redefinitions.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

      // Finally, this virtual register is live from the start of any killing
      // block to the 'use' slot of the killing instruction.
      for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
        MachineInstr *Kill = vi.Kills[i];
        SlotIndex Start = getMBBStartIdx(Kill->getParent());
        SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

        // Create interval with one of a NEW value number. Note that this value
        // number isn't actually defined by an instruction, weird huh? :)
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
        LiveRange LR(Start, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    // It may also be partial redef like this:
    // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),

          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
      llvm_unreachable("Multiply defined register");

  DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Build or extend the live range for a physical
/// register defined by 'mi'. Scans forward within MBB for the kill or
/// redefinition that ends the range; physregs never live across blocks.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    if (mi->isDebugValue())
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();

    baseIndex = baseIndex.getNextIndex();

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler. For allocatable physregs, also processes each
/// sub-register that MI does not explicitly define itself.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register live into MBB,
/// scanning forward from the block start for the kill or redefinition that
/// ends it. The resulting value number is marked as a PHI def at the block
/// start index.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
    // MBB is empty except for DBG_VALUE's.

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  // Live-in register might not be used at all.
    DEBUG(dbgs() << " dead");
    end = MIIdx.getStoreIndex();
    DEBUG(dbgs() << " live through");

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers that only appear as <undef> uses; they get empty intervals
  // created at the bottom of this function.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())

        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a fresh LiveInterval for 'reg'. Physical
/// registers get an infinite spill weight so the allocator never tries to
/// spill them; virtual registers start at weight zero.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  // Deep-copy the ranges and value numbers, using this pass's allocator.
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
void LiveIntervals::shrinkToUses(LiveInterval *li) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  // NOTE(review): the assertion message reads oddly; the intent is
  // "can only shrink virtual registers" - confirm before changing.
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can't only shrink physical registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
    VNInfo *VNI = li->getVNInfoAt(Idx);
    assert(VNI && "Live interval not live into reading instruction");
    if (VNI->def == Idx) {
      // Special case: An early-clobber tied operand reads and writes the
      // register one slot early.
      Idx = Idx.getPrevSlot();
      VNI = li->getVNInfoAt(Idx);
      assert(VNI && "Early-clobber tied value not available");
    WorkList.push_back(std::make_pair(Idx, VNI));

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;

    // Extend the live range for VNI to be live at Idx.
    LiveInterval::iterator I = NewLI.find(Idx);
    if (I != NewLI.end() && I->start <= Idx) {
      assert(I->valno == VNI && "Unexpected existing value number");

    // Is there already a live range in the block containing Idx?
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
    SlotIndex BlockStart = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "Shrink: Use val#" << VNI->id << " at " << Idx
                 << " in BB#" << MBB->getNumber() << '@' << BlockStart);
    if (I != NewLI.begin() && (--I)->end > BlockStart) {
      assert(I->valno == VNI && "Wrong reaching def");
      DEBUG(dbgs() << " extend [" << I->start << ';' << I->end << ")\n");
      // Is this the first use of a PHIDef in its defining block?
      if (VNI->isPHIDef() && I->end == VNI->def.getNextSlot()) {
        // The PHI is live, make sure the predecessors are live-out.
        for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
             PE = MBB->pred_end(); PI != PE; ++PI) {
          SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
          VNInfo *PVNI = li->getVNInfoAt(Stop);
          // A predecessor is not required to have a live-out value for a PHI.
            assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
            WorkList.push_back(std::make_pair(Stop, PVNI));

      // Extend the live range in the block to include Idx.
      NewLI.addRange(LiveRange(I->end, Idx.getNextSlot(), VNI));

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));

  // Handle dead values.
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getNextSlot())
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->setIsUnused(true);
      NewLI.removeRange(*LII);
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, tri_);

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
868 //===----------------------------------------------------------------------===//
869 // Register allocator hooks.
/// getLastSplitPoint - Return the last legal point in mbb at which spill
/// code for li may be inserted: normally the first terminator, but when li
/// is live into a landing-pad successor, spill code must precede the call
/// that may throw.
MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call instruction
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
    if (I->getDesc().isCall())

  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
/// addKillFlags - Re-derive kill flags on instructions from the computed
/// live intervals: each live range end point that is not a block edge marks
/// a killing instruction.
void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
    if (mri_->reg_nodbg_empty(Reg))
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      MI->addRegisterKilled(Reg, NULL);
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    // Ignore uses of the interval's own register.
    if (Reg == 0 || Reg == li.reg)
    // Unallocatable physreg uses (e.g. reserved regs) don't block remat.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  // Available iff both program points are covered by the same value number.
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// Also rejects remat when an implicitly-used vreg is unavailable at some
/// use, or is itself about to be spilled (SpillIs).
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
  if (!tii_->isTriviallyReMaterializable(MI, aa_))

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      // Only uses reached by this particular value number matter.
      if (li.getVNInfoAt(UseIdx) != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// Convenience overload forwarding to the main form with an empty spill
/// list and throwaway out-parameters.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1001 /// isReMaterializable - Returns true if every definition of MI of every
1002 /// val# of the specified interval is re-materializable.
1004 LiveIntervals::isReMaterializable(const LiveInterval &li,
1005 const SmallVectorImpl<LiveInterval*> &SpillIs,
// Walk all value numbers of the interval; each live def must itself be
// rematerializable for the whole interval to qualify.
1008 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1010 const VNInfo *VNI = *i;
1011 if (VNI->isUnused())
1012 continue; // Dead val#.
1013 // Is the def for the val# rematerializable?
1014 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1017 bool DefIsLoad = false;
1019 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
// Accumulate whether any rematerializable def is a load (isLoad is an
// out-parameter declared on an elided line).
1021 isLoad |= DefIsLoad;
1026 /// FilterFoldedOps - Filter out two-address use operands. Return
1027 /// true if it finds any issue with the operands that ought to prevent
1029 static bool FilterFoldedOps(MachineInstr *MI,
1030 SmallVector<unsigned, 2> &Ops,
1032 SmallVector<unsigned, 2> &FoldOps) {
// MRInfo — the parameter on the elided line 1031, an unsigned& judging by
// the callers — accumulates VirtRegMap::isMod / isRef flags describing how
// MI touches the register. FoldOps receives the operand indices that
// survive filtering.
1034 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1035 unsigned OpIdx = Ops[i];
1036 MachineOperand &MO = MI->getOperand(OpIdx);
1037 // FIXME: fold subreg use.
1041 MRInfo |= (unsigned)VirtRegMap::isMod;
1043 // Filter out two-address use operand(s).
1044 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A use tied to a def means the instruction both reads and writes the reg.
1045 MRInfo = VirtRegMap::isModRef;
1048 MRInfo |= (unsigned)VirtRegMap::isRef;
1050 FoldOps.push_back(OpIdx);
1056 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1057 /// slot / to reg or any rematerialized load into ith operand of specified
1058 /// MI. If it is successul, MI is updated with the newly created MI and
// returns true (continuation of the doc comment is on an elided line).
// MI is passed by reference so the caller sees the folded replacement.
1060 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1061 VirtRegMap &vrm, MachineInstr *DefMI,
1063 SmallVector<unsigned, 2> &Ops,
1064 bool isSS, int Slot, unsigned Reg) {
1065 // If it is an implicit def instruction, just delete it.
1066 if (MI->isImplicitDef()) {
1067 RemoveMachineInstrFromMaps(MI);
1068 vrm.RemoveMachineInstrFromMaps(MI);
1069 MI->eraseFromParent();
1074 // Filter the list of operand indexes that are to be folded. Abort if
1075 // any operand will prevent folding.
1076 unsigned MRInfo = 0;
1077 SmallVector<unsigned, 2> FoldOps;
1078 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1081 // The only time it's safe to fold into a two address instruction is when
1082 // it's folding reload and spill from / into a spill stack slot.
1083 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold from a stack slot (isSS) or from the rematerialized def instruction.
1086 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
1087 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
1089 // Remember this instruction uses the spill slot.
1090 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1092 // Attempt to fold the memory reference into the instruction. If
1093 // we can do this, we don't need to insert spill code.
1094 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1095 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all VirtRegMap bookkeeping from the old instruction to the
// folded one, then replace it in the slot-index maps and erase it.
1096 vrm.transferSpillPts(MI, fmi);
1097 vrm.transferRestorePts(MI, fmi);
1098 vrm.transferEmergencySpills(MI, fmi);
1099 ReplaceMachineInstrInMaps(MI, fmi);
1100 MI->eraseFromParent();
1108 /// canFoldMemoryOperand - Returns true if the specified load / store
1109 /// folding is possible.
1110 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1111 SmallVector<unsigned, 2> &Ops,
// The third parameter (ReMat, on an elided line) indicates the fold would
// be for a rematerialized value rather than a stack reload.
1113 // Filter the list of operand indexes that are to be folded. Abort if
1114 // any operand will prevent folding.
1115 unsigned MRInfo = 0;
1116 SmallVector<unsigned, 2> FoldOps;
1117 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1120 // It's only legal to remat for a use, not a def.
1121 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target.
1124 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of the interval lies
// within a single machine basic block.
1127 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1128 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// The first range establishes the candidate block; every subsequent range
// must map to the same block.
1130 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1135 for (++itr; itr != li.ranges.end(); ++itr) {
1136 MachineBasicBlock *mbb2 =
1137 indexes_->getMBBCoveringRange(itr->start, itr->end);
1146 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1147 /// interval on to-be re-materialized operands of MI) with new register.
1148 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1149 MachineInstr *MI, unsigned NewVReg,
// (VirtRegMap &vrm parameter is on an elided line — see the body's uses.)
1151 // There is an implicit use. That means one of the other operand is
1152 // being remat'ed and the remat'ed instruction has li.reg as an
1153 // use operand. Make sure we rewrite that as well.
1154 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1155 MachineOperand &MO = MI->getOperand(i);
// Only virtual registers that were rematerialized are of interest.
1158 unsigned Reg = MO.getReg();
1159 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1161 if (!vrm.isReMaterialized(Reg))
// Patch the cached remat instruction so its use of li.reg now refers to
// the replacement register.
1163 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1164 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1166 UseMO->setReg(NewVReg);
1170 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1171 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites a single instruction's operands that reference the spilled
// interval, creating/assigning NewVReg, attempting to fold loads/stores,
// and extending the new interval nI. Returns whether all rewritten
// operands could be folded (CanFold).
1172 bool LiveIntervals::
1173 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1174 bool TrySplit, SlotIndex index, SlotIndex end,
1176 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1177 unsigned Slot, int LdSlot,
1178 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1180 const TargetRegisterClass* rc,
1181 SmallVector<int, 4> &ReMatIds,
1182 const MachineLoopInfo *loopInfo,
1183 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1184 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1185 std::vector<LiveInterval*> &NewLIs) {
1186 bool CanFold = false;
// Scan every operand; the loop restarts from scratch (via the elided
// RestartInstruction label) after a successful fold rewrites MI.
1188 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1189 MachineOperand& mop = MI->getOperand(i);
1192 unsigned Reg = mop.getReg();
1193 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1198 bool TryFold = !DefIsReMat;
1199 bool FoldSS = true; // Default behavior unless it's a remat.
1200 int FoldSlot = Slot;
1202 // If this is the rematerializable definition MI itself and
1203 // all of its uses are rematerialized, simply delete it.
1204 if (MI == ReMatOrigDefMI && CanDelete) {
1205 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1207 RemoveMachineInstrFromMaps(MI);
1208 vrm.RemoveMachineInstrFromMaps(MI);
1209 MI->eraseFromParent();
1213 // If def for this use can't be rematerialized, then try folding.
1214 // If def is rematerializable and it's a load, also try folding.
1215 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1217 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1223 // Scan all of the operands of this instruction rewriting operands
1224 // to use NewVReg instead of li.reg as appropriate. We do this for
1227 // 1. If the instr reads the same spilled vreg multiple times, we
1228 // want to reuse the NewVReg.
1229 // 2. If the instr is a two-addr instruction, we are required to
1230 // keep the src/dst regs pinned.
1232 // Keep track of whether we replace a use and/or def so that we can
1233 // create the spill interval with the appropriate range.
1234 SmallVector<unsigned, 2> Ops;
1235 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1237 // Create a new virtual register for the spill interval.
1238 // Create the new register now so we can map the fold instruction
1239 // to the new register so when it is unfolded we get the correct
1241 bool CreatedNewVReg = false;
1243 NewVReg = mri_->createVirtualRegister(rc);
1245 CreatedNewVReg = true;
1247 // The new virtual register should get the same allocation hints as the
1249 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1250 if (Hint.first || Hint.second)
1251 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1257 // Do not fold load / store here if we are splitting. We'll find an
1258 // optimal point to insert a load / store later.
1260 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1261 Ops, FoldSS, FoldSlot, NewVReg)) {
1262 // Folding the load/store can completely change the instruction in
1263 // unpredictable ways, rescan it from the beginning.
1266 // We need to give the new vreg the same stack slot as the
1267 // spilled interval.
1268 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding deleted MI entirely it is no longer in the index maps.
1274 if (isNotInMIMap(MI))
1276 goto RestartInstruction;
1279 // We'll try to fold it later if it's profitable.
1280 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, plus all other operands in Ops, to NewVReg.
1284 mop.setReg(NewVReg);
1285 if (mop.isImplicit())
1286 rewriteImplicitOps(li, MI, NewVReg, vrm);
1288 // Reuse NewVReg for other reads.
1289 bool HasEarlyClobber = false;
1290 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1291 MachineOperand &mopj = MI->getOperand(Ops[j]);
1292 mopj.setReg(NewVReg);
1293 if (mopj.isImplicit())
1294 rewriteImplicitOps(li, MI, NewVReg, vrm);
1295 if (mopj.isEarlyClobber())
1296 HasEarlyClobber = true;
// Record remat / stack-slot bookkeeping for a freshly created vreg.
1299 if (CreatedNewVReg) {
1301 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1302 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1303 // Each valnum may have its own remat id.
1304 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1306 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1308 if (!CanDelete || (HasUse && HasDef)) {
1309 // If this is a two-addr instruction then its use operands are
1310 // rematerializable but its def is not. It should be assigned a
1312 vrm.assignVirt2StackSlot(NewVReg, Slot);
1315 vrm.assignVirt2StackSlot(NewVReg, Slot);
1317 } else if (HasUse && HasDef &&
1318 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1319 // If this interval hasn't been assigned a stack slot (because earlier
1320 // def is a deleted remat def), do it now.
1321 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1322 vrm.assignVirt2StackSlot(NewVReg, Slot);
1325 // Re-matting an instruction with virtual register use. Add the
1326 // register as an implicit use on the use MI.
1327 if (DefIsReMat && ImpUse)
1328 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1330 // Create a new register interval for this spill / remat.
1331 LiveInterval &nI = getOrCreateInterval(NewVReg);
1332 if (CreatedNewVReg) {
1333 NewLIs.push_back(&nI);
1334 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1336 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend nI's live range to cover this def and/or use (the branches on
// HasUse / HasDef are on elided lines).
1340 if (CreatedNewVReg) {
1341 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1342 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1343 DEBUG(dbgs() << " +" << LR);
1346 // Extend the split live interval to this def / use.
1347 SlotIndex End = index.getDefIndex();
1348 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1349 nI.getValNumInfo(nI.getNumValNums()-1));
1350 DEBUG(dbgs() << " +" << LR);
1355 // An early clobber starts at the use slot, except for an early clobber
1356 // tied to a use operand (yes, that is a thing).
1357 LiveRange LR(HasEarlyClobber && !HasUse ?
1358 index.getUseIndex() : index.getDefIndex(),
1359 index.getStoreIndex(),
1360 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1361 DEBUG(dbgs() << " +" << LR);
1366 dbgs() << "\t\t\t\tAdded new interval: ";
1367 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if the interval has a kill inside MBB
// strictly after the given index.
// NOTE(review): callers pass a VNInfo* as the second argument; that
// parameter is declared on an elided line (1374) and is unused in the
// visible body — confirm against the full source.
1373 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1375 MachineBasicBlock *MBB,
1376 SlotIndex Idx) const {
1377 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1380 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1381 /// during spilling.
1383 struct RewriteInfo {
// Members Index (SlotIndex) and MI (MachineInstr*) are declared on elided
// lines; see the constructor's initializer list and RewriteInfoCompare.
1386 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict weak ordering by slot index, used to sort RewriteInfos so
// instructions are rewritten in program order.
1389 struct RewriteInfoCompare {
1390 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1391 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs/uses of li.reg inside the
// live range I: collect them sorted by slot index, rewrite each via
// rewriteInstructionForSpills, and record per-MBB spill/restore points in
// SpillIdxes/RestoreIdxes (with SpillMBBs/RestoreMBBs as the block masks).
1396 void LiveIntervals::
1397 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1398 LiveInterval::Ranges::const_iterator &I,
1399 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1400 unsigned Slot, int LdSlot,
1401 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1403 const TargetRegisterClass* rc,
1404 SmallVector<int, 4> &ReMatIds,
1405 const MachineLoopInfo *loopInfo,
1406 BitVector &SpillMBBs,
1407 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1408 BitVector &RestoreMBBs,
1409 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1410 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1411 std::vector<LiveInterval*> &NewLIs) {
1412 bool AllCanFold = true;
1413 unsigned NewVReg = 0;
1414 SlotIndex start = I->start.getBaseIndex();
// end is rounded up to the base index following the last slot of the range.
1415 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1417 // First collect all the def / use in this live range that will be rewritten.
1418 // Make sure they are sorted according to instruction index.
1419 std::vector<RewriteInfo> RewriteMIs;
1420 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1421 re = mri_->reg_end(); ri != re; ) {
1422 MachineInstr *MI = &*ri;
1423 MachineOperand &O = ri.getOperand();
// DBG_VALUEs are not rewritten: retarget them to the stack slot when
// possible, otherwise drop the debug info entirely.
1425 if (MI->isDebugValue()) {
1426 // Modify DBG_VALUE now that the value is in a spill slot.
1427 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1428 uint64_t Offset = MI->getOperand(1).getImm();
1429 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1430 DebugLoc DL = MI->getDebugLoc();
1431 int FI = isLoadSS ? LdSlot : (int)Slot;
1432 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1433 Offset, MDPtr, DL)) {
1434 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1435 ReplaceMachineInstrInMaps(MI, NewDV);
1436 MachineBasicBlock *MBB = MI->getParent();
1437 MBB->insert(MBB->erase(MI), NewDV);
1442 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1443 RemoveMachineInstrFromMaps(MI);
1444 vrm.RemoveMachineInstrFromMaps(MI);
1445 MI->eraseFromParent();
1448 assert(!(O.isImplicit() && O.isUse()) &&
1449 "Spilling register that's used as implicit use?");
1450 SlotIndex index = getInstructionIndex(MI);
// Instructions outside this live range belong to other ranges.
1451 if (index < start || index >= end)
1455 // Must be defined by an implicit def. It should not be spilled. Note,
1456 // this is for correctness reason. e.g.
1457 // 8 %reg1024<def> = IMPLICIT_DEF
1458 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1459 // The live range [12, 14) are not part of the r1024 live interval since
1460 // it's defined by an implicit def. It will not conflicts with live
1461 // interval of r1025. Now suppose both registers are spilled, you can
1462 // easily see a situation where both registers are reloaded before
1463 // the INSERT_SUBREG and both target registers that would overlap.
1465 RewriteMIs.push_back(RewriteInfo(index, MI));
1467 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1469 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1470 // Now rewrite the defs and uses.
1471 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1472 RewriteInfo &rwi = RewriteMIs[i];
1474 SlotIndex index = rwi.Index;
1475 MachineInstr *MI = rwi.MI;
1476 // If MI def and/or use the same register multiple times, then there
1477 // are multiple entries.
// Skip all duplicate entries for the same instruction.
1478 while (i != e && RewriteMIs[i].MI == MI) {
1479 assert(RewriteMIs[i].Index == index);
1482 MachineBasicBlock *MBB = MI->getParent();
1484 if (ImpUse && MI != ReMatDefMI) {
1485 // Re-matting an instruction with virtual register use. Prevent interval
1486 // from being spilled.
1487 getInterval(ImpUse).markNotSpillable();
// Decide whether to reuse the per-MBB replacement vreg or start a new one.
1490 unsigned MBBId = MBB->getNumber();
1491 unsigned ThisVReg = 0;
1493 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1494 if (NVI != MBBVRegsMap.end()) {
1495 ThisVReg = NVI->second;
1502 // It's better to start a new interval to avoid artifically
1503 // extend the new interval.
1504 if (MI->readsWritesVirtualRegister(li.reg) ==
1505 std::make_pair(false,true)) {
// Pure def: drop the mapping so a fresh vreg is created for it.
1506 MBBVRegsMap.erase(MBB->getNumber());
1512 bool IsNew = ThisVReg == 0;
1514 // This ends the previous live interval. If all of its def / use
1515 // can be folded, give it a low spill weight.
1516 if (NewVReg && TrySplit && AllCanFold) {
1517 LiveInterval &nI = getOrCreateInterval(NewVReg);
1524 bool HasDef = false;
1525 bool HasUse = false;
1526 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1527 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1528 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1529 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1530 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// Nothing rewritten for this MI (e.g. it was deleted) — move on.
1531 if (!HasDef && !HasUse)
1534 AllCanFold &= CanFold;
1536 // Update weight of spill interval.
1537 LiveInterval &nI = getOrCreateInterval(NewVReg);
1539 // The spill weight is now infinity as it cannot be spilled again.
1540 nI.markNotSpillable();
1544 // Keep track of the last def and first use in each MBB.
1546 if (MI != ReMatOrigDefMI || !CanDelete) {
1547 bool HasKill = false;
1549 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1551 // If this is a two-address code, then this index starts a new VNInfo.
1552 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1554 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1556 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1557 SpillIdxes.find(MBBId);
// Record (or update) the spill point for this vreg in this block.
1559 if (SII == SpillIdxes.end()) {
1560 std::vector<SRInfo> S;
1561 S.push_back(SRInfo(index, NewVReg, true));
1562 SpillIdxes.insert(std::make_pair(MBBId, S));
1563 } else if (SII->second.back().vreg != NewVReg) {
1564 SII->second.push_back(SRInfo(index, NewVReg, true));
1565 } else if (index > SII->second.back().index) {
1566 // If there is an earlier def and this is a two-address
1567 // instruction, then it's not possible to fold the store (which
1568 // would also fold the load).
1569 SRInfo &Info = SII->second.back();
1571 Info.canFold = !HasUse;
1573 SpillMBBs.set(MBBId);
1574 } else if (SII != SpillIdxes.end() &&
1575 SII->second.back().vreg == NewVReg &&
1576 index > SII->second.back().index) {
1577 // There is an earlier def that's not killed (must be two-address).
1578 // The spill is no longer needed.
1579 SII->second.pop_back();
1580 if (SII->second.empty()) {
1581 SpillIdxes.erase(MBBId);
1582 SpillMBBs.reset(MBBId);
// Use-side bookkeeping: invalidate folds made unsafe by later uses, and
// record a restore point if no earlier def/use exists in this block.
1589 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1590 SpillIdxes.find(MBBId);
1591 if (SII != SpillIdxes.end() &&
1592 SII->second.back().vreg == NewVReg &&
1593 index > SII->second.back().index)
1594 // Use(s) following the last def, it's not safe to fold the spill.
1595 SII->second.back().canFold = false;
1596 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1597 RestoreIdxes.find(MBBId);
1598 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1599 // If we are splitting live intervals, only fold if it's the first
1600 // use and there isn't another use later in the MBB.
1601 RII->second.back().canFold = false;
1603 // Only need a reload if there isn't an earlier def / use.
1604 if (RII == RestoreIdxes.end()) {
1605 std::vector<SRInfo> Infos;
1606 Infos.push_back(SRInfo(index, NewVReg, true));
1607 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1609 RII->second.push_back(SRInfo(index, NewVReg, true));
1611 RestoreMBBs.set(MBBId);
1615 // Update spill weight.
1616 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1617 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1620 if (NewVReg && TrySplit && AllCanFold) {
1621 // If all of its def / use can be folded, give it a low spill weight.
1622 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if block Id has a recorded restore of vr
// at the given index that is still marked foldable.
1627 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1628 unsigned vr, BitVector &RestoreMBBs,
1629 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1630 if (!RestoreMBBs[Id])
1632 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1633 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1634 if (Restores[i].index == index &&
1635 Restores[i].vreg == vr &&
1636 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the recorded restore at the given index in
// block Id by clearing its index to the sentinel SlotIndex().
1641 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1642 unsigned vr, BitVector &RestoreMBBs,
1643 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1644 if (!RestoreMBBs[Id])
1646 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1647 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this tests vreg for non-zero rather than comparing it with
// the vr parameter (which is otherwise unused here), unlike the equality
// check in alsoFoldARestore — looks suspicious; confirm intent.
1648 if (Restores[i].index == index && Restores[i].vreg)
1649 Restores[i].index = SlotIndex();
1652 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1653 /// spilled and create empty intervals for their uses.
1655 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1656 const TargetRegisterClass* rc,
1657 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining reference to li.reg (defs should all be
// IMPLICIT_DEFs at this point — see the assert below).
1658 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1659 re = mri_->reg_end(); ri != re; ) {
1660 MachineOperand &O = ri.getOperand();
1661 MachineInstr *MI = &*ri;
1663 if (MI->isDebugValue()) {
1664 // Remove debug info for now.
1666 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Defs must be IMPLICIT_DEFs; delete them and their map entries.
1670 assert(MI->isImplicitDef() &&
1671 "Register def was not rewritten?");
1672 RemoveMachineInstrFromMaps(MI);
1673 vrm.RemoveMachineInstrFromMaps(MI);
1674 MI->eraseFromParent();
1676 // This must be an use of an implicit_def so it's not part of the live
1677 // interval. Create a new empty live interval for it.
1678 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1679 unsigned NewVReg = mri_->createVirtualRegister(rc);
1681 vrm.setIsImplicitlyDefined(NewVReg);
1682 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Retarget every operand of this instruction that referenced li.reg.
1683 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1684 MachineOperand &MO = MI->getOperand(i);
1685 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the contribution of one def and/or use at the
// given loop depth to a live interval's spill weight. (Return type and the
// clamped-depth early return are on elided lines.)
1695 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1696 // Limit the loop depth ridiculousness.
1697 if (loopDepth > 200)
1700 // The loop depth is used to roughly estimate the number of times the
1701 // instruction is executed. Something like 10^d is simple, but will quickly
1702 // overflow a float. This expression behaves like 10^d for small d, but is
1703 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1704 // headroom before overflow.
1705 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are 0 or 1, so the factor is 0, 1, or 2.
1707 return (isDef + isUse) * lc;
// normalizeSpillWeights - Normalize each new interval's spill weight by its
// size (via normalizeSpillWeight).
1710 static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1711 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1713 normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
// addIntervalsForSpills - Spill the live interval li: decide per-val#
// rematerialization, assign a stack slot if needed, rewrite all defs/uses
// into new small intervals, insert spill/restore points, and return the
// new intervals.
1716 std::vector<LiveInterval*> LiveIntervals::
1717 addIntervalsForSpills(const LiveInterval &li,
1718 const SmallVectorImpl<LiveInterval*> &SpillIs,
1719 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1720 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1723 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1724 li.print(dbgs(), tri_);
// Per-block spill/restore bookkeeping shared by the rewrite helpers.
1728 // Each bit specify whether a spill is required in the MBB.
1729 BitVector SpillMBBs(mf_->getNumBlockIDs());
1730 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1731 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1732 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1733 DenseMap<unsigned,unsigned> MBBVRegsMap;
1734 std::vector<LiveInterval*> NewLIs;
1735 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# remat state, indexed by VNInfo id.
1737 unsigned NumValNums = li.getNumValNums();
1738 SmallVector<MachineInstr*, 4> ReMatDefs;
1739 ReMatDefs.resize(NumValNums, NULL);
1740 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1741 ReMatOrigDefs.resize(NumValNums, NULL);
1742 SmallVector<int, 4> ReMatIds;
1743 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1744 BitVector ReMatDelete(NumValNums);
1745 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1747 // Spilling a split live interval. It cannot be split any further. Also,
1748 // it's also guaranteed to be a single val# / range interval.
1749 if (vrm.getPreSplitReg(li.reg)) {
1750 vrm.setIsSplitFromReg(li.reg, 0);
1751 // Unset the split kill marker on the last use.
1752 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1753 if (KillIdx != SlotIndex()) {
1754 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1755 assert(KillMI && "Last use disappeared?");
1756 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1757 assert(KillOp != -1 && "Last use disappeared?");
1758 KillMI->getOperand(KillOp).setIsKill(false);
1760 vrm.removeKillPoint(li.reg);
1761 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1762 Slot = vrm.getStackSlot(li.reg);
1763 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1764 MachineInstr *ReMatDefMI = DefIsReMat ?
1765 vrm.getReMaterializedMI(li.reg) : NULL;
// (LdSlot declaration is on an elided line.)
1767 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1768 bool isLoad = isLoadSS ||
1769 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1770 bool IsFirstRange = true;
1771 for (LiveInterval::Ranges::const_iterator
1772 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1773 // If this is a split live interval with multiple ranges, it means there
1774 // are two-address instructions that re-defined the value. Only the
1775 // first def can be rematerialized!
1777 // Note ReMatOrigDefMI has already been deleted.
1778 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1779 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1780 false, vrm, rc, ReMatIds, loopInfo,
1781 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1782 MBBVRegsMap, NewLIs);
1784 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1785 Slot, 0, false, false, false,
1786 false, vrm, rc, ReMatIds, loopInfo,
1787 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1788 MBBVRegsMap, NewLIs);
1790 IsFirstRange = false;
1793 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1794 normalizeSpillWeights(NewLIs);
// Not a pre-split interval: only try splitting when the interval spans
// multiple blocks.
1798 bool TrySplit = !intervalIsInOneMBB(li);
1801 bool NeedStackSlot = false;
1802 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1804 const VNInfo *VNI = *i;
1805 unsigned VN = VNI->id;
1806 if (VNI->isUnused())
1807 continue; // Dead val#.
1808 // Is the def for the val# rematerializable?
1809 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1811 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1812 // Remember how to remat the def of this val#.
1813 ReMatOrigDefs[VN] = ReMatDefMI;
1814 // Original def may be modified so we have to make a copy here.
1815 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1816 CloneMIs.push_back(Clone);
1817 ReMatDefs[VN] = Clone;
1819 bool CanDelete = true;
1820 if (VNI->hasPHIKill()) {
1821 // A kill is a phi node, not all of its uses can be rematerialized.
1822 // It must not be deleted.
1824 // Need a stack slot if there is any live range where uses cannot be
1826 NeedStackSlot = true;
1829 ReMatDelete.set(VN);
1831 // Need a stack slot if there is any live range where uses cannot be
1833 NeedStackSlot = true;
1837 // One stack slot per live interval.
1838 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1839 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1840 Slot = vrm.assignVirt2StackSlot(li.reg);
1842 // This case only occurs when the prealloc splitter has already assigned
1843 // a stack slot to this vreg.
1845 Slot = vrm.getStackSlot(li.reg);
1848 // Create new intervals and rewrite defs and uses.
1849 for (LiveInterval::Ranges::const_iterator
1850 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1851 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1852 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1853 bool DefIsReMat = ReMatDefMI != NULL;
1854 bool CanDelete = ReMatDelete[I->valno->id];
1856 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1857 bool isLoad = isLoadSS ||
1858 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1859 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1860 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1861 CanDelete, vrm, rc, ReMatIds, loopInfo,
1862 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1863 MBBVRegsMap, NewLIs);
1866 // Insert spills / restores if we are splitting.
1868 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1869 normalizeSpillWeights(NewLIs);
// Walk every block that needs a spill and try to fold the store into the
// defining instruction; otherwise record an explicit spill point.
1873 SmallPtrSet<LiveInterval*, 4> AddedKill;
1874 SmallVector<unsigned, 2> Ops;
1875 if (NeedStackSlot) {
1876 int Id = SpillMBBs.find_first();
1878 std::vector<SRInfo> &spills = SpillIdxes[Id];
1879 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1880 SlotIndex index = spills[i].index;
1881 unsigned VReg = spills[i].vreg;
1882 LiveInterval &nI = getOrCreateInterval(VReg);
1883 bool isReMat = vrm.isReMaterialized(VReg);
1884 MachineInstr *MI = getInstructionFromIndex(index);
1885 bool CanFold = false;
1886 bool FoundUse = false;
1888 if (spills[i].canFold) {
1890 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1891 MachineOperand &MO = MI->getOperand(j);
1892 if (!MO.isReg() || MO.getReg() != VReg)
1899 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1900 RestoreMBBs, RestoreIdxes))) {
1901 // MI has two-address uses of the same register. If the use
1902 // isn't the first and only use in the BB, then we can't fold
1903 // it. FIXME: Move this to rewriteInstructionsForSpills.
1910 // Fold the store into the def if possible.
1911 bool Folded = false;
1912 if (CanFold && !Ops.empty()) {
1913 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1916 // Also folded uses, do not issue a load.
1917 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1918 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1920 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1924 // Otherwise tell the spiller to issue a spill.
1926 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1927 bool isKill = LR->end == index.getStoreIndex();
1928 if (!MI->registerDefIsDead(nI.reg))
1929 // No need to spill a dead def.
1930 vrm.addSpillPoint(VReg, isKill, MI);
1932 AddedKill.insert(&nI);
1935 Id = SpillMBBs.find_next(Id);
// Walk every block that needs a restore and try to fold the load (or the
// rematerialized def) into the use; otherwise record a restore point.
1939 int Id = RestoreMBBs.find_first();
1941 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1942 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1943 SlotIndex index = restores[i].index;
// SlotIndex() marks entries invalidated by eraseRestoreInfo above.
1944 if (index == SlotIndex())
1946 unsigned VReg = restores[i].vreg;
1947 LiveInterval &nI = getOrCreateInterval(VReg);
1948 bool isReMat = vrm.isReMaterialized(VReg);
1949 MachineInstr *MI = getInstructionFromIndex(index);
1950 bool CanFold = false;
1952 if (restores[i].canFold) {
1954 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1955 MachineOperand &MO = MI->getOperand(j);
1956 if (!MO.isReg() || MO.getReg() != VReg)
1960 // If this restore were to be folded, it would have been folded
1969 // Fold the load into the use if possible.
1970 bool Folded = false;
1971 if (CanFold && !Ops.empty()) {
1973 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1975 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1977 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1978 // If the rematerializable def is a load, also try to fold it.
1979 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1980 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1981 Ops, isLoadSS, LdSlot, VReg);
1983 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1985 // Re-matting an instruction with virtual register use. Add the
1986 // register as an implicit use on the use MI and mark the register
1987 // interval as unspillable.
1988 LiveInterval &ImpLi = getInterval(ImpUse);
1989 ImpLi.markNotSpillable();
1990 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1995 // If folding is not possible / failed, then tell the spiller to issue a
1996 // load / rematerialization for us.
1998 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2000 vrm.addRestorePoint(VReg, MI);
2002 Id = RestoreMBBs.find_next(Id);
2005 // Finalize intervals: add kills, finalize spill weights, and filter out
2007 std::vector<LiveInterval*> RetNewLIs;
2008 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2009 LiveInterval *LI = NewLIs[i];
// Mark the last use of each new interval as a kill unless a spill was
// already recorded for it (AddedKill) or the use is tied to a def.
2011 if (!AddedKill.count(LI)) {
2012 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2013 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2014 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2015 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2016 assert(UseIdx != -1);
2017 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2018 LastUse->getOperand(UseIdx).setIsKill();
2019 vrm.addKillPoint(LI->reg, LastUseIdx);
2022 RetNewLIs.push_back(LI);
2026 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2027 normalizeSpillWeights(RetNewLIs);
2031 /// hasAllocatableSuperReg - Return true if the specified physical register has
2032 /// any super register that's allocatable.
2033 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the zero-terminated super-register list the target provides for Reg.
2034 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
// A super-register only counts if it is marked allocatable AND we are
// actually tracking a live interval for it in this analysis.
2035 if (allocatableRegs_[*AS] && hasInterval(*AS))
2040 /// getRepresentativeReg - Find the largest super register of the specified
2041 /// physical register.
2042 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2043 // Find the largest super-register that is allocatable.
// Default to Reg itself: if no qualifying super-register is found, the
// register is its own representative.
2044 unsigned BestReg = Reg;
// Scan the zero-terminated super-register list for Reg.
2045 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2046 unsigned SuperReg = *AS;
// Pick a super-register that has a live interval but no allocatable
// super-register of its own — i.e. it is the topmost tracked register
// in the hierarchy. (Assignment to BestReg is elided in this view.)
2047 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2055 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2056 /// specified interval that conflicts with the specified physical register.
2057 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2058 unsigned PhysReg) const {
2059 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest allocatable
// super-) register's live interval, not PhysReg's own interval, so that
// aliasing through super-registers is accounted for.
2060 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Walk every operand (use or def) of the virtual register li.reg.
2061 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2062 E = mri_->reg_end(); I != E; ++I) {
2063 MachineOperand &O = I.getOperand();
2064 MachineInstr *MI = O.getParent();
// DBG_VALUE instructions don't constrain allocation; skip them.
2065 if (MI->isDebugValue())
2067 SlotIndex Index = getInstructionIndex(MI);
// If the physreg interval is live at this instruction, the use/def
// conflicts with it. (The counter increment is elided in this view.)
2068 if (pli.liveAt(Index))
2071 return NumConflicts;
2074 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2075 /// around all defs and uses of the specified interval. Return true if it
2076 /// was able to cut its interval.
2077 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2078 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (topmost) register so all aliasing
// sub-registers are handled together.
2079 unsigned SpillReg = getRepresentativeReg(PhysReg);
2081 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2082 << " represented by " << tri_->getName(SpillReg) << '\n');
2084 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2085 // If there are registers which alias PhysReg, but which are not a
2086 // sub-register of the chosen representative super register. Assert
2087 // since we can't handle it yet.
2088 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2089 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and all of its sub-registers that have live
// intervals; these are the physreg intervals we must cut.
2092 SmallVector<unsigned, 4> PRegs;
2093 if (hasInterval(SpillReg))
2094 PRegs.push_back(SpillReg);
2095 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2096 if (hasInterval(*SR))
2097 PRegs.push_back(*SR);
2100 dbgs() << "Trying to spill:";
2101 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2102 dbgs() << ' ' << tri_->getName(PRegs[i]);
// Visit each instruction that uses or defines li.reg exactly once
// (an instruction may appear multiple times via multiple operands).
2106 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2107 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2108 E = mri_->reg_end(); I != E; ++I) {
2109 MachineOperand &O = I.getOperand();
2110 MachineInstr *MI = O.getParent();
// Skip DBG_VALUEs and instructions already processed.
2111 if (MI->isDebugValue() || SeenMIs.count(MI))
2114 SlotIndex Index = getInstructionIndex(MI);
2115 bool LiveReg = false;
2116 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2117 unsigned PReg = PRegs[i];
2118 LiveInterval &pli = getInterval(PReg);
// Only physreg intervals live across this instruction need cutting.
2119 if (!pli.liveAt(Index))
// Carve the physreg interval out over the full slot span of this
// instruction: from its load slot up to the next instruction's base.
2122 SlotIndex StartIdx = Index.getLoadIndex();
2123 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
// If the span is not contained in a single live range we cannot cut
// it safely — report an unrecoverable allocation failure.
2124 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2126 raw_string_ostream Msg(msg);
2127 Msg << "Ran out of registers during register allocation!";
// Inline asm with bad constraints is a common cause; point the
// user at the offending statement.
2128 if (MI->isInlineAsm()) {
2129 Msg << "\nPlease check your inline asm statement for invalid "
2130 << "constraints:\n";
2131 MI->print(Msg, tm_);
2133 report_fatal_error(Msg.str());
// Cut the physreg interval around this instruction.
2135 pli.removeRange(StartIdx, EndIdx);
2140 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
// Tell the rewriter to emit an emergency spill/restore of SpillReg
// around this instruction.
2141 vrm.addEmergencySpill(SpillReg, MI);
2147 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2148 MachineInstr* startInst) {
2149 LiveInterval& Interval = getOrCreateInterval(reg);
2150 VNInfo* VN = Interval.getNextValue(
2151 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2152 startInst, getVNInfoAllocator());
2153 VN->setHasPHIKill(true);
2155 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2156 getMBBEndIdx(startInst->getParent()), VN);
2157 Interval.addRange(LR);