1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/CalcSpillWeights.h"
24 #include "llvm/CodeGen/LiveVariables.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineLoopInfo.h"
29 #include "llvm/CodeGen/MachineMemOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/Passes.h"
32 #include "llvm/CodeGen/ProcessImplicitDefs.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetInstrInfo.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetOptions.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/ADT/DepthFirstIterator.h"
42 #include "llvm/ADT/SmallSet.h"
43 #include "llvm/ADT/Statistic.h"
44 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Statistics reported under -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
// Pass registration: declares this analysis, the passes it depends on, and
// the AliasAnalysis analysis-group dependency.
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
/// PHI elimination and two-address lowering are required, so every vreg def
/// seen below is in the post-SSA form this pass expects.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  // SlotIndexes must stay valid as long as the indices stored here are used,
  // hence addRequiredTransitive.
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state owned by this analysis.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  // Delete machine instructions this pass still owns (cloned copies kept in
  // CloneMIs that never made it into a basic block).
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Register allocate the whole function
/// (caches the analyses/target hooks, then computes all live intervals).
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache pointers to target info and required analyses used throughout.
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Update the -stats counter with the intervals just computed.
  numIntervals += getNumIntervals();
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  // Print every interval in the map; tri_ supplies register names.
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
/// printInstrs - Print the machine instructions of the current function,
/// annotated with slot indexes.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);

/// dumpInstrs - Debug-dump variant of printInstrs.
void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Check for a conflict between the given live
/// interval (after register allocation, via vrm) and the physical register
/// reg over the interval's single range.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())

  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;
    // Allow copies to and from li.reg
    if (MI.getOperand(0).getReg() == li.reg ||
        MI.getOperand(1).getReg() == li.reg)

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        // Map a virtual register operand to its assigned physreg, if any.
        if (!vrm.hasPhys(PhysReg))
        PhysReg = vrm.getPhys(PhysReg);
      // A conflict exists if any operand's physreg aliases reg.
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))

  // No conflicts found.
/// conflictsWithAliasRef - Scan the instructions covered by li's ranges for
/// a physical-register operand that overlaps Reg. Instructions listed in
/// JoinedCopies are ignored.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Walk each base index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue; // skip deleted instructions
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        unsigned PhysReg = MO.getReg();
        // Only explicit physical registers other than Reg are interesting.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->regsOverlap(Reg, PhysReg))
/// MultipleDefsBySameMI - Detect whether MI writes the virtual register of
/// operand MOIdx again in a later operand (multiple sub-register defs by a
/// single instruction, e.g. after REG_SEQUENCE elimination).
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  // Look past MOIdx for another def of the same register.
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.getReg() == Reg && MO.isDef()) {
      // The defs are expected to be distinct sub-registers (or implicit).
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // Only a sub-register def (and not an early-clobber) can be a partial redef.
  if (!MO.getSubReg() || MO.isEarlyClobber())

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  // Find the value live just before this def; its defining instruction tells
  // us whether the full register was already defined.
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Update "interval" for a def of a virtual
/// register at the given instruction, using LiveVariables kill information.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);

      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

      // Finally, this virtual register is live from the start of any killing
      // block to the 'use' slot of the killing instruction.
      for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
        MachineInstr *Kill = vi.Kills[i];
        SlotIndex Start = getMBBStartIdx(Kill->getParent());
        SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

        // Create interval with one of a NEW value number. Note that this value
        // number isn't actually defined by an instruction, weird huh? :)
          assert(getInstructionFromIndex(Start) == 0 &&
                 "PHI def index points at actual instruction.");
          ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
          ValNo->setIsPHIDef(true);
        LiveRange LR(Start, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    //
    // It may also be partial redef like this:
    // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),

        dbgs() << " RESULT: ";
        interval.print(dbgs(), tri_);
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      // The phi-join value is live from this def to the end of the block.
      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
      llvm_unreachable("Multiply defined register");

  DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Update "interval" for a def of a physical
/// register at the given instruction; CopyMI is non-null when the def comes
/// from a copy-like instruction.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    if (mi->isDebugValue())
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (mi->isRegTiedToUseOperand(DefIdx)) {
        // Two-address instruction.
        end = baseIndex.getDefIndex();
        // Another instruction redefines the register before it is ever read.
        // Then the register is essentially dead at the instruction that
        // defines it. Hence its interval is:
        // [defSlot(def), defSlot(def)+1)
        DEBUG(dbgs() << " dead");
        end = start.getStoreIndex();

    baseIndex = baseIndex.getNextIndex();

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def operand to the virtual- or
/// physical-register handler; a physical def also defines its sub-registers.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is live
/// into MBB; isAlias marks intervals created for a sub-/alias register.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      // MBB is empty except for DBG_VALUE's.

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  // Live-in register might not be used at all.
    DEBUG(dbgs() << " dead");
    end = MIIdx.getStoreIndex();
    DEBUG(dbgs() << " live through");

  // The live-in value is treated as defined by a PHI at the block start.
  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers only used as <undef> get empty intervals at the end.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())

      // Handle register defs (walk operands in reverse).
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())

        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a new LiveInterval for the given register.
/// Physical registers start with HUGE_VALF spill weight; virtuals with 0.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  // Deep-copy ranges and value numbers using this pass's VNInfo allocator.
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
void LiveIntervals::shrinkToUses(LiveInterval *li) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can't only shrink physical registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
    VNInfo *VNI = li->getVNInfoAt(Idx);
    assert(VNI && "Live interval not live into reading instruction");
    if (VNI->def == Idx) {
      // Special case: An early-clobber tied operand reads and writes the
      // register one slot early.
      Idx = Idx.getPrevSlot();
      VNI = li->getVNInfoAt(Idx);
      assert(VNI && "Early-clobber tied value not available");
    WorkList.push_back(std::make_pair(Idx, VNI));

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
      NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;

    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || !UsedPHIs.insert(VNI))
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
        VNInfo *PVNI = li->getVNInfoAt(Stop);
        // A predecessor is not required to have a live-out value for a PHI.
          assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
          WorkList.push_back(std::make_pair(Stop, PVNI));

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));

  // Handle dead values.
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getNextSlot())
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->setIsUnused(true);
      NewLI.removeRange(*LII);
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, tri_);

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
856 //===----------------------------------------------------------------------===//
857 // Register allocator hooks.
/// getLastSplitPoint - Return the last point in mbb where spill code for li
/// may be inserted, taking landing-pad successors into account.
MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call instruction
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
    if (I->getDesc().isCall())

  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
/// addKillFlags - Add kill flags on instructions at live range end points.
void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
    if (mri_->reg_nodbg_empty(Reg))
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      MI->addRegisterKilled(Reg, NULL);
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    // Skip no-register operands and uses of the interval's own register.
    if (Reg == 0 || Reg == li.reg)
    // Unallocatable physreg uses (e.g. reserved regs) don't block remat.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  // Compare the value numbers live at UseIdx and at MI's own index.
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable. SpillIs lists
/// intervals about to be spilled, which may make remat illegal.
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
  // The target must consider MI trivially rematerializable at all.
  if (!tii_->isTriviallyReMaterializable(MI, aa_))

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // The implicitly used value must be available at every use of this val#.
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// (Convenience overload with no spill list.)
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  // Forward to the full overload with empty dummy out-arguments.
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
    // Accumulate whether any rematerializable def is a load.
    isLoad |= DefIsLoad;
1014 /// FilterFoldedOps - Filter out two-address use operands. Return
1015 /// true if it finds any issue with the operands that ought to prevent
/// folding (continuation of this sentence is elided in this listing).
///
/// Out-params (visible here): MRInfo accumulates VirtRegMap::isMod /
/// isRef / isModRef bits describing how MI touches the register, and
/// FoldOps receives the operand indices that remain candidates for folding.
1017 static bool FilterFoldedOps(MachineInstr *MI,
1018 SmallVector<unsigned, 2> &Ops,
1020 SmallVector<unsigned, 2> &FoldOps) {
1022 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1023 unsigned OpIdx = Ops[i];
1024 MachineOperand &MO = MI->getOperand(OpIdx);
1025 // FIXME: fold subreg use.
// A def operand contributes the "mod" bit.
1029 MRInfo |= (unsigned)VirtRegMap::isMod;
1031 // Filter out two-address use operand(s).
// Tied (two-address) operands imply both a read and a write; record
// isModRef but do not add the operand to FoldOps.
1032 if (MI->isRegTiedToDefOperand(OpIdx)) {
1033 MRInfo = VirtRegMap::isModRef;
1036 MRInfo |= (unsigned)VirtRegMap::isRef;
1038 FoldOps.push_back(OpIdx);
1044 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1045 /// slot / to reg or any rematerialized load into ith operand of specified
1046 /// MI. If it is successul, MI is updated with the newly created MI and
/// (remainder of this doc sentence is elided in the listing — presumably
/// "returns true").
///
/// isSS selects stack-slot folding (Slot) versus folding the re-mat def
/// instruction DefMI. On success the old MI is replaced in all maps and
/// erased; VirtRegMap bookkeeping (spill/restore/emergency-spill points) is
/// transferred to the folded instruction.
1048 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1049 VirtRegMap &vrm, MachineInstr *DefMI,
1051 SmallVector<unsigned, 2> &Ops,
1052 bool isSS, int Slot, unsigned Reg) {
1053 // If it is an implicit def instruction, just delete it.
1054 if (MI->isImplicitDef()) {
1055 RemoveMachineInstrFromMaps(MI);
1056 vrm.RemoveMachineInstrFromMaps(MI);
1057 MI->eraseFromParent();
1062 // Filter the list of operand indexes that are to be folded. Abort if
1063 // any operand will prevent folding.
1064 unsigned MRInfo = 0;
1065 SmallVector<unsigned, 2> FoldOps;
1066 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1069 // The only time it's safe to fold into a two address instruction is when
1070 // it's folding reload and spill from / into a spill stack slot.
1071 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to produce the folded instruction (stack-slot or re-mat
// def form). A null result means folding failed (handling elided here).
1074 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
1075 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
1077 // Remember this instruction uses the spill slot.
1078 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1080 // Attempt to fold the memory reference into the instruction. If
1081 // we can do this, we don't need to insert spill code.
1082 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1083 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all VirtRegMap bookkeeping from the old MI to the folded one.
1084 vrm.transferSpillPts(MI, fmi);
1085 vrm.transferRestorePts(MI, fmi);
1086 vrm.transferEmergencySpills(MI, fmi);
1087 ReplaceMachineInstrInMaps(MI, fmi);
1088 MI->eraseFromParent();
1096 /// canFoldMemoryOperand - Returns true if the specified load / store
1097 /// folding is possible.
///
/// Non-mutating twin of tryFoldMemoryOperand: runs the same operand filter
/// and legality checks, then asks the target, without changing MI.
1098 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1099 SmallVector<unsigned, 2> &Ops,
1101 // Filter the list of operand indexes that are to be folded. Abort if
1102 // any operand will prevent folding.
1103 unsigned MRInfo = 0;
1104 SmallVector<unsigned, 2> FoldOps;
1105 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1108 // It's only legal to remat for a use, not a def.
// ReMat (third parameter, declaration elided in this listing) combined
// with a modifying operand rules folding out.
1109 if (ReMat && (MRInfo & VirtRegMap::isMod))
1112 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return true if every range of the interval is
/// covered by the same machine basic block (comparison/return lines are
/// elided from this listing).
1115 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1116 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; subsequent ranges are checked against it.
1118 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1123 for (++itr; itr != li.ranges.end(); ++itr) {
1124 MachineBasicBlock *mbb2 =
1125 indexes_->getMBBCoveringRange(itr->start, itr->end);
1134 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1135 /// interval on to-be re-materialized operands of MI) with new register.
1136 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1137 MachineInstr *MI, unsigned NewVReg,
1139 // There is an implicit use. That means one of the other operand is
1140 // being remat'ed and the remat'ed instruction has li.reg as an
1141 // use operand. Make sure we rewrite that as well.
1142 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1143 MachineOperand &MO = MI->getOperand(i);
// Only virtual registers that were re-materialized are of interest;
// everything else is skipped (continue statements elided in this listing).
1146 unsigned Reg = MO.getReg();
1147 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1149 if (!vrm.isReMaterialized(Reg))
// Patch the cached re-mat instruction's use of li.reg to the new vreg.
1151 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1152 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1154 UseMO->setReg(NewVReg);
1158 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1159 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites one instruction's operands that reference the spilled interval
/// li onto a (possibly fresh) NewVReg, attempting store/load folding and
/// re-materialization along the way, and extends/creates the live range of
/// the new spill interval accordingly.
///
/// Out-params visible here: NewVReg (vreg used for this MBB's rewrites),
/// HasDef/HasUse (whether MI writes/reads the register), NewLIs (collects
/// the newly created spill intervals). Returns CanFold — whether all the
/// rewritten operands could later be folded profitably.
// NOTE(review): numerous interior lines are elided from this listing
// (early continues, else branches, closing braces); comments below are
// limited to the visible statements.
1160 bool LiveIntervals::
1161 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1162 bool TrySplit, SlotIndex index, SlotIndex end,
1164 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1165 unsigned Slot, int LdSlot,
1166 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1168 const TargetRegisterClass* rc,
1169 SmallVector<int, 4> &ReMatIds,
1170 const MachineLoopInfo *loopInfo,
1171 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1172 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1173 std::vector<LiveInterval*> &NewLIs) {
1174 bool CanFold = false;
// Scan operands looking for references to the spilled virtual register.
1176 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1177 MachineOperand& mop = MI->getOperand(i);
1180 unsigned Reg = mop.getReg();
1181 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1186 bool TryFold = !DefIsReMat;
1187 bool FoldSS = true; // Default behavior unless it's a remat.
1188 int FoldSlot = Slot;
1190 // If this is the rematerializable definition MI itself and
1191 // all of its uses are rematerialized, simply delete it.
1192 if (MI == ReMatOrigDefMI && CanDelete) {
1193 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1195 RemoveMachineInstrFromMaps(MI);
1196 vrm.RemoveMachineInstrFromMaps(MI);
1197 MI->eraseFromParent();
1201 // If def for this use can't be rematerialized, then try folding.
1202 // If def is rematerializable and it's a load, also try folding.
1203 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1205 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1211 // Scan all of the operands of this instruction rewriting operands
1212 // to use NewVReg instead of li.reg as appropriate. We do this for
1215 // 1. If the instr reads the same spilled vreg multiple times, we
1216 // want to reuse the NewVReg.
1217 // 2. If the instr is a two-addr instruction, we are required to
1218 // keep the src/dst regs pinned.
1220 // Keep track of whether we replace a use and/or def so that we can
1221 // create the spill interval with the appropriate range.
1222 SmallVector<unsigned, 2> Ops;
1223 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1225 // Create a new virtual register for the spill interval.
1226 // Create the new register now so we can map the fold instruction
1227 // to the new register so when it is unfolded we get the correct
1229 bool CreatedNewVReg = false;
1231 NewVReg = mri_->createVirtualRegister(rc);
1233 CreatedNewVReg = true;
1235 // The new virtual register should get the same allocation hints as the
// old one (rest of comment elided); copy any existing hint over.
1237 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1238 if (Hint.first || Hint.second)
1239 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1245 // Do not fold load / store here if we are splitting. We'll find an
1246 // optimal point to insert a load / store later.
1248 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1249 Ops, FoldSS, FoldSlot, NewVReg)) {
1250 // Folding the load/store can completely change the instruction in
1251 // unpredictable ways, rescan it from the beginning.
1254 // We need to give the new vreg the same stack slot as the
1255 // spilled interval.
1256 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding deleted the instruction entirely it is gone from the maps;
// otherwise restart the operand scan on the new form of MI.
1262 if (isNotInMIMap(MI))
1264 goto RestartInstruction;
1267 // We'll try to fold it later if it's profitable.
1268 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and any tied/duplicate ones below) to NewVReg.
1272 mop.setReg(NewVReg);
1273 if (mop.isImplicit())
1274 rewriteImplicitOps(li, MI, NewVReg, vrm);
1276 // Reuse NewVReg for other reads.
1277 bool HasEarlyClobber = false;
1278 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1279 MachineOperand &mopj = MI->getOperand(Ops[j]);
1280 mopj.setReg(NewVReg);
1281 if (mopj.isImplicit())
1282 rewriteImplicitOps(li, MI, NewVReg, vrm);
1283 if (mopj.isEarlyClobber())
1284 HasEarlyClobber = true;
1287 if (CreatedNewVReg) {
// Record how to re-materialize the new vreg; re-mat ids are per-val#.
1289 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1290 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1291 // Each valnum may have its own remat id.
1292 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1294 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1296 if (!CanDelete || (HasUse && HasDef)) {
1297 // If this is a two-addr instruction then its use operands are
1298 // rematerializable but its def is not. It should be assigned a
// stack slot (continuation elided).
1300 vrm.assignVirt2StackSlot(NewVReg, Slot);
1303 vrm.assignVirt2StackSlot(NewVReg, Slot);
1305 } else if (HasUse && HasDef &&
1306 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1307 // If this interval hasn't been assigned a stack slot (because earlier
1308 // def is a deleted remat def), do it now.
1309 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1310 vrm.assignVirt2StackSlot(NewVReg, Slot);
1313 // Re-matting an instruction with virtual register use. Add the
1314 // register as an implicit use on the use MI.
1315 if (DefIsReMat && ImpUse)
1316 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1318 // Create a new register interval for this spill / remat.
1319 LiveInterval &nI = getOrCreateInterval(NewVReg);
1320 if (CreatedNewVReg) {
1321 NewLIs.push_back(&nI);
1322 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1324 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build live ranges on the new interval for the rewritten use and/or def.
1328 if (CreatedNewVReg) {
1329 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1330 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1331 DEBUG(dbgs() << " +" << LR);
1334 // Extend the split live interval to this def / use.
1335 SlotIndex End = index.getDefIndex();
1336 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1337 nI.getValNumInfo(nI.getNumValNums()-1));
1338 DEBUG(dbgs() << " +" << LR);
1343 // An early clobber starts at the use slot, except for an early clobber
1344 // tied to a use operand (yes, that is a thing).
1345 LiveRange LR(HasEarlyClobber && !HasUse ?
1346 index.getUseIndex() : index.getDefIndex(),
1347 index.getStoreIndex(),
1348 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1349 DEBUG(dbgs() << " +" << LR);
1354 dbgs() << "\t\t\t\tAdded new interval: ";
1355 nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the interval is killed somewhere
/// in MBB strictly after Idx. (A VNInfo parameter appears at call sites but
/// its declaration line is elided from this listing.)
1361 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1363 MachineBasicBlock *MBB,
1364 SlotIndex Idx) const {
// Query the half-open window (Idx, end-of-MBB].
1365 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1368 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1369 /// during spilling.
// Pairs an instruction with its slot index so rewrite candidates can be
// sorted into program order. (Member declarations are elided from this
// listing — presumably SlotIndex Index; MachineInstr *MI;.)
1371 struct RewriteInfo {
1374 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
/// RewriteInfoCompare - Strict weak ordering by slot index, used to sort
/// RewriteInfo entries for rewriteInstructionsForSpills.
1377 struct RewriteInfoCompare {
1378 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1379 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def/use of li inside the
/// live range I, delegating per-instruction work to
/// rewriteInstructionForSpills, and record per-MBB spill/restore points in
/// SpillIdxes/RestoreIdxes (with SpillMBBs/RestoreMBBs as the block masks).
// NOTE(review): many interior lines (continues, else branches, braces) are
// elided from this listing; comments below cover only the visible code.
1384 void LiveIntervals::
1385 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1386 LiveInterval::Ranges::const_iterator &I,
1387 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1388 unsigned Slot, int LdSlot,
1389 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1391 const TargetRegisterClass* rc,
1392 SmallVector<int, 4> &ReMatIds,
1393 const MachineLoopInfo *loopInfo,
1394 BitVector &SpillMBBs,
1395 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1396 BitVector &RestoreMBBs,
1397 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1398 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1399 std::vector<LiveInterval*> &NewLIs) {
1400 bool AllCanFold = true;
1401 unsigned NewVReg = 0;
// Normalize the range endpoints to whole-instruction (base) indices.
1402 SlotIndex start = I->start.getBaseIndex();
1403 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1405 // First collect all the def / use in this live range that will be rewritten.
1406 // Make sure they are sorted according to instruction index.
1407 std::vector<RewriteInfo> RewriteMIs;
1408 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1409 re = mri_->reg_end(); ri != re; ) {
1410 MachineInstr *MI = &*ri;
1411 MachineOperand &O = ri.getOperand();
1413 if (MI->isDebugValue()) {
1414 // Modify DBG_VALUE now that the value is in a spill slot.
1415 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1416 uint64_t Offset = MI->getOperand(1).getImm();
1417 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1418 DebugLoc DL = MI->getDebugLoc();
1419 int FI = isLoadSS ? LdSlot : (int)Slot;
1420 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1421 Offset, MDPtr, DL)) {
1422 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1423 ReplaceMachineInstrInMaps(MI, NewDV);
1424 MachineBasicBlock *MBB = MI->getParent();
1425 MBB->insert(MBB->erase(MI), NewDV);
// A frame-index DBG_VALUE could not be produced: drop the debug info
// rather than leave it pointing at a dead vreg.
1430 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1431 RemoveMachineInstrFromMaps(MI);
1432 vrm.RemoveMachineInstrFromMaps(MI);
1433 MI->eraseFromParent();
1436 assert(!(O.isImplicit() && O.isUse()) &&
1437 "Spilling register that's used as implicit use?");
1438 SlotIndex index = getInstructionIndex(MI);
// Only instructions inside [start, end) belong to this range.
1439 if (index < start || index >= end)
1443 // Must be defined by an implicit def. It should not be spilled. Note,
1444 // this is for correctness reason. e.g.
1445 // 8 %reg1024<def> = IMPLICIT_DEF
1446 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1447 // The live range [12, 14) are not part of the r1024 live interval since
1448 // it's defined by an implicit def. It will not conflicts with live
1449 // interval of r1025. Now suppose both registers are spilled, you can
1450 // easily see a situation where both registers are reloaded before
1451 // the INSERT_SUBREG and both target registers that would overlap.
1453 RewriteMIs.push_back(RewriteInfo(index, MI));
1455 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1457 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1458 // Now rewrite the defs and uses.
1459 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1460 RewriteInfo &rwi = RewriteMIs[i];
1462 SlotIndex index = rwi.Index;
1463 MachineInstr *MI = rwi.MI;
1464 // If MI def and/or use the same register multiple times, then there
1465 // are multiple entries.
// Skip over the duplicate entries for this MI.
1466 while (i != e && RewriteMIs[i].MI == MI) {
1467 assert(RewriteMIs[i].Index == index);
1470 MachineBasicBlock *MBB = MI->getParent();
1472 if (ImpUse && MI != ReMatDefMI) {
1473 // Re-matting an instruction with virtual register use. Prevent interval
1474 // from being spilled.
1475 getInterval(ImpUse).markNotSpillable();
1478 unsigned MBBId = MBB->getNumber();
1479 unsigned ThisVReg = 0;
// When splitting, reuse the vreg already chosen for this MBB, if any.
1481 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1482 if (NVI != MBBVRegsMap.end()) {
1483 ThisVReg = NVI->second;
1490 // It's better to start a new interval to avoid artifically
1491 // extend the new interval.
1492 if (MI->readsWritesVirtualRegister(li.reg) ==
1493 std::make_pair(false,true)) {
// Pure def: forget the MBB's current vreg so a fresh one is created.
1494 MBBVRegsMap.erase(MBB->getNumber());
1500 bool IsNew = ThisVReg == 0;
1502 // This ends the previous live interval. If all of its def / use
1503 // can be folded, give it a low spill weight.
1504 if (NewVReg && TrySplit && AllCanFold) {
1505 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Per-instruction rewrite; reports back what was replaced and whether the
// resulting operands remain foldable.
1512 bool HasDef = false;
1513 bool HasUse = false;
1514 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1515 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1516 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1517 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1518 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1519 if (!HasDef && !HasUse)
1522 AllCanFold &= CanFold;
1524 // Update weight of spill interval.
1525 LiveInterval &nI = getOrCreateInterval(NewVReg);
1527 // The spill weight is now infinity as it cannot be spilled again.
1528 nI.markNotSpillable();
1532 // Keep track of the last def and first use in each MBB.
1534 if (MI != ReMatOrigDefMI || !CanDelete) {
1535 bool HasKill = false;
1537 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1539 // If this is a two-address code, then this index starts a new VNInfo.
1540 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1542 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1544 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1545 SpillIdxes.find(MBBId);
// Record (or update) the potential spill point for this MBB.
1547 if (SII == SpillIdxes.end()) {
1548 std::vector<SRInfo> S;
1549 S.push_back(SRInfo(index, NewVReg, true));
1550 SpillIdxes.insert(std::make_pair(MBBId, S));
1551 } else if (SII->second.back().vreg != NewVReg) {
1552 SII->second.push_back(SRInfo(index, NewVReg, true));
1553 } else if (index > SII->second.back().index) {
1554 // If there is an earlier def and this is a two-address
1555 // instruction, then it's not possible to fold the store (which
1556 // would also fold the load).
1557 SRInfo &Info = SII->second.back();
1559 Info.canFold = !HasUse;
1561 SpillMBBs.set(MBBId);
1562 } else if (SII != SpillIdxes.end() &&
1563 SII->second.back().vreg == NewVReg &&
1564 index > SII->second.back().index) {
1565 // There is an earlier def that's not killed (must be two-address).
1566 // The spill is no longer needed.
1567 SII->second.pop_back();
1568 if (SII->second.empty()) {
1569 SpillIdxes.erase(MBBId);
1570 SpillMBBs.reset(MBBId);
// Use handling: decide whether a restore is needed and whether earlier
// spill/restore records for this MBB remain foldable.
1577 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1578 SpillIdxes.find(MBBId);
1579 if (SII != SpillIdxes.end() &&
1580 SII->second.back().vreg == NewVReg &&
1581 index > SII->second.back().index)
1582 // Use(s) following the last def, it's not safe to fold the spill.
1583 SII->second.back().canFold = false;
1584 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1585 RestoreIdxes.find(MBBId);
1586 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1587 // If we are splitting live intervals, only fold if it's the first
1588 // use and there isn't another use later in the MBB.
1589 RII->second.back().canFold = false;
1591 // Only need a reload if there isn't an earlier def / use.
1592 if (RII == RestoreIdxes.end()) {
1593 std::vector<SRInfo> Infos;
1594 Infos.push_back(SRInfo(index, NewVReg, true));
1595 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1597 RII->second.push_back(SRInfo(index, NewVReg, true));
1599 RestoreMBBs.set(MBBId);
1603 // Update spill weight.
1604 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1605 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1608 if (NewVReg && TrySplit && AllCanFold) {
1609 // If all of its def / use can be folded, give it a low spill weight.
1610 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if block Id contains a foldable restore
/// record for (index, vr). (The return statements are elided from this
/// listing.)
1615 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1616 unsigned vr, BitVector &RestoreMBBs,
1617 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1618 if (!RestoreMBBs[Id])
1620 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Linear scan for a matching, still-foldable restore entry.
1621 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1622 if (Restores[i].index == index &&
1623 Restores[i].vreg == vr &&
1624 Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the restore records in block Id matching
/// (index, vr) by clearing their index to the sentinel SlotIndex().
1629 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1630 unsigned vr, BitVector &RestoreMBBs,
1631 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1632 if (!RestoreMBBs[Id])
1634 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Entries are invalidated in place rather than erased, so indices into the
// vector held elsewhere stay valid.
1635 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1636 if (Restores[i].index == index && Restores[i].vreg)
1637 Restores[i].index = SlotIndex();
1640 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1641 /// spilled and create empty intervals for their uses.
// NOTE(review): some interior lines (continues, operand rewrites) are
// elided from this listing.
1643 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1644 const TargetRegisterClass* rc,
1645 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining reference to li.reg (defs and uses).
1646 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1647 re = mri_->reg_end(); ri != re; ) {
1648 MachineOperand &O = ri.getOperand();
1649 MachineInstr *MI = &*ri;
1651 if (MI->isDebugValue()) {
1652 // Remove debug info for now.
1654 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any remaining def must be the IMPLICIT_DEF itself; delete it outright.
1658 assert(MI->isImplicitDef() &&
1659 "Register def was not rewritten?");
1660 RemoveMachineInstrFromMaps(MI);
1661 vrm.RemoveMachineInstrFromMaps(MI);
1662 MI->eraseFromParent();
1664 // This must be an use of an implicit_def so it's not part of the live
1665 // interval. Create a new empty live interval for it.
1666 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1667 unsigned NewVReg = mri_->createVirtualRegister(rc);
1669 vrm.setIsImplicitlyDefined(NewVReg);
1670 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Retarget every operand of this user that referenced li.reg.
1671 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1672 MachineOperand &MO = MI->getOperand(i);
1673 if (MO.isReg() && MO.getReg() == li.reg) {
/// getSpillWeight - Compute the spill-cost contribution of one def and/or
/// use at the given loop depth. (The return type line and the clamp
/// assignment after the depth check are elided from this listing.)
1683 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1684 // Limit the loop depth ridiculousness.
1685 if (loopDepth > 200)
1688 // The loop depth is used to roughly estimate the number of times the
1689 // instruction is executed. Something like 10^d is simple, but will quickly
1690 // overflow a float. This expression behaves like 10^d for small d, but is
1691 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1692 // headroom before overflow.
1693 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are bools, so the multiplier is 0, 1, or 2.
1695 return (isDef + isUse) * lc;
/// normalizeSpillWeights - Rescale each new interval's accumulated weight
/// by its size via normalizeSpillWeight (the assignment target line is
/// elided from this listing).
1698 static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1699 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1701 normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
/// addIntervalsForSpills - Spill interval li (for the linear-scan
/// allocator): decide re-materialization per val#, assign a stack slot if
/// needed, rewrite all defs/uses onto new vregs, then place/fold spills and
/// restores. Returns the list of newly created intervals.
// NOTE(review): many interior lines (returns, else branches, braces) are
// elided from this listing; comments below cover only visible code.
1704 std::vector<LiveInterval*> LiveIntervals::
1705 addIntervalsForSpills(const LiveInterval &li,
1706 const SmallVectorImpl<LiveInterval*> &SpillIs,
1707 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1708 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1711 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1712 li.print(dbgs(), tri_);
1716 // Each bit specify whether a spill is required in the MBB.
1717 BitVector SpillMBBs(mf_->getNumBlockIDs());
1718 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1719 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1720 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1721 DenseMap<unsigned,unsigned> MBBVRegsMap;
1722 std::vector<LiveInterval*> NewLIs;
1723 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# re-materialization bookkeeping, indexed by VNInfo id.
1725 unsigned NumValNums = li.getNumValNums();
1726 SmallVector<MachineInstr*, 4> ReMatDefs;
1727 ReMatDefs.resize(NumValNums, NULL);
1728 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1729 ReMatOrigDefs.resize(NumValNums, NULL);
1730 SmallVector<int, 4> ReMatIds;
1731 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1732 BitVector ReMatDelete(NumValNums);
1733 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1735 // Spilling a split live interval. It cannot be split any further. Also,
1736 // it's also guaranteed to be a single val# / range interval.
1737 if (vrm.getPreSplitReg(li.reg)) {
1738 vrm.setIsSplitFromReg(li.reg, 0);
1739 // Unset the split kill marker on the last use.
1740 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1741 if (KillIdx != SlotIndex()) {
1742 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1743 assert(KillMI && "Last use disappeared?");
1744 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1745 assert(KillOp != -1 && "Last use disappeared?");
1746 KillMI->getOperand(KillOp).setIsKill(false);
1748 vrm.removeKillPoint(li.reg);
1749 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1750 Slot = vrm.getStackSlot(li.reg);
1751 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1752 MachineInstr *ReMatDefMI = DefIsReMat ?
1753 vrm.getReMaterializedMI(li.reg) : NULL;
1755 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1756 bool isLoad = isLoadSS ||
1757 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1758 bool IsFirstRange = true;
1759 for (LiveInterval::Ranges::const_iterator
1760 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1761 // If this is a split live interval with multiple ranges, it means there
1762 // are two-address instructions that re-defined the value. Only the
1763 // first def can be rematerialized!
1765 // Note ReMatOrigDefMI has already been deleted.
1766 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1767 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1768 false, vrm, rc, ReMatIds, loopInfo,
1769 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1770 MBBVRegsMap, NewLIs);
// Subsequent ranges: plain spill, no re-materialization.
1772 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1773 Slot, 0, false, false, false,
1774 false, vrm, rc, ReMatIds, loopInfo,
1775 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1776 MBBVRegsMap, NewLIs);
1778 IsFirstRange = false;
// Early-out path for pre-split intervals ends here (return elided).
1781 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1782 normalizeSpillWeights(NewLIs);
// Splitting is only worthwhile when the interval spans multiple blocks.
1786 bool TrySplit = !intervalIsInOneMBB(li);
1789 bool NeedStackSlot = false;
1790 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1792 const VNInfo *VNI = *i;
1793 unsigned VN = VNI->id;
1794 if (VNI->isUnused())
1795 continue; // Dead val#.
1796 // Is the def for the val# rematerializable?
1797 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1799 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1800 // Remember how to remat the def of this val#.
1801 ReMatOrigDefs[VN] = ReMatDefMI;
1802 // Original def may be modified so we have to make a copy here.
1803 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1804 CloneMIs.push_back(Clone);
1805 ReMatDefs[VN] = Clone;
1807 bool CanDelete = true;
1808 if (VNI->hasPHIKill()) {
1809 // A kill is a phi node, not all of its uses can be rematerialized.
1810 // It must not be deleted.
1812 // Need a stack slot if there is any live range where uses cannot be
// rematerialized (continuation elided).
1814 NeedStackSlot = true;
1817 ReMatDelete.set(VN);
1819 // Need a stack slot if there is any live range where uses cannot be
// rematerialized (continuation elided).
1821 NeedStackSlot = true;
1825 // One stack slot per live interval.
1826 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1827 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1828 Slot = vrm.assignVirt2StackSlot(li.reg);
1830 // This case only occurs when the prealloc splitter has already assigned
1831 // a stack slot to this vreg.
1833 Slot = vrm.getStackSlot(li.reg);
1836 // Create new intervals and rewrite defs and uses.
1837 for (LiveInterval::Ranges::const_iterator
1838 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1839 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1840 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1841 bool DefIsReMat = ReMatDefMI != NULL;
1842 bool CanDelete = ReMatDelete[I->valno->id];
1844 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1845 bool isLoad = isLoadSS ||
1846 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1847 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1848 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1849 CanDelete, vrm, rc, ReMatIds, loopInfo,
1850 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1851 MBBVRegsMap, NewLIs);
1854 // Insert spills / restores if we are splitting.
1856 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1857 normalizeSpillWeights(NewLIs);
// Place spill points: try to fold each recorded store into its def.
1861 SmallPtrSet<LiveInterval*, 4> AddedKill;
1862 SmallVector<unsigned, 2> Ops;
1863 if (NeedStackSlot) {
1864 int Id = SpillMBBs.find_first();
1866 std::vector<SRInfo> &spills = SpillIdxes[Id];
1867 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1868 SlotIndex index = spills[i].index;
1869 unsigned VReg = spills[i].vreg;
1870 LiveInterval &nI = getOrCreateInterval(VReg);
1871 bool isReMat = vrm.isReMaterialized(VReg);
1872 MachineInstr *MI = getInstructionFromIndex(index);
1873 bool CanFold = false;
1874 bool FoundUse = false;
1876 if (spills[i].canFold) {
1878 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1879 MachineOperand &MO = MI->getOperand(j);
1880 if (!MO.isReg() || MO.getReg() != VReg)
1887 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1888 RestoreMBBs, RestoreIdxes))) {
1889 // MI has two-address uses of the same register. If the use
1890 // isn't the first and only use in the BB, then we can't fold
1891 // it. FIXME: Move this to rewriteInstructionsForSpills.
1898 // Fold the store into the def if possible.
1899 bool Folded = false;
1900 if (CanFold && !Ops.empty()) {
1901 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1904 // Also folded uses, do not issue a load.
1905 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1906 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1908 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1912 // Otherwise tell the spiller to issue a spill.
1914 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1915 bool isKill = LR->end == index.getStoreIndex();
1916 if (!MI->registerDefIsDead(nI.reg))
1917 // No need to spill a dead def.
1918 vrm.addSpillPoint(VReg, isKill, MI);
1920 AddedKill.insert(&nI);
1923 Id = SpillMBBs.find_next(Id);
// Place restore points: try to fold each reload (or re-mat load) into its use.
1927 int Id = RestoreMBBs.find_first();
1929 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1930 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1931 SlotIndex index = restores[i].index;
// Entries invalidated by eraseRestoreInfo carry the sentinel index.
1932 if (index == SlotIndex())
1934 unsigned VReg = restores[i].vreg;
1935 LiveInterval &nI = getOrCreateInterval(VReg);
1936 bool isReMat = vrm.isReMaterialized(VReg);
1937 MachineInstr *MI = getInstructionFromIndex(index);
1938 bool CanFold = false;
1940 if (restores[i].canFold) {
1942 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1943 MachineOperand &MO = MI->getOperand(j);
1944 if (!MO.isReg() || MO.getReg() != VReg)
1948 // If this restore were to be folded, it would have been folded
// already (continuation elided).
1957 // Fold the load into the use if possible.
1958 bool Folded = false;
1959 if (CanFold && !Ops.empty()) {
1961 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Re-materialized def: try folding the re-mat load itself instead.
1963 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1965 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1966 // If the rematerializable def is a load, also try to fold it.
1967 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1968 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1969 Ops, isLoadSS, LdSlot, VReg);
1971 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1973 // Re-matting an instruction with virtual register use. Add the
1974 // register as an implicit use on the use MI and mark the register
1975 // interval as unspillable.
1976 LiveInterval &ImpLi = getInterval(ImpUse);
1977 ImpLi.markNotSpillable();
1978 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1983 // If folding is not possible / failed, then tell the spiller to issue a
1984 // load / rematerialization for us.
1986 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1988 vrm.addRestorePoint(VReg, MI);
1990 Id = RestoreMBBs.find_next(Id);
1993 // Finalize intervals: add kills, finalize spill weights, and filter out
// dead intervals (continuation elided).
1995 std::vector<LiveInterval*> RetNewLIs;
1996 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1997 LiveInterval *LI = NewLIs[i];
1999 if (!AddedKill.count(LI)) {
// Mark the last use of each surviving interval as a kill and remember
// the kill point in the VirtRegMap.
2000 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2001 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2002 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2003 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2004 assert(UseIdx != -1);
2005 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2006 LastUse->getOperand(UseIdx).setIsKill();
2007 vrm.addKillPoint(LI->reg, LastUseIdx);
2010 RetNewLIs.push_back(LI);
2014 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2015 normalizeSpillWeights(RetNewLIs);
2019 /// hasAllocatableSuperReg - Return true if the specified physical register has
2020 /// any super register that's allocatable.
2021 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2022 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2023 if (allocatableRegs_[*AS] && hasInterval(*AS))
2028 /// getRepresentativeReg - Find the largest super register of the specified
2029 /// physical register.
2030 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2031 // Find the largest super-register that is allocatable.
2032 unsigned BestReg = Reg;
2033 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2034 unsigned SuperReg = *AS;
2035 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2043 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2044 /// specified interval that conflicts with the specified physical register.
2045 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2046 unsigned PhysReg) const {
2047 unsigned NumConflicts = 0;
2048 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2049 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2050 E = mri_->reg_end(); I != E; ++I) {
2051 MachineOperand &O = I.getOperand();
2052 MachineInstr *MI = O.getParent();
2053 if (MI->isDebugValue())
2055 SlotIndex Index = getInstructionIndex(MI);
2056 if (pli.liveAt(Index))
2059 return NumConflicts;
2062 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2063 /// around all defs and uses of the specified interval. Return true if it
2064 /// was able to cut its interval.
2065 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2066 unsigned PhysReg, VirtRegMap &vrm) {
2067 unsigned SpillReg = getRepresentativeReg(PhysReg);
2069 DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2070 << " represented by " << tri_->getName(SpillReg) << '\n');
2072 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2073 // If there are registers which alias PhysReg, but which are not a
2074 // sub-register of the chosen representative super register. Assert
2075 // since we can't handle it yet.
2076 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2077 tri_->isSuperRegister(*AS, SpillReg));
2080 SmallVector<unsigned, 4> PRegs;
2081 if (hasInterval(SpillReg))
2082 PRegs.push_back(SpillReg);
2083 for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2084 if (hasInterval(*SR))
2085 PRegs.push_back(*SR);
2088 dbgs() << "Trying to spill:";
2089 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2090 dbgs() << ' ' << tri_->getName(PRegs[i]);
2094 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2095 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2096 E = mri_->reg_end(); I != E; ++I) {
2097 MachineOperand &O = I.getOperand();
2098 MachineInstr *MI = O.getParent();
2099 if (MI->isDebugValue() || SeenMIs.count(MI))
2102 SlotIndex Index = getInstructionIndex(MI);
2103 bool LiveReg = false;
2104 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2105 unsigned PReg = PRegs[i];
2106 LiveInterval &pli = getInterval(PReg);
2107 if (!pli.liveAt(Index))
2110 SlotIndex StartIdx = Index.getLoadIndex();
2111 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2112 if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2114 raw_string_ostream Msg(msg);
2115 Msg << "Ran out of registers during register allocation!";
2116 if (MI->isInlineAsm()) {
2117 Msg << "\nPlease check your inline asm statement for invalid "
2118 << "constraints:\n";
2119 MI->print(Msg, tm_);
2121 report_fatal_error(Msg.str());
2123 pli.removeRange(StartIdx, EndIdx);
2128 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2129 vrm.addEmergencySpill(SpillReg, MI);
2135 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2136 MachineInstr* startInst) {
2137 LiveInterval& Interval = getOrCreateInterval(reg);
2138 VNInfo* VN = Interval.getNextValue(
2139 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2140 startInst, getVNInfoAllocator());
2141 VN->setHasPHIKill(true);
2143 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2144 getMBBEndIdx(startInst->getParent()), VN);
2145 Interval.addRange(LR);