//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << '\n';
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
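  // (This rounds the range's exclusive end up to the base index of the first
  // instruction past the range, so [idx, end) walks whole instructions.)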

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
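
/// MultipleDefsBySameMI - Return true if the instruction defs the given
/// register more than once (other than at operand MOIdx), as happens with
/// the multiple sub-register defs left behind by REG_SEQUENCE elimination.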
static bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0)
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // defined by the instruction.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
  }
  if (mi == E)
    // MBB is empty except for DBG_VALUE's.
    return;

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
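///
/// Each instruction index covers four consecutive slots (LOAD, USE, DEF and
/// STORE, in that order), which is why ranges below are built from values
/// like MIIdx.getDefIndex() and start.getStoreIndex() rather than from raw
/// indices.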
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
void LiveIntervals::shrinkToUses(LiveInterval *li) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
    VNInfo *VNI = li->getVNInfoAt(Idx);
    assert(VNI && "Live interval not live into reading instruction");
    if (VNI->def == Idx) {
      // Special case: An early-clobber tied operand reads and writes the
      // register one slot early.
      Idx = Idx.getPrevSlot();
      VNI = li->getVNInfoAt(Idx);
      assert(VNI && "Early-clobber tied value not available");
    }
    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));

    // A use tied to an early-clobber def ends at the load slot and isn't caught
    // above. Catch it here instead. This probably only ever happens for inline
    // assembly.
    if (VNI->def.isUse())
      if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
        WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
        VNInfo *PVNI = li->getVNInfoAt(Stop);
        // A predecessor is not required to have a live-out value for a PHI.
        if (PVNI) {
          assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
          WorkList.push_back(std::make_pair(Stop, PVNI));
        }
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getNextSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->setIsUnused(true);
      NewLI.removeRange(*LII);
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, tri_);
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  // first terminator.
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call
  // instruction that can throw.
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
  while (I != B) {
    --I;
    if (I->getDesc().isCall())
      return I;
  }
  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
}

void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (mri_->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;
      MI->addRegisterKilled(Reg, NULL);
    }
  }
}

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *MI,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            VirtRegMap &vrm,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;

    // If this is the rematerializable definition MI itself and
    // all of its uses are rematerialized, simply delete it.
    if (MI == ReMatOrigDefMI && CanDelete) {
      DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                   << *MI << '\n');
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      break;
    }

    // If def for this use can't be rematerialized, then try folding.
    // If def is rematerializable and it's a load, also try folding.
    TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
    if (isLoad) {
      // Try fold loads (from stack slot, constant pool, etc.) into uses.
      FoldSS = isLoadSS;
      FoldSlot = LdSlot;
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
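    // Ops now holds the operand indices of MI that read or write Reg.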

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             VirtRegMap &vrm,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // If the instruction writes (but does not read) the register, it's
        // better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                               index, end, MI, ReMatOrigDefMI,
                                               ReMatDefMI, Slot, LdSlot,
                                               isLoad, isLoadSS, DefIsReMat,
                                               CanDelete, vrm, rc, ReMatIds,
                                               loopInfo, NewVReg, ImpUse,
                                               HasDef, HasUse, MBBVRegsMap,
                                               NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new
          // VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
          MO.setIsKill(false);
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
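  // For example, d=1 gives (1 + 100/11)^1 ~= 10.1 (close to 10^1), while
  // d=3 gives (1 + 100/13)^3 ~= 657 rather than 1000.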
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    NewLIs[i]->weight =
      normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
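  // Note: MAX_STACK_SLOT doubles as "no remat id assigned yet" here; real
  // remat ids are handed out by vrm.assignVirtReMatId() on first use.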
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It's
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }
  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
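
  // From here on we are splitting: walk the blocks that need spills and then
  // the blocks that need restores, folding the memory access into the
  // surrounding instruction wherever the target allows it.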
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            // An instruction index has load, use, def and store sub-slots;
            // with the store folded away the new interval need not live from
            // the def slot to the store slot.
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }
        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }
      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }
  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
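
// Note: the intervals returned by addIntervalsForSpills are the new, shorter
// intervals created around each spill and restore point; the register
// allocator is expected to re-queue them for allocation.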
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
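
// For example, on 32-bit x86 getRepresentativeReg(AL) would typically return
// EAX: EAX has no allocatable super-register of its own, so it is chosen as
// the representative (assuming an interval exists for it).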
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
               << " represented by " << tri_->getName(SpillReg) << '\n');

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // There may be registers which alias PhysReg but are not sub-registers
    // of the chosen representative super-register; we cannot handle those
    // yet, so assert that none exist.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));
  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
    if (hasInterval(*SR))
      PRegs.push_back(*SR);
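
  // PRegs now holds SpillReg plus any of its sub-registers that have live
  // intervals; each of these must be cut around every instruction that
  // touches li.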
2096 dbgs() << "Trying to spill:";
2097 for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2098 dbgs() << ' ' << tri_->getName(PRegs[i]);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      // Cut the physreg's live range from just before this instruction to
      // just after it.
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      pli.removeRange(StartIdx, EndIdx);
      LiveReg = true;
    }
    if (!LiveReg)
      continue;
2136 DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2137 vrm.addEmergencySpill(SpillReg, MI);
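
/// addLiveRangeToEndOfBlock - Create a live range for the given register
/// starting at startInst's def slot and running to the end of startInst's
/// basic block, and return a copy of that range.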
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}