1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
cl::init(false), cl::Hidden);
// Pass-wide statistics, reported via -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
// Pass identity and registration with the PassManager under "liveintervals".
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the passes this analysis requires to run first
// and the analyses it preserves, so the pass manager schedules correctly.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<LiveVariables>();
AU.addRequired<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must have run: live intervals
// are computed on the post-SSA, two-address form of the function.
AU.addPreservedID(PHIEliminationID);
AU.addRequiredID(PHIEliminationID);
AU.addRequiredID(TwoAddressInstructionPassID);
AU.addPreserved<ProcessImplicitDefs>();
AU.addRequired<ProcessImplicitDefs>();
AU.addPreserved<SlotIndexes>();
// Transitive: clients of LiveIntervals may also query SlotIndexes directly.
AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all per-function state: the register-to-interval map
// entries, the VNInfo allocator arena, and any cloned machine instructions.
void LiveIntervals::releaseMemory() {
// Free the live intervals themselves.
// NOTE(review): the loop body (deleting I->second) appears elided in this
// view of the file.
for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
E = r2iMap_.end(); I != E; ++I)
// Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
VNInfoAllocator.Reset();
// Return cloned instructions to the function's instruction pool.
while (!CloneMIs.empty()) {
MachineInstr *MI = CloneMIs.back();
mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Register allocate the whole function
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache target and analysis handles used throughout the pass.
mri_ = &mf_->getRegInfo();
tm_ = &fn.getTarget();
tri_ = tm_->getRegisterInfo();
tii_ = tm_->getInstrInfo();
aa_ = &getAnalysis<AliasAnalysis>();
lv_ = &getAnalysis<LiveVariables>();
indexes_ = &getAnalysis<SlotIndexes>();
allocatableRegs_ = tri_->getAllocatableSet(fn);
// Record the number of intervals built, for the -stats report.
numIntervals += getNumIntervals();
/// print - Implement the dump method.
/// Prints every computed live interval to OS using the target register names.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
OS << "********** INTERVALS **********\n";
for (const_iterator I = begin(), E = end(); I != E; ++I) {
I->second->print(OS, tri_);
// printInstrs - Print every machine instruction in the function to OS,
// prefixed with its slot index; DBG_VALUE instructions are skipped since
// they carry no slot index.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
OS << "********** MACHINEINSTRS **********\n";
for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
mbbi != mbbe; ++mbbi) {
OS << "BB#" << mbbi->getNumber()
<< ":\t\t# derived from " << mbbi->getName() << "\n";
for (MachineBasicBlock::iterator mii = mbbi->begin(),
mie = mbbi->end(); mii != mie; ++mii) {
if (mii->isDebugValue())
OS << getInstructionIndex(mii) << '\t' << *mii;
// dumpInstrs - Debug helper; presumably forwards to printInstrs(dbgs()) --
// body not visible in this view.
void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Check whether assigning physical register 'reg' to
// the single-range interval 'li' would conflict with any instruction inside
// that range. Copies to/from li.reg are allowed; any other operand mapping
// to a register that overlaps 'reg' is a conflict.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
VirtRegMap &vrm, unsigned reg) {
// We don't handle fancy stuff crossing basic block boundaries
if (li.ranges.size() != 1)
const LiveRange &range = li.ranges.front();
SlotIndex idx = range.start.getBaseIndex();
SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
// Skip deleted instructions
MachineInstr *firstMI = getInstructionFromIndex(idx);
while (!firstMI && idx != end) {
idx = idx.getNextIndex();
firstMI = getInstructionFromIndex(idx);
// Find last instruction in range
SlotIndex lastIdx = end.getPrevIndex();
MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
// Walk backwards past deleted slots until a real instruction is found.
while (!lastMI && lastIdx != idx) {
lastIdx = lastIdx.getPrevIndex();
lastMI = getInstructionFromIndex(lastIdx);
// Range cannot cross basic block boundaries or terminators
MachineBasicBlock *MBB = firstMI->getParent();
if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
MachineBasicBlock::const_iterator E = lastMI;
for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
const MachineInstr &MI = *I;
// Allow copies to and from li.reg
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
if (SrcReg == li.reg || DstReg == li.reg)
// Check for operands using reg
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand& mop = MI.getOperand(i);
unsigned PhysReg = mop.getReg();
if (PhysReg == 0 || PhysReg == li.reg)
// A virtual register operand conflicts through its current assignment,
// if it has one.
if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
if (!vrm.hasPhys(PhysReg))
PhysReg = vrm.getPhys(PhysReg);
if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// No conflicts found.
// conflictsWithAliasRef - Scan every instruction covered by li's ranges
// (skipping deleted instructions and coalesced copies in JoinedCopies) for a
// physical-register operand that overlaps Reg.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
for (SlotIndex index = I->start.getBaseIndex(),
end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
index = index.getNextIndex()) {
MachineInstr *MI = getInstructionFromIndex(index);
continue; // skip deleted instructions
if (JoinedCopies.count(MI))
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand& MO = MI->getOperand(i);
unsigned PhysReg = MO.getReg();
// Only physical registers other than Reg itself can alias-conflict.
if (PhysReg == 0 || PhysReg == Reg ||
TargetRegisterInfo::isVirtualRegister(PhysReg))
if (tri_->regsOverlap(Reg, PhysReg))
// printRegName - Debug helper: print a physical register by its target name,
// or a virtual register as "%reg<N>".
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
if (TargetRegisterInfo::isPhysicalRegister(reg))
dbgs() << tri_->getName(reg);
dbgs() << "%reg" << reg;
// MultipleDefsBySameMI - Return true if the register defined by operand MOIdx
// is defined again by a later operand of the same instruction (e.g. two
// sub-register defs produced when a REG_SEQUENCE was eliminated).
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
unsigned Reg = MI.getOperand(MOIdx).getReg();
for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (MO.getReg() == Reg && MO.isDef()) {
// Sanity check: the two defs must target different sub-registers (or
// the later one is implicit); a full double-def would be malformed.
assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
MI.getOperand(MOIdx).getSubReg() &&
(MO.getSubReg() || MO.isImplicit()));
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
LiveInterval &interval) {
// Only a sub-register def can be a partial redef; earlyclobbers are handled
// separately.
if (!MO.getSubReg() || MO.isEarlyClobber())
SlotIndex RedefIndex = MIIdx.getDefIndex();
// Look up the value live just before this def; if its defining instruction
// also defines interval.reg, this def only partially overwrites it.
const LiveRange *OldLR =
interval.getLiveRangeContaining(RedefIndex.getUseIndex());
if (OldLR->valno->isDefAccurate()) {
MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Build or extend the live interval for a virtual
// register definition at MIIdx. The first def of a vreg creates value #0 and
// the full set of ranges from LiveVariables' kill/alive-block info; later
// defs are two-address redefs, partial (sub-register) redefs, or PHI-join
// defs, each handled in its own branch below.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineBasicBlock::iterator mi,
LiveInterval &interval) {
dbgs() << "\t\tregister: ";
printRegName(interval.reg, tri_);
// Virtual registers may be defined multiple times (due to phi
// elimination and 2-addr elimination). Much of what we do only has to be
// done once for the vreg. We use an empty interval to detect the first
// time we see a vreg.
LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
if (interval.empty()) {
// Get the Idx of the defining instructions.
SlotIndex defIndex = MIIdx.getDefIndex();
// Earlyclobbers move back one, so that they overlap the live range
if (MO.isEarlyClobber())
defIndex = MIIdx.getUseIndex();
// Make sure the first definition is not a partial redefinition. Add an
// <imp-def> of the full register.
mi->addRegisterDefined(interval.reg);
// Remember the copy instruction (if any) so the value number can carry it
// for coalescing.
MachineInstr *CopyMI = NULL;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (mi->isCopyLike() ||
tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
assert(ValNo->id == 0 && "First value in interval is not 0?");
// Loop over all of the blocks that the vreg is defined in. There are
// two cases we have to handle here. The most common case is a vreg
// whose lifetime is contained within a basic block. In this case there
// will be a single kill, in MBB, which comes after the definition.
if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
// FIXME: what about dead vars?
if (vi.Kills[0] != mi)
killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
killIdx = defIndex.getStoreIndex();
// If the kill happens after the definition, we have an intra-block
if (killIdx > defIndex) {
assert(vi.AliveBlocks.empty() &&
"Shouldn't be alive across any blocks!");
LiveRange LR(defIndex, killIdx, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR << "\n");
// The other case we handle is when a virtual register lives to the end
// of the defining block, potentially live across some blocks, then is
// live into some number of blocks, but gets killed. Start by adding a
// range that goes from this definition to the end of the defining block.
LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
DEBUG(dbgs() << " +" << NewLR);
interval.addRange(NewLR);
bool PHIJoin = lv_->isPHIJoin(interval.reg);
// A phi join register is killed at the end of the MBB and revived as a new
// valno in the killing blocks.
assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
DEBUG(dbgs() << " phi-join");
ValNo->setHasPHIKill(true);
// Iterate over all of the blocks that the variable is completely
// live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
E = vi.AliveBlocks.end(); I != E; ++I) {
MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR);
// Finally, this virtual register is live from the start of any killing
// block to the 'use' slot of the killing instruction.
for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
MachineInstr *Kill = vi.Kills[i];
SlotIndex Start = getMBBStartIdx(Kill->getParent());
SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
// Create interval with one of a NEW value number. Note that this value
// number isn't actually defined by an instruction, weird huh? :)
ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
ValNo->setIsPHIDef(true);
LiveRange LR(Start, killIdx, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR);
if (MultipleDefsBySameMI(*mi, MOIdx))
// Multiple defs of the same virtual register by the same instruction.
// e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
// This is likely due to elimination of REG_SEQUENCE instructions. Return
// here since there is nothing to do.
// If this is the second time we see a virtual register definition, it
// must be due to phi elimination or two addr elimination. If this is
// the result of two address elimination, then the vreg is one of the
// def-and-use register operand.
// It may also be partial redef like this:
// 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
// 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
bool PartReDef = isPartialRedef(MIIdx, MO, interval);
if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
// If this is a two-address definition, then we have already processed
// the live range. The only problem is that we didn't realize there
// are actually two values in the live interval. Because of this we
// need to take the LiveRegion that defines this register and split it
SlotIndex RedefIndex = MIIdx.getDefIndex();
if (MO.isEarlyClobber())
RedefIndex = MIIdx.getUseIndex();
const LiveRange *OldLR =
interval.getLiveRangeContaining(RedefIndex.getUseIndex());
VNInfo *OldValNo = OldLR->valno;
SlotIndex DefIndex = OldValNo->def.getDefIndex();
// Delete the previous value, which should be short and continuous,
// because the 2-addr copy must be in the same MBB as the redef.
interval.removeRange(DefIndex, RedefIndex);
// The new value number (#1) is defined by the instruction we claimed
VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
false, // update at *
ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
// Value#0 is now defined by the 2-addr instruction.
OldValNo->def = RedefIndex;
OldValNo->setCopy(0);
// A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (PartReDef && (mi->isCopyLike() ||
tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)))
OldValNo->setCopy(&*mi);
// Add the new live interval which replaces the range for the input copy.
LiveRange LR(DefIndex, RedefIndex, ValNo);
DEBUG(dbgs() << " replace range with " << LR);
interval.addRange(LR);
// If this redefinition is dead, we need to add a dummy unit live
// range covering the def slot.
interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
dbgs() << " RESULT: ";
interval.print(dbgs(), tri_);
} else if (lv_->isPHIJoin(interval.reg)) {
// In the case of PHI elimination, each variable definition is only
// live until the end of the block. We've already taken care of the
// rest of the live range.
SlotIndex defIndex = MIIdx.getDefIndex();
if (MO.isEarlyClobber())
defIndex = MIIdx.getUseIndex();
MachineInstr *CopyMI = NULL;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (mi->isCopyLike() ||
tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// PHI-join defs live exactly from the def to the end of this block.
SlotIndex killIndex = getMBBEndIdx(mbb);
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
ValNo->setHasPHIKill(true);
DEBUG(dbgs() << " phi-join +" << LR);
llvm_unreachable("Multiply defined register");
DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Build the live range for a physical register
// def at MIIdx. Physregs never live across blocks here, so the range is
// closed by scanning forward in the same block for a kill, a redef, or a
// tied two-address use; otherwise the def is considered dead.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator mi,
LiveInterval &interval,
MachineInstr *CopyMI) {
// A physical register cannot be live across basic block, so its
// lifetime must end somewhere in its defining basic block.
dbgs() << "\t\tregister: ";
printRegName(interval.reg, tri_);
SlotIndex baseIndex = MIIdx;
SlotIndex start = baseIndex.getDefIndex();
// Earlyclobbers move back one.
if (MO.isEarlyClobber())
start = MIIdx.getUseIndex();
SlotIndex end = start;
// If it is not used after definition, it is considered dead at
// the instruction defining it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
// For earlyclobbers, the defSlot was pushed back one; the extra
// advance below compensates.
DEBUG(dbgs() << " dead");
end = start.getStoreIndex();
// If it is not dead on definition, it must be killed by a
// subsequent instruction. Hence its interval is:
// [defSlot(def), useSlot(kill)+1)
baseIndex = baseIndex.getNextIndex();
while (++mi != MBB->end()) {
if (mi->isDebugValue())
// Skip slots that no longer map to an instruction.
if (getInstructionFromIndex(baseIndex) == 0)
baseIndex = indexes_->getNextNonNullIndex(baseIndex);
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(dbgs() << " killed");
end = baseIndex.getDefIndex();
int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
if (mi->isRegTiedToUseOperand(DefIdx)) {
// Two-address instruction.
end = baseIndex.getDefIndex();
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that
// defines it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(dbgs() << " dead");
end = start.getStoreIndex();
baseIndex = baseIndex.getNextIndex();
// The only case we should have a dead physreg here without a killing or
// instruction where we know it's dead is if it is live-in to the function
// and never used. Another possible case is the implicit use of the
// physical register has been deleted by two-address pass.
end = start.getStoreIndex();
assert(start < end && "did not find end of interval?");
// Already exists? Extend old live interval.
LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
bool Extend = OldLR != interval.end();
VNInfo *ValNo = Extend
? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
if (MO.isEarlyClobber() && Extend)
ValNo->setHasRedefByEC(true);
LiveRange LR(start, end, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register def operand: virtual registers go
// to handleVirtualRegisterDef; allocatable physical registers (and their
// sub-registers) go to handlePhysicalRegisterDef.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MI,
if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
getOrCreateInterval(MO.getReg()));
else if (allocatableRegs_[MO.getReg()]) {
// For copies, pass the instruction along so the value number records it.
MachineInstr *CopyMI = NULL;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (MI->isCopyLike() ||
tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
getOrCreateInterval(MO.getReg()), CopyMI);
// Def of a register also defines its sub-registers.
for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
// If MI also modifies the sub-register explicitly, avoid processing it
// more than once. Do not pass in TRI here so it checks for exact match.
if (!MI->definesRegister(*AS))
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build a range for a register that is live into MBB
// with no visible def: scan forward for a kill or redef to close the range,
// and mark its value number as a PHI def starting at the block entry.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
LiveInterval &interval, bool isAlias) {
dbgs() << "\t\tlivein register: ";
printRegName(interval.reg, tri_);
// Look for kills, if it reaches a def before it's killed, then it shouldn't
// be considered a livein.
MachineBasicBlock::iterator mi = MBB->begin();
MachineBasicBlock::iterator E = MBB->end();
// Skip over DBG_VALUE at the start of the MBB.
if (mi != E && mi->isDebugValue()) {
while (++mi != E && mi->isDebugValue())
// MBB is empty except for DBG_VALUE's.
SlotIndex baseIndex = MIIdx;
SlotIndex start = baseIndex;
if (getInstructionFromIndex(baseIndex) == 0)
baseIndex = indexes_->getNextNonNullIndex(baseIndex);
SlotIndex end = baseIndex;
bool SeenDefUse = false;
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(dbgs() << " killed");
end = baseIndex.getDefIndex();
} else if (mi->definesRegister(interval.reg, tri_)) {
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that defines
// it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(dbgs() << " dead");
end = start.getStoreIndex();
while (++mi != E && mi->isDebugValue())
// Skip over DBG_VALUE.
baseIndex = indexes_->getNextNonNullIndex(baseIndex);
// Live-in register might not be used at all.
DEBUG(dbgs() << " dead");
end = MIIdx.getStoreIndex();
DEBUG(dbgs() << " live through");
// The live-in value is defined "at the block boundary" -- a PHI def with
// no defining instruction.
interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
0, false, VNInfoAllocator);
vni->setIsPHIDef(true);
LiveRange LR(start, end, vni);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
<< "********** Function: "
<< ((Value*)mf_->getFunction())->getName() << '\n');
// Registers only ever used as <undef>; they get empty intervals at the end.
SmallVector<unsigned, 8> UndefUses;
for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
MachineBasicBlock *MBB = MBBI;
// Track the index of the current machine instr.
SlotIndex MIIndex = getMBBStartIdx(MBB);
DEBUG(dbgs() << "BB#" << MBB->getNumber()
<< ":\t\t# derived from " << MBB->getName() << "\n");
// Create intervals for live-ins to this BB first.
for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
LE = MBB->livein_end(); LI != LE; ++LI) {
handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
// Multiple live-ins can alias the same register.
for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
if (!hasInterval(*AS))
handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// Skip over empty initial indices.
if (getInstructionFromIndex(MIIndex) == 0)
MIIndex = indexes_->getNextNonNullIndex(MIIndex);
for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
DEBUG(dbgs() << MIIndex << "\t" << *MI);
if (MI->isDebugValue())
// Walk operands in reverse so defs added by handleRegisterDef (e.g.
// implicit full-register defs) don't disturb the iteration.
for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.getReg())
// handle register defs - build intervals
handleRegisterDef(MBB, MI, MIIndex, MO, i);
else if (MO.isUndef())
UndefUses.push_back(MO.getReg());
// Move to the next instr slot.
MIIndex = indexes_->getNextNonNullIndex(MIIndex);
// Create empty intervals for registers defined by implicit_def's (except
// for those implicit_def that define values which are liveout of their
for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
unsigned UndefReg = UndefUses[i];
(void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a fresh LiveInterval for 'reg'. Physical
// registers get an infinite spill weight so they are never chosen for
// spilling; virtual registers start at weight 0.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers using this pass's VNInfo allocator.
NewLI->Copy(*li, mri_, getVNInfoAllocator());
773 //===----------------------------------------------------------------------===//
774 // Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
MachineInstr *MI) const {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
unsigned Reg = MO.getReg();
// Skip the interval's own register and non-operands.
if (Reg == 0 || Reg == li.reg)
// Unallocatable physregs (e.g. reserved registers) don't count as
// register operands for remat purposes.
if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
!allocatableRegs_[Reg])
// FIXME: For now, only remat MI with at most one register operand.
"Can't rematerialize instruction with multiple register operand!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
SlotIndex UseIdx) const {
SlotIndex Index = getInstructionIndex(MI);
VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// Same value number must be live at both points.
LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
const VNInfo *ValNo, MachineInstr *MI,
SmallVectorImpl<LiveInterval*> &SpillIs,
// The target decides whether MI is trivially rematerializable at all.
if (!tii_->isTriviallyReMaterializable(MI, aa_))
// Target-specific code can mark an instruction as being rematerializable
// if it has one virtual reg use, though it had better be something like
// a PIC base register which is likely to be live everywhere.
unsigned ImpUse = getReMatImplicitUse(li, MI);
const LiveInterval &ImpLi = getInterval(ImpUse);
// Every (non-debug) use of li.reg reached by ValNo must also be reached
// by the implicit-use register's value, or remat would change semantics.
for (MachineRegisterInfo::use_nodbg_iterator
ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
MachineInstr *UseMI = &*ri;
SlotIndex UseIdx = getInstructionIndex(UseMI);
if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
// If a register operand of the re-materialized instruction is going to
// be spilled next, then it's not legal to re-materialize this instruction.
for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// Convenience overload: delegates to the full version with empty
/// spill-list / scratch arguments.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
const VNInfo *ValNo, MachineInstr *MI) {
SmallVector<LiveInterval*, 4> Dummy1;
return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
SmallVectorImpl<LiveInterval*> &SpillIs,
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
const VNInfo *VNI = *i;
continue; // Dead val#.
// Is the def for the val# rematerializable?
// Without an accurate def location we cannot find the defining
// instruction, so conservatively fail.
if (!VNI->isDefAccurate())
MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
bool DefIsLoad = false;
!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding; accumulates mod/ref info for the folded operands in MRInfo.
static bool FilterFoldedOps(MachineInstr *MI,
SmallVector<unsigned, 2> &Ops,
SmallVector<unsigned, 2> &FoldOps) {
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
unsigned OpIdx = Ops[i];
MachineOperand &MO = MI->getOperand(OpIdx);
// FIXME: fold subreg use.
MRInfo |= (unsigned)VirtRegMap::isMod;
// Filter out two-address use operand(s).
if (MI->isRegTiedToDefOperand(OpIdx)) {
MRInfo = VirtRegMap::isModRef;
MRInfo |= (unsigned)VirtRegMap::isRef;
FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// the VirtRegMap / index maps are updated accordingly.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
VirtRegMap &vrm, MachineInstr *DefMI,
SmallVector<unsigned, 2> &Ops,
bool isSS, int Slot, unsigned Reg) {
// If it is an implicit def instruction, just delete it.
if (MI->isImplicitDef()) {
RemoveMachineInstrFromMaps(MI);
vrm.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
// Filter the list of operand indexes that are to be folded. Abort if
// any operand will prevent folding.
SmallVector<unsigned, 2> FoldOps;
if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
// The only time it's safe to fold into a two address instruction is when
// it's folding reload and spill from / into a spill stack slot.
if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot or the remat def.
MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
: tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
// Remember this instruction uses the spill slot.
if (isSS) vrm.addSpillSlotUse(Slot, fmi);
// Attempt to fold the memory reference into the instruction. If
// we can do this, we don't need to insert spill code.
MachineBasicBlock &MBB = *MI->getParent();
if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer all bookkeeping from the old instruction to the folded one,
// then splice the new instruction in place of the old.
vrm.transferSpillPts(MI, fmi);
vrm.transferRestorePts(MI, fmi);
vrm.transferEmergencySpills(MI, fmi);
ReplaceMachineInstrInMaps(MI, fmi);
MI = MBB.insert(MBB.erase(MI), fmi);
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
SmallVector<unsigned, 2> &Ops,
// Filter the list of operand indexes that are to be folded. Abort if
// any operand will prevent folding.
SmallVector<unsigned, 2> FoldOps;
if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
// It's only legal to remat for a use, not a def.
if (ReMat && (MRInfo & VirtRegMap::isMod))
return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if all ranges of li lie within a single
// machine basic block (comparing the covering MBB of the first range with
// the covering MBB of each subsequent range).
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
for (++itr; itr != li.ranges.end(); ++itr) {
MachineBasicBlock *mbb2 =
indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
MachineInstr *MI, unsigned NewVReg,
// There is an implicit use. That means one of the other operand is
// being remat'ed and the remat'ed instruction has li.reg as an
// use operand. Make sure we rewrite that as well.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
unsigned Reg = MO.getReg();
// Only rematerialized virtual registers are of interest here.
if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
if (!vrm.isReMaterialized(Reg))
MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
// If the remat'ed instruction reads li.reg, redirect that use to NewVReg.
MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
UseMO->setReg(NewVReg);
1030 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1031 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites the operands of one instruction that reference the spilled
// interval 'li': creates (or reuses, via NewVReg in/out param) a replacement
// vreg, tries to fold the load/store directly into MI, records remat / stack
// slot info in the VirtRegMap, and extends the new interval's live ranges.
// Returns whether all rewritten operands could be folded (feeds AllCanFold
// in the caller). NOTE(review): this listing elides lines; several branches
// and early-continues are missing from view.
1032 bool LiveIntervals::
1033 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1034 bool TrySplit, SlotIndex index, SlotIndex end,
1036 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1037 unsigned Slot, int LdSlot,
1038 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1040 const TargetRegisterClass* rc,
1041 SmallVector<int, 4> &ReMatIds,
1042 const MachineLoopInfo *loopInfo,
1043 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1044 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1045 std::vector<LiveInterval*> &NewLIs) {
1046 bool CanFold = false;
// Walk all operands looking for references to the spilled register. The
// bound is re-read each iteration because folding can change the operand list.
1048 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1049 MachineOperand& mop = MI->getOperand(i);
1052 unsigned Reg = mop.getReg();
// Only virtual registers are spilled; skip physregs and null operands.
1053 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1058 bool TryFold = !DefIsReMat;
1059 bool FoldSS = true; // Default behavior unless it's a remat.
1060 int FoldSlot = Slot;
1062 // If this is the rematerializable definition MI itself and
1063 // all of its uses are rematerialized, simply delete it.
1064 if (MI == ReMatOrigDefMI && CanDelete) {
1065 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
// Remove the def from both index maps before erasing the instruction.
1067 RemoveMachineInstrFromMaps(MI);
1068 vrm.RemoveMachineInstrFromMaps(MI);
1069 MI->eraseFromParent();
1073 // If def for this use can't be rematerialized, then try folding.
1074 // If def is rematerializable and it's a load, also try folding.
1075 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1077 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1083 // Scan all of the operands of this instruction rewriting operands
1084 // to use NewVReg instead of li.reg as appropriate. We do this for
1087 // 1. If the instr reads the same spilled vreg multiple times, we
1088 // want to reuse the NewVReg.
1089 // 2. If the instr is a two-addr instruction, we are required to
1090 // keep the src/dst regs pinned.
1092 // Keep track of whether we replace a use and/or def so that we can
1093 // create the spill interval with the appropriate range.
1094 SmallVector<unsigned, 2> Ops;
1095 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1097 // Create a new virtual register for the spill interval.
1098 // Create the new register now so we can map the fold instruction
1099 // to the new register so when it is unfolded we get the correct
1101 bool CreatedNewVReg = false;
1103 NewVReg = mri_->createVirtualRegister(rc);
1105 CreatedNewVReg = true;
1107 // The new virtual register should get the same allocation hints as the
// Propagate hints so the allocator can still coalesce with the original.
1109 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1110 if (Hint.first || Hint.second)
1111 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1117 // Do not fold load / store here if we are splitting. We'll find an
1118 // optimal point to insert a load / store later.
1120 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1121 Ops, FoldSS, FoldSlot, NewVReg)) {
1122 // Folding the load/store can completely change the instruction in
1123 // unpredictable ways, rescan it from the beginning.
1126 // We need to give the new vreg the same stack slot as the
1127 // spilled interval.
1128 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If the fold consumed the instruction entirely there is nothing left to
// rewrite; otherwise restart the operand scan on the folded instruction.
1134 if (isNotInMIMap(MI))
1136 goto RestartInstruction;
1139 // We'll try to fold it later if it's profitable.
1140 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, plus every other operand that reads the same reg.
1144 mop.setReg(NewVReg);
1145 if (mop.isImplicit())
1146 rewriteImplicitOps(li, MI, NewVReg, vrm);
1148 // Reuse NewVReg for other reads.
1149 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1150 MachineOperand &mopj = MI->getOperand(Ops[j]);
1151 mopj.setReg(NewVReg);
1152 if (mopj.isImplicit())
1153 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record remat id / stack slot bookkeeping for a freshly created vreg.
1156 if (CreatedNewVReg) {
1158 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1159 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1160 // Each valnum may have its own remat id.
1161 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1163 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1165 if (!CanDelete || (HasUse && HasDef)) {
1166 // If this is a two-addr instruction then its use operands are
1167 // rematerializable but its def is not. It should be assigned a
1169 vrm.assignVirt2StackSlot(NewVReg, Slot);
1172 vrm.assignVirt2StackSlot(NewVReg, Slot);
1174 } else if (HasUse && HasDef &&
1175 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1176 // If this interval hasn't been assigned a stack slot (because earlier
1177 // def is a deleted remat def), do it now.
1178 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1179 vrm.assignVirt2StackSlot(NewVReg, Slot);
1182 // Re-matting an instruction with virtual register use. Add the
1183 // register as an implicit use on the use MI.
1184 if (DefIsReMat && ImpUse)
1185 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1187 // Create a new register interval for this spill / remat.
1188 LiveInterval &nI = getOrCreateInterval(NewVReg);
1189 if (CreatedNewVReg) {
1190 NewLIs.push_back(&nI);
1191 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1193 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend / create the live ranges of the new interval around this
// instruction: [load, def) covers a use, [def, store) covers a def.
1197 if (CreatedNewVReg) {
1198 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1199 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1200 DEBUG(dbgs() << " +" << LR);
1203 // Extend the split live interval to this def / use.
1204 SlotIndex End = index.getDefIndex();
1205 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1206 nI.getValNumInfo(nI.getNumValNums()-1));
1207 DEBUG(dbgs() << " +" << LR);
1212 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1213 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1214 DEBUG(dbgs() << " +" << LR);
1219 dbgs() << "\t\t\t\tAdded new interval: ";
1220 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if the interval is killed anywhere in
// MBB strictly after the given index (used to decide whether a spill store
// is needed after a def). NOTE(review): a VNInfo parameter line appears to
// be elided from this listing.
1226 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1228 MachineBasicBlock *MBB,
1229 SlotIndex Idx) const {
// Query the half-open range (Idx, end-of-block] for recorded kills.
1230 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1233 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1234 /// during spilling.
// Pairs an instruction with its slot index so the worklist can be sorted
// into program order before rewriting.
1236 struct RewriteInfo {
1239 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
// Strict-weak ordering by slot index, for std::sort over RewriteInfo.
1242 struct RewriteInfoCompare {
1243 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1244 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every instruction in the live range
// *I that defs/uses li.reg: collects them in index order, calls
// rewriteInstructionForSpills on each, and records per-MBB spill points
// (SpillMBBs/SpillIdxes) and restore points (RestoreMBBs/RestoreIdxes) for
// the spiller. DBG_VALUEs are retargeted to the stack slot or dropped.
// NOTE(review): this listing elides lines; several branches are incomplete.
1249 void LiveIntervals::
1250 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1251 LiveInterval::Ranges::const_iterator &I,
1252 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1253 unsigned Slot, int LdSlot,
1254 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1256 const TargetRegisterClass* rc,
1257 SmallVector<int, 4> &ReMatIds,
1258 const MachineLoopInfo *loopInfo,
1259 BitVector &SpillMBBs,
1260 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1261 BitVector &RestoreMBBs,
1262 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1263 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1264 std::vector<LiveInterval*> &NewLIs) {
1265 bool AllCanFold = true;
1266 unsigned NewVReg = 0;
// Round the range down/up to whole-instruction base indexes.
1267 SlotIndex start = I->start.getBaseIndex();
1268 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1270 // First collect all the def / use in this live range that will be rewritten.
1271 // Make sure they are sorted according to instruction index.
1272 std::vector<RewriteInfo> RewriteMIs;
1273 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1274 re = mri_->reg_end(); ri != re; ) {
1275 MachineInstr *MI = &*ri;
1276 MachineOperand &O = ri.getOperand();
1278 if (MI->isDebugValue()) {
1279 // Modify DBG_VALUE now that the value is in a spill slot.
1280 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1281 uint64_t Offset = MI->getOperand(1).getImm();
1282 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1283 DebugLoc DL = MI->getDebugLoc();
1284 int FI = isLoadSS ? LdSlot : (int)Slot;
1285 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1286 Offset, MDPtr, DL)) {
1287 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1288 ReplaceMachineInstrInMaps(MI, NewDV);
1289 MachineBasicBlock *MBB = MI->getParent();
1290 MBB->insert(MBB->erase(MI), NewDV);
// Could not emit a frame-index DBG_VALUE: drop the debug info entirely.
1295 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1296 RemoveMachineInstrFromMaps(MI);
1297 vrm.RemoveMachineInstrFromMaps(MI);
1298 MI->eraseFromParent();
1301 assert(!(O.isImplicit() && O.isUse()) &&
1302 "Spilling register that's used as implicit use?");
1303 SlotIndex index = getInstructionIndex(MI);
// Only instructions inside this live range are rewritten here.
1304 if (index < start || index >= end)
1308 // Must be defined by an implicit def. It should not be spilled. Note,
1309 // this is for correctness reason. e.g.
1310 // 8 %reg1024<def> = IMPLICIT_DEF
1311 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1312 // The live range [12, 14) are not part of the r1024 live interval since
1313 // it's defined by an implicit def. It will not conflicts with live
1314 // interval of r1025. Now suppose both registers are spilled, you can
1315 // easily see a situation where both registers are reloaded before
1316 // the INSERT_SUBREG and both target registers that would overlap.
1318 RewriteMIs.push_back(RewriteInfo(index, MI));
1320 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1322 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1323 // Now rewrite the defs and uses.
1324 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1325 RewriteInfo &rwi = RewriteMIs[i];
1327 SlotIndex index = rwi.Index;
1328 MachineInstr *MI = rwi.MI;
1329 // If MI def and/or use the same register multiple times, then there
1330 // are multiple entries.
1331 while (i != e && RewriteMIs[i].MI == MI) {
1332 assert(RewriteMIs[i].Index == index);
1335 MachineBasicBlock *MBB = MI->getParent();
1337 if (ImpUse && MI != ReMatDefMI) {
1338 // Re-matting an instruction with virtual register use. Prevent interval
1339 // from being spilled.
1340 getInterval(ImpUse).markNotSpillable();
// Decide whether to reuse the vreg last used in this MBB or start fresh.
1343 unsigned MBBId = MBB->getNumber();
1344 unsigned ThisVReg = 0;
1346 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1347 if (NVI != MBBVRegsMap.end()) {
1348 ThisVReg = NVI->second;
1355 // It's better to start a new interval to avoid artifically
1356 // extend the new interval.
1357 if (MI->readsWritesVirtualRegister(li.reg) ==
1358 std::make_pair(false,true)) {
1359 MBBVRegsMap.erase(MBB->getNumber());
1365 bool IsNew = ThisVReg == 0;
1367 // This ends the previous live interval. If all of its def / use
1368 // can be folded, give it a low spill weight.
1369 if (NewVReg && TrySplit && AllCanFold) {
1370 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction's operands; HasDef/HasUse report what changed.
1377 bool HasDef = false;
1378 bool HasUse = false;
1379 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1380 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1381 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1382 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1383 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs)
1384 if (!HasDef && !HasUse)
1387 AllCanFold &= CanFold;
1389 // Update weight of spill interval.
1390 LiveInterval &nI = getOrCreateInterval(NewVReg);
1392 // The spill weight is now infinity as it cannot be spilled again.
1393 nI.markNotSpillable();
1397 // Keep track of the last def and first use in each MBB.
1399 if (MI != ReMatOrigDefMI || !CanDelete) {
1400 bool HasKill = false;
1402 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1404 // If this is a two-address code, then this index starts a new VNInfo.
1405 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1407 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
// Record (or merge) a spill point for this def in this MBB.
1409 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1410 SpillIdxes.find(MBBId);
1412 if (SII == SpillIdxes.end()) {
1413 std::vector<SRInfo> S;
1414 S.push_back(SRInfo(index, NewVReg, true));
1415 SpillIdxes.insert(std::make_pair(MBBId, S));
1416 } else if (SII->second.back().vreg != NewVReg) {
1417 SII->second.push_back(SRInfo(index, NewVReg, true));
1418 } else if (index > SII->second.back().index) {
1419 // If there is an earlier def and this is a two-address
1420 // instruction, then it's not possible to fold the store (which
1421 // would also fold the load).
1422 SRInfo &Info = SII->second.back();
1424 Info.canFold = !HasUse;
1426 SpillMBBs.set(MBBId);
1427 } else if (SII != SpillIdxes.end() &&
1428 SII->second.back().vreg == NewVReg &&
1429 index > SII->second.back().index) {
1430 // There is an earlier def that's not killed (must be two-address).
1431 // The spill is no longer needed.
1432 SII->second.pop_back();
1433 if (SII->second.empty()) {
1434 SpillIdxes.erase(MBBId);
1435 SpillMBBs.reset(MBBId);
// Record (or adjust) a restore point for this use in this MBB.
1442 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1443 SpillIdxes.find(MBBId);
1444 if (SII != SpillIdxes.end() &&
1445 SII->second.back().vreg == NewVReg &&
1446 index > SII->second.back().index)
1447 // Use(s) following the last def, it's not safe to fold the spill.
1448 SII->second.back().canFold = false;
1449 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1450 RestoreIdxes.find(MBBId);
1451 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1452 // If we are splitting live intervals, only fold if it's the first
1453 // use and there isn't another use later in the MBB.
1454 RII->second.back().canFold = false;
1456 // Only need a reload if there isn't an earlier def / use.
1457 if (RII == RestoreIdxes.end()) {
1458 std::vector<SRInfo> Infos;
1459 Infos.push_back(SRInfo(index, NewVReg, true));
1460 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1462 RII->second.push_back(SRInfo(index, NewVReg, true));
1464 RestoreMBBs.set(MBBId);
1468 // Update spill weight.
1469 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1470 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1473 if (NewVReg && TrySplit && AllCanFold) {
1474 // If all of its def / use can be folded, give it a low spill weight.
1475 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if a foldable restore is recorded for
// register vr at slot 'index' in basic block number Id; i.e. folding a spill
// here would also fold the corresponding reload.
1480 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1481 unsigned vr, BitVector &RestoreMBBs,
1482 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores were ever recorded for this block.
1483 if (!RestoreMBBs[Id])
1485 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Linear scan for an exact (index, vreg) match that is marked foldable.
1486 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1487 if (Restores[i].index == index &&
1488 Restores[i].vreg == vr &&
1489 Restores[i].canFold)
1494 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1495 unsigned vr, BitVector &RestoreMBBs,
1496 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1497 if (!RestoreMBBs[Id])
1499 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1500 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1501 if (Restores[i].index == index && Restores[i].vreg)
1502 Restores[i].index = SlotIndex();
1505 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1506 /// spilled and create empty intervals for their uses.
// NOTE(review): this listing elides lines (return type, iterator advance,
// some branch bodies); read with the gaps in mind.
1508 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1509 const TargetRegisterClass* rc,
1510 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining operand that still references li.reg; after spilling,
// only implicit-def defs (and debug values) should be left.
1511 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1512 re = mri_->reg_end(); ri != re; ) {
1513 MachineOperand &O = ri.getOperand();
1514 MachineInstr *MI = &*ri;
1516 if (MI->isDebugValue()) {
1517 // Remove debug info for now.
1519 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any remaining def must come from an IMPLICIT_DEF; erase it outright.
1523 assert(MI->isImplicitDef() &&
1524 "Register def was not rewritten?");
1525 RemoveMachineInstrFromMaps(MI);
1526 vrm.RemoveMachineInstrFromMaps(MI);
1527 MI->eraseFromParent();
1529 // This must be an use of an implicit_def so it's not part of the live
1530 // interval. Create a new empty live interval for it.
1531 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1532 unsigned NewVReg = mri_->createVirtualRegister(rc);
1534 vrm.setIsImplicitlyDefined(NewVReg);
1535 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of the user that referenced li.reg to the new vreg.
1536 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1537 MachineOperand &MO = MI->getOperand(i);
1538 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the weight contribution of one def and/or use at
// the given loop depth. NOTE(review): the return-type line and the clamp
// assignment under the depth check appear elided in this listing.
1548 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1549 // Limit the loop depth ridiculousness.
1550 if (loopDepth > 200)
1553 // The loop depth is used to roughly estimate the number of times the
1554 // instruction is executed. Something like 10^d is simple, but will quickly
1555 // overflow a float. This expression behaves like 10^d for small d, but is
1556 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1557 // headroom before overflow.
1558 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// bools promote to 0/1, so a def+use instruction counts double.
1560 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to every newly created
// spill interval. NOTE(review): the return-type line is elided here.
1564 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1565 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1566 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpills - Top-level spiller entry point: spill interval 'li',
// creating one new interval per inserted load/store/remat, recording spill
// and restore points in the VirtRegMap, and returning the new intervals.
// Handles three phases: (1) already-split intervals, (2) remat analysis +
// rewriting of all ranges, (3) insertion of spill/restore points and final
// kill marking. NOTE(review): this listing elides lines throughout.
1569 std::vector<LiveInterval*> LiveIntervals::
1570 addIntervalsForSpills(const LiveInterval &li,
1571 SmallVectorImpl<LiveInterval*> &SpillIs,
1572 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1573 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1576 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1577 li.print(dbgs(), tri_);
1581 // Each bit specify whether a spill is required in the MBB.
1582 BitVector SpillMBBs(mf_->getNumBlockIDs());
1583 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1584 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1585 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1586 DenseMap<unsigned,unsigned> MBBVRegsMap;
1587 std::vector<LiveInterval*> NewLIs;
1588 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat bookkeeping, indexed by VNInfo id.
1590 unsigned NumValNums = li.getNumValNums();
1591 SmallVector<MachineInstr*, 4> ReMatDefs;
1592 ReMatDefs.resize(NumValNums, NULL);
1593 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1594 ReMatOrigDefs.resize(NumValNums, NULL);
1595 SmallVector<int, 4> ReMatIds;
1596 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1597 BitVector ReMatDelete(NumValNums);
1598 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1600 // Spilling a split live interval. It cannot be split any further. Also,
1601 // it's also guaranteed to be a single val# / range interval.
1602 if (vrm.getPreSplitReg(li.reg)) {
1603 vrm.setIsSplitFromReg(li.reg, 0);
1604 // Unset the split kill marker on the last use.
1605 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1606 if (KillIdx != SlotIndex()) {
1607 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1608 assert(KillMI && "Last use disappeared?");
1609 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1610 assert(KillOp != -1 && "Last use disappeared?");
1611 KillMI->getOperand(KillOp).setIsKill(false);
1613 vrm.removeKillPoint(li.reg);
1614 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1615 Slot = vrm.getStackSlot(li.reg);
1616 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1617 MachineInstr *ReMatDefMI = DefIsReMat ?
1618 vrm.getReMaterializedMI(li.reg) : NULL;
1620 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1621 bool isLoad = isLoadSS ||
1622 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1623 bool IsFirstRange = true;
1624 for (LiveInterval::Ranges::const_iterator
1625 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1626 // If this is a split live interval with multiple ranges, it means there
1627 // are two-address instructions that re-defined the value. Only the
1628 // first def can be rematerialized!
1630 // Note ReMatOrigDefMI has already been deleted.
1631 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1632 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1633 false, vrm, rc, ReMatIds, loopInfo,
1634 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1635 MBBVRegsMap, NewLIs);
// Non-first ranges: rewrite without any remat information.
1637 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1638 Slot, 0, false, false, false,
1639 false, vrm, rc, ReMatIds, loopInfo,
1640 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1641 MBBVRegsMap, NewLIs);
1643 IsFirstRange = false;
1646 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1647 normalizeSpillWeights(NewLIs);
// Non-pre-split path starts here: decide whether splitting can help.
1651 bool TrySplit = !intervalIsInOneMBB(li);
1654 bool NeedStackSlot = false;
1655 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1657 const VNInfo *VNI = *i;
1658 unsigned VN = VNI->id;
1659 if (VNI->isUnused())
1660 continue; // Dead val#.
1661 // Is the def for the val# rematerializable?
1662 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1663 ? getInstructionFromIndex(VNI->def) : 0;
1665 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1666 // Remember how to remat the def of this val#.
1667 ReMatOrigDefs[VN] = ReMatDefMI;
1668 // Original def may be modified so we have to make a copy here.
1669 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1670 CloneMIs.push_back(Clone);
1671 ReMatDefs[VN] = Clone;
1673 bool CanDelete = true;
1674 if (VNI->hasPHIKill()) {
1675 // A kill is a phi node, not all of its uses can be rematerialized.
1676 // It must not be deleted.
1678 // Need a stack slot if there is any live range where uses cannot be
1680 NeedStackSlot = true;
1683 ReMatDelete.set(VN);
1685 // Need a stack slot if there is any live range where uses cannot be
1687 NeedStackSlot = true;
1691 // One stack slot per live interval.
1692 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1693 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1694 Slot = vrm.assignVirt2StackSlot(li.reg);
1696 // This case only occurs when the prealloc splitter has already assigned
1697 // a stack slot to this vreg.
1699 Slot = vrm.getStackSlot(li.reg);
1702 // Create new intervals and rewrite defs and uses.
1703 for (LiveInterval::Ranges::const_iterator
1704 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1705 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1706 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1707 bool DefIsReMat = ReMatDefMI != NULL;
1708 bool CanDelete = ReMatDelete[I->valno->id];
1710 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1711 bool isLoad = isLoadSS ||
1712 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1713 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1714 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1715 CanDelete, vrm, rc, ReMatIds, loopInfo,
1716 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1717 MBBVRegsMap, NewLIs);
1720 // Insert spills / restores if we are splitting.
1722 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1723 normalizeSpillWeights(NewLIs);
// Phase 3: walk the recorded per-MBB spill points, folding stores into the
// defining instructions where possible, otherwise telling the spiller.
1727 SmallPtrSet<LiveInterval*, 4> AddedKill;
1728 SmallVector<unsigned, 2> Ops;
1729 if (NeedStackSlot) {
1730 int Id = SpillMBBs.find_first();
1732 std::vector<SRInfo> &spills = SpillIdxes[Id];
1733 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1734 SlotIndex index = spills[i].index;
1735 unsigned VReg = spills[i].vreg;
1736 LiveInterval &nI = getOrCreateInterval(VReg);
1737 bool isReMat = vrm.isReMaterialized(VReg);
1738 MachineInstr *MI = getInstructionFromIndex(index);
1739 bool CanFold = false;
1740 bool FoundUse = false;
1742 if (spills[i].canFold) {
1744 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1745 MachineOperand &MO = MI->getOperand(j);
1746 if (!MO.isReg() || MO.getReg() != VReg)
1753 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1754 RestoreMBBs, RestoreIdxes))) {
1755 // MI has two-address uses of the same register. If the use
1756 // isn't the first and only use in the BB, then we can't fold
1757 // it. FIXME: Move this to rewriteInstructionsForSpills.
1764 // Fold the store into the def if possible.
1765 bool Folded = false;
1766 if (CanFold && !Ops.empty()) {
1767 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1770 // Also folded uses, do not issue a load.
1771 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1772 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1774 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1778 // Otherwise tell the spiller to issue a spill.
1780 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1781 bool isKill = LR->end == index.getStoreIndex();
1782 if (!MI->registerDefIsDead(nI.reg))
1783 // No need to spill a dead def.
1784 vrm.addSpillPoint(VReg, isKill, MI);
1786 AddedKill.insert(&nI);
1789 Id = SpillMBBs.find_next(Id);
// Walk the recorded per-MBB restore points, folding loads / remats into the
// using instructions where possible, otherwise telling the spiller.
1793 int Id = RestoreMBBs.find_first();
1795 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1796 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1797 SlotIndex index = restores[i].index;
// An invalid index marks an entry already folded away (eraseRestoreInfo).
1798 if (index == SlotIndex())
1800 unsigned VReg = restores[i].vreg;
1801 LiveInterval &nI = getOrCreateInterval(VReg);
1802 bool isReMat = vrm.isReMaterialized(VReg);
1803 MachineInstr *MI = getInstructionFromIndex(index);
1804 bool CanFold = false;
1806 if (restores[i].canFold) {
1808 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1809 MachineOperand &MO = MI->getOperand(j);
1810 if (!MO.isReg() || MO.getReg() != VReg)
1814 // If this restore were to be folded, it would have been folded
1823 // Fold the load into the use if possible.
1824 bool Folded = false;
1825 if (CanFold && !Ops.empty()) {
1827 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1829 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1831 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1832 // If the rematerializable def is a load, also try to fold it.
1833 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1834 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1835 Ops, isLoadSS, LdSlot, VReg);
1837 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1839 // Re-matting an instruction with virtual register use. Add the
1840 // register as an implicit use on the use MI and mark the register
1841 // interval as unspillable.
1842 LiveInterval &ImpLi = getInterval(ImpUse);
1843 ImpLi.markNotSpillable();
1844 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1849 // If folding is not possible / failed, then tell the spiller to issue a
1850 // load / rematerialization for us.
1852 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1854 vrm.addRestorePoint(VReg, MI);
1856 Id = RestoreMBBs.find_next(Id);
1859 // Finalize intervals: add kills, finalize spill weights, and filter out
1861 std::vector<LiveInterval*> RetNewLIs;
1862 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1863 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by approximate interval size.
1865 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1866 if (!AddedKill.count(LI)) {
1867 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1868 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1869 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1870 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1871 assert(UseIdx != -1);
1872 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1873 LastUse->getOperand(UseIdx).setIsKill();
1874 vrm.addKillPoint(LI->reg, LastUseIdx);
1877 RetNewLIs.push_back(LI);
1881 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1882 normalizeSpillWeights(RetNewLIs);
1886 /// hasAllocatableSuperReg - Return true if the specified physical register has
1887 /// any super register that's allocatable.
1888 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A super-register qualifies only if it is allocatable AND has an interval.
// NOTE(review): the return statements are elided in this listing.
1889 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1890 if (allocatableRegs_[*AS] && hasInterval(*AS))
1895 /// getRepresentativeReg - Find the largest super register of the specified
1896 /// physical register.
1897 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1898 // Find the largest super-register that is allocatable.
// Default to Reg itself if no larger candidate is found.
1899 unsigned BestReg = Reg;
1900 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1901 unsigned SuperReg = *AS;
// A super-register with an interval but no allocatable super of its own is
// the top of the chain. NOTE(review): the assignment to BestReg and the
// return are elided in this listing.
1902 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1910 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1911 /// specified interval that conflicts with the specified physical register.
1912 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1913 unsigned PhysReg) const {
1914 unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest) super-reg's
// interval, not PhysReg's own.
1915 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1916 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1917 E = mri_->reg_end(); I != E; ++I) {
1918 MachineOperand &O = I.getOperand();
1919 MachineInstr *MI = O.getParent();
// Debug values never constrain allocation.
1920 if (MI->isDebugValue())
1922 SlotIndex Index = getInstructionIndex(MI);
// Each def/use of li at a point where pli is live counts as one conflict.
// NOTE(review): the increment of NumConflicts is elided in this listing.
1923 if (pli.liveAt(Index))
1926 return NumConflicts;
1929 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1930 /// around all defs and uses of the specified interval. Return true if it
1931 /// was able to cut its interval.
// Emergency spilling: when the allocator has no register left, carve the
// physreg's interval (and its sub-registers') out around each instruction
// that defs/uses li.reg, recording an emergency spill in the VirtRegMap.
// NOTE(review): this listing elides lines, including the returns.
1932 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1933 unsigned PhysReg, VirtRegMap &vrm) {
1934 unsigned SpillReg = getRepresentativeReg(PhysReg);
1936 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1937 // If there are registers which alias PhysReg, but which are not a
1938 // sub-register of the chosen representative super register. Assert
1939 // since we can't handle it yet.
1940 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1941 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and all of its sub-registers that carry intervals.
1944 SmallVector<unsigned, 4> PRegs;
1945 if (hasInterval(SpillReg))
1946 PRegs.push_back(SpillReg);
1948 SmallSet<unsigned, 4> Added;
1949 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1950 if (Added.insert(*AS) && hasInterval(*AS)) {
1951 PRegs.push_back(*AS);
1952 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction touching li.reg once, even with multiple operands.
1957 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1958 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1959 E = mri_->reg_end(); I != E; ++I) {
1960 MachineOperand &O = I.getOperand();
1961 MachineInstr *MI = O.getParent();
1962 if (MI->isDebugValue() || SeenMIs.count(MI))
1965 SlotIndex Index = getInstructionIndex(MI);
1966 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
1967 unsigned PReg = PRegs[i];
1968 LiveInterval &pli = getInterval(PReg);
1969 if (!pli.liveAt(Index))
1971 vrm.addEmergencySpill(PReg, MI);
// Cut the physreg's interval across this instruction only.
1972 SlotIndex StartIdx = Index.getLoadIndex();
1973 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
1974 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
1975 pli.removeRange(StartIdx, EndIdx);
// Cannot carve out the range: allocation has genuinely failed. Inline asm
// gets a targeted diagnostic since bad constraints are the common cause.
1979 raw_string_ostream Msg(msg);
1980 Msg << "Ran out of registers during register allocation!";
1981 if (MI->isInlineAsm()) {
1982 Msg << "\nPlease check your inline asm statement for invalid "
1983 << "constraints:\n";
1984 MI->print(Msg, tm_);
1986 report_fatal_error(Msg.str());
// Also trim any live sub-register intervals across this instruction.
1988 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
1989 if (!hasInterval(*AS))
1991 LiveInterval &spli = getInterval(*AS);
1992 if (spli.liveAt(Index))
1993 spli.removeRange(Index.getLoadIndex(),
1994 Index.getNextIndex().getBaseIndex());
2001 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2002 MachineInstr* startInst) {
2003 LiveInterval& Interval = getOrCreateInterval(reg);
2004 VNInfo* VN = Interval.getNextValue(
2005 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2006 startInst, true, getVNInfoAllocator());
2007 VN->setHasPHIKill(true);
2009 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2010 getMBBEndIdx(startInst->getParent()), VN);
2011 Interval.addRange(LR);