//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//                     The LLVM Compiler Infrastructure
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
         E = r2iMap_.end(); I != E; ++I)
  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  numIntervals += getNumIntervals();
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
      OS << getInstructionIndex(mii) << '\t' << *mii;
void LiveIntervals::dumpInstrs() const {
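/// conflictsWithPhysReg - Return true if an instruction inside li's single
/// live range uses or defines a register that (after mapping virtual
/// registers through the VirtRegMap) overlaps the given physical register.
/// Copies to and from li.reg are ignored.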
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;
    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
        PhysReg = vrm.getPhys(PhysReg);
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
  // No conflicts found.
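/// conflictsWithAliasRef - Return true if any instruction covered by li's
/// live ranges (other than the already-joined copies) has an operand that
/// refers to a physical register aliasing Reg.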
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->regsOverlap(Reg, PhysReg))
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
    dbgs() << "%reg" << reg;
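/// MultipleDefsBySameMI - Return true if the instruction defines the register
/// it defines at operand MOIdx again in a later operand, i.e. separate
/// sub-register defs of the same virtual register by a single instruction.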
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
/// isPartialRedef - Return true if the specified def at the specified index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
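/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by the given operand of mi. Handles the first def of a
/// vreg as well as two-address redefs, partial redefs and phi-join registers.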
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);
  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
      mi->addRegisterDefined(interval.reg);
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
      // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
      // implicit defs without really knowing. It shows up as INSERT_SUBREG
      // using an undefined register.
      if (mi->isInsertSubreg())
        mi->getOperand(1).setIsUndef();
    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();
      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);
    bool PHIJoin = lv_->isPHIJoin(interval.reg);
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
      // Create a live range with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
        ValNo->setIsPHIDef(true);
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();
      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();
      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);
      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        OldValNo->setCopy(&*mi);
      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
        dbgs() << " RESULT: ";
        interval.print(dbgs(), tri_);
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
      llvm_unreachable("Multiply defined register");
  DEBUG(dbgs() << '\n');
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;
  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    if (mi->isDebugValue())
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (mi->isRegTiedToUseOperand(DefIdx)) {
        // Two-address instruction.
        end = baseIndex.getDefIndex();
        // Another instruction redefines the register before it is ever read.
        // Then the register is essentially dead at the instruction that
        // defines it. Hence its interval is:
        // [defSlot(def), defSlot(def)+1)
        DEBUG(dbgs() << " dead");
        end = start.getStoreIndex();
    baseIndex = baseIndex.getNextIndex();
  // The only case where we should see a dead physreg here without a killing
  // instruction is when the register is live-in to the function and never
  // used. Another possible case is that the implicit use of the physical
  // register has been deleted by the two-address pass.
  end = start.getStoreIndex();
  assert(start < end && "did not find end of interval?");
  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
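/// handleRegisterDef - Dispatch a register def to the virtual or physical
/// register handler. For allocatable physical registers, live ranges are also
/// created for each sub-register that is not explicitly redefined by MI.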
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
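/// handleLiveInRegister - Create the live range for a register that is
/// live-in to MBB, from the block start up to its first kill or redefinition
/// (or treat it as dead / live-through if neither is found).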
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  dbgs() << "\t\tlivein register: ";
  printRegName(interval.reg, tri_);
  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      // MBB is empty except for DBG_VALUE's.
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  SlotIndex end = baseIndex;
  bool SeenDefUse = false;
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  // Live-in register might not be used at all.
    DEBUG(dbgs() << " dead");
    end = MIIdx.getStoreIndex();
    DEBUG(dbgs() << " live through");
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
                 << ":\t\t# derived from " << MBB->getName() << "\n");
    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
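/// createInterval - Allocate a new live interval for the given register.
/// Physical registers get an effectively infinite spill weight so they are
/// never picked for spilling.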
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
//===----------------------------------------------------------------------===//
// Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operands!");
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  if (!tii_->isTriviallyReMaterializable(MI, aa_))
  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
      MRInfo |= (unsigned)VirtRegMap::isMod;
    // Filter out two-address use operand(s).
    if (MI->isRegTiedToDefOperand(OpIdx)) {
      MRInfo = VirtRegMap::isModRef;
      MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// the function returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
  return tii_->canFoldMemoryOperand(MI, FoldOps);
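/// intervalIsInOneMBB - Return true if every live range of li is contained
/// within a single basic block.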
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
      UseMO->setReg(NewVReg);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    bool CreatedNewVReg = false;
      NewVReg = mri_->createVirtualRegister(rc);
      CreatedNewVReg = true;
      // The new virtual register should get the same allocation hints as the
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          // We need to give the new vreg the same stack slot as the
          // spilled interval.
          vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          if (isNotInMIMap(MI))
          goto RestartInstruction;
      // We'll try to fold it later if it's profitable.
      CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);
    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    if (CreatedNewVReg) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        vrm.assignVirt2StackSlot(NewVReg, Slot);
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
        vrm.setIsSplitFromReg(NewVReg, li.reg);
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
    dbgs() << "\t\t\t\tAdded new interval: ";
    nI.print(dbgs(), tri_);
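/// anyKillInMBBAfterIdx - Return true if the live interval is killed in MBB
/// at any point after the given index.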
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
  struct RewriteInfo {
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                                 Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
    // Must be defined by an implicit def. It should not be spilled. Note,
    // this is for correctness reasons. e.g.
    // 8   %reg1024<def> = IMPLICIT_DEF
    // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
    // The live range [12, 14) is not part of the r1024 live interval since
    // it's defined by an implicit def. It will not conflict with the live
    // interval of r1025. Now suppose both registers are spilled, you can
    // easily see a situation where both registers are reloaded before
    // the INSERT_SUBREG and both target registers that would overlap.
    RewriteMIs.push_back(RewriteInfo(index, MI));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
    bool IsNew = ThisVReg == 0;
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                   index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                   Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                   CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                   ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
    // Keep track of the last def and first use in each MBB.
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (SII == SpillIdxes.end()) {
          std::vector<SRInfo> S;
          S.push_back(SRInfo(index, NewVReg, true));
          SpillIdxes.insert(std::make_pair(MBBId, S));
        } else if (SII->second.back().vreg != NewVReg) {
          SII->second.push_back(SRInfo(index, NewVReg, true));
        } else if (index > SII->second.back().index) {
          // If there is an earlier def and this is a two-address
          // instruction, then it's not possible to fold the store (which
          // would also fold the load).
          SRInfo &Info = SII->second.back();
          Info.canFold = !HasUse;
        SpillMBBs.set(MBBId);
      } else if (SII != SpillIdxes.end() &&
                 SII->second.back().vreg == NewVReg &&
                 index > SII->second.back().index) {
        // There is an earlier def that's not killed (must be two-address).
        // The spill is no longer needed.
        SII->second.pop_back();
        if (SII->second.empty()) {
          SpillIdxes.erase(MBBId);
          SpillMBBs.reset(MBBId);
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      // Only need a reload if there isn't an earlier def / use.
      if (RII == RestoreIdxes.end()) {
        std::vector<SRInfo> Infos;
        Infos.push_back(SRInfo(index, NewVReg, true));
        RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        RII->second.push_back(SRInfo(index, NewVReg, true));
      RestoreMBBs.set(MBBId);
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
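/// alsoFoldARestore - Return true if a foldable restore of vr is recorded at
/// the given index in block Id.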
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                         DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
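/// eraseRestoreInfo - Invalidate the recorded restore of vr at the given
/// index in block Id so that no reload is issued for it.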
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                         DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
    assert(MI->isImplicitDef() &&
           "Register def was not rewritten?");
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    // This must be a use of an implicit_def so it's not part of the live
    // interval. Create a new empty live interval for it.
    // FIXME: Can we simply erase some of the instructions? e.g. Stores?
    unsigned NewVReg = mri_->createVirtualRegister(rc);
    vrm.setIsImplicitlyDefined(NewVReg);
    NewLIs.push_back(&getOrCreateInterval(NewVReg));
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() == li.reg) {
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
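  // For example: depth 0 gives a factor of 1, depth 1 about 10.1, depth 2
  // about 87, and depth 3 about 657, so it tracks 10^d closely for small d.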
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
  return (isDef + isUse) * lc;
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");
    dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
    li.print(dbgs(), tri_);
  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. It's
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      IsFirstRange = false;
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
  bool TrySplit = !intervalIsInOneMBB(li);
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;
      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized,
        // so it must not be deleted.
1684 // Need a stack slot if there is any live range where uses cannot be
1686 NeedStackSlot = true;
1689 ReMatDelete.set(VN);
1691 // Need a stack slot if there is any live range where uses cannot be
1693 NeedStackSlot = true;
1697 // One stack slot per live interval.
1698 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1699 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1700 Slot = vrm.assignVirt2StackSlot(li.reg);
1702 // This case only occurs when the prealloc splitter has already assigned
1703 // a stack slot to this vreg.
1705 Slot = vrm.getStackSlot(li.reg);
1708 // Create new intervals and rewrite defs and uses.
1709 for (LiveInterval::Ranges::const_iterator
1710 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1711 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1712 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1713 bool DefIsReMat = ReMatDefMI != NULL;
1714 bool CanDelete = ReMatDelete[I->valno->id];
1716 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1717 bool isLoad = isLoadSS ||
1718 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1719 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1720 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1721 CanDelete, vrm, rc, ReMatIds, loopInfo,
1722 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1723 MBBVRegsMap, NewLIs);
1726 // Insert spills / restores if we are splitting.
1728 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1729 normalizeSpillWeights(NewLIs);
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
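
  // Now visit the restore points recorded in RestoreMBBs / RestoreIdxes and
  // either fold the reload / rematerialization into the using instruction or
  // ask the spiller to emit it before the use.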
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that is allocatable and has a live interval.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
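
// Illustrative example (assumes an x86-64-like register hierarchy, which is
// not spelled out in this file): getRepresentativeReg(AL) walks AL's
// super-registers and would typically return RAX when RAX has a live
// interval, since RAX has no allocatable super-register of its own.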

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
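
// Hypothetical usage sketch (the names 'lis', 'cur' and 'Candidates' are
// assumed for illustration and do not appear in this file): an allocator
// choosing which physical register to spill around could rank candidates by
// conflict count, e.g.
//
//   unsigned Best = Candidates[0], BestConf = ~0u;
//   for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
//     unsigned Conf = lis->getNumConflictsWithPhysReg(cur, Candidates[i]);
//     if (Conf < BestConf) { BestConf = Conf; Best = Candidates[i]; }
//   }
//
// Fewer conflicting uses and defs generally means fewer emergency spills.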

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If a register aliases PhysReg but is not a sub-register of the chosen
    // representative super register, assert, since we can't handle that case
    // yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }
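
  // For every instruction that reads or writes li.reg, emergency-spill any of
  // the collected physical registers that are live at that point and carve
  // that slot out of their intervals.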
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
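
/// addLiveRangeToEndOfBlock - Extend the interval for 'reg' from the def at
/// 'startInst' to the end of startInst's basic block with a new value number
/// that is marked as having a PHI kill. Presumably this is intended for
/// passes that insert a new def late in a block (e.g. PHI elimination
/// inserting copies) and need the intervals updated; the callers are outside
/// this file.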
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}