1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.DestroyAll();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Register allocate the whole function
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 if (mii->isDebugValue())
146 OS << getInstructionIndex(mii) << '\t' << *mii;
151 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Scan the instructions covered by li's single live
/// range and report whether physical register 'reg' is referenced there
/// (copies to/from li.reg are tolerated). Virtual-register operands are
/// mapped through the VirtRegMap before the overlap check.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// bodies of several guard 'if's and some closing braces are missing) —
/// restore from upstream LLVM before building; code left byte-identical here.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
  // NOTE(review): guard body (early return) appears elided above.
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())

  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        // Map the virtual register through the VirtRegMap first.
        if (!vrm.hasPhys(PhysReg))
        PhysReg = vrm.getPhys(PhysReg);
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))

  // No conflicts found.
/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
/// it checks for sub-register reference and it can check use as well.
///
/// Walks every instruction covered by li's ranges (skipping deleted
/// instructions and instructions in JoinedCopies) and reports whether any
/// operand names a physical register that is a sub-register of Reg. Uses
/// are only considered when CheckUse is set.
///
/// NOTE(review): some loop-condition / guard lines appear elided in this
/// copy (e.g. the 'index != end' loop bound) — restore from upstream LLVM;
/// code left byte-identical here.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                               unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions
      // Copies that have already been coalesced away are ignored.
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (MO.isUse() && !CheckUse)
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->isSubRegister(Reg, PhysReg))
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
/// MultipleDefsBySameMI - Return true if the operand at MOIdx is one of
/// several defs of the same virtual register by this single instruction
/// (e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 ...). The assert
/// checks that such multiple defs use distinct, non-zero sub-register
/// indices.
///
/// NOTE(review): the 'static' linkage line, an operand isReg() guard, and
/// the trailing returns/braces appear elided in this copy — restore from
/// upstream LLVM; code left byte-identical here.
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  // Scan only the operands after MOIdx for another def of the same vreg.
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
///
/// Only sub-register, non-earlyclobber defs qualify; the previous value must
/// have an accurate def whose instruction also defines interval.reg.
///
/// NOTE(review): the early 'return false;' for the first guard and the
/// closing braces appear elided in this copy — restore from upstream LLVM;
/// code left byte-identical here.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  // Look up the value live just before this def.
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
/// handleVirtualRegisterDef - Update intervals for a virtual register def at
/// MIIdx/MO. The first def of a vreg builds its whole interval from the
/// LiveVariables kill information; later defs are phi-elimination or
/// two-address/partial redefs and split/extend the existing interval.
///
/// NOTE(review): this copy of the file appears to have lines elided
/// throughout (parameters MIIdx/MO/MOIdx in the signature, several guard
/// bodies, allocator arguments, and closing braces are missing) — restore
/// from upstream LLVM before building; code left byte-identical here.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Remember the defining copy so coalescing can trace its source later.
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);

      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
        ValNo->setIsPHIDef(true);
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);

    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    //
    // It may also be partial redef like this:
    // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      //
      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert((PartReDef || interval.containsOneValue()) &&
             "Unexpected 2-addr liveint!");
      unsigned NumVals = interval.getNumValNums();
      SlotIndex DefIndex = interval.getValNumInfo(NumVals-1)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),

          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);

    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))

      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);

      llvm_unreachable("Multiply defined register");

  DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Update the interval for a physical register
/// def. A physreg cannot be live across basic blocks, so the range runs from
/// the def slot to the kill/redef found by scanning forward in the same MBB
/// (or just the def slot itself if the reg is dead or never read again).
///
/// NOTE(review): this copy of the file appears to have lines elided
/// (the MIIdx/MO parameters in the signature, several guard bodies,
/// 'break'/'return' statements, and closing braces) — restore from
/// upstream LLVM before building; code left byte-identical here.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    // DBG_VALUEs carry no slot index and cannot kill or redefine.
    if (mi->isDebugValue())
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();

    baseIndex = baseIndex.getNextIndex();

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler. For an allocatable physreg, also create
/// ranges for each of its sub-registers not explicitly redefined by MI.
///
/// NOTE(review): the MIIdx/MO/MOIdx parameter lines of the signature and
/// the closing braces appear elided in this copy — restore from upstream
/// LLVM before building; code left byte-identical here.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Record the defining copy, if any, for later coalescing decisions.
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the range for a register that is live into
/// MBB: scan forward from the block start until the first kill or redef
/// (skipping DBG_VALUEs); if neither is seen the register is either unused
/// (dead at entry) or live through the whole block. The resulting value
/// number is marked as a PHI def since it has no defining instruction here.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// MIIdx parameter line, the main 'while (mi != E)' loop header, several
/// 'break'/'return' statements and closing braces) — restore from upstream
/// LLVM before building; code left byte-identical here.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  dbgs() << "\t\tlivein register: ";
  printRegName(interval.reg, tri_);

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      // MBB is empty except for DBG_VALUE's.

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  // Live-in register might not be used at all.
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
      DEBUG(dbgs() << " live through");

    // The live-in value has no real defining instruction: mark it PHI-def.
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
///
/// Per block: first handle live-in registers (and their sub-registers),
/// then walk instructions in slot-index order and build ranges for every
/// register def. Registers only referenced by undef uses get empty
/// intervals at the end.
///
/// NOTE(review): this copy of the file appears to have lines elided
/// (loop increments, guard bodies and closing braces) — restore from
/// upstream LLVM before building; code left byte-identical here.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUEs have no slot index and define nothing.
      if (MI->isDebugValue())

      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())

        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
766 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
767 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
768 return new LiveInterval(reg, Weight);
771 /// dupInterval - Duplicate a live interval. The caller is responsible for
772 /// managing the allocated memory.
773 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
774 LiveInterval *NewLI = createInterval(li->reg);
775 NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
///
/// Handles EXTRACT_SUBREG (returning the matching physical sub-register
/// when possible), INSERT_SUBREG/SUBREG_TO_REG (operand 2 is the source),
/// and plain target move instructions.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// null-copy guard at the top, several 'return' statements, and closing
/// braces) — restore from upstream LLVM before building; code left
/// byte-identical here.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
  llvm_unreachable("Unrecognized copy instruction!");
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//===----------------------------------------------------------------------===//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// accumulator declaration, 'continue' guard bodies, the assert's condition
/// line, and the trailing return/braces) — restore from upstream LLVM before
/// building; code left byte-identical here.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
    // Unallocatable physregs (e.g. reserved regs) don't count as operands.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
           "Can't rematerialize instruction with multiple register operand!");
842 /// isValNoAvailableAt - Return true if the val# of the specified interval
843 /// which reaches the given instruction also reaches the specified use index.
844 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
845 SlotIndex UseIdx) const {
846 SlotIndex Index = getInstructionIndex(MI);
847 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
848 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
849 return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
///
/// Checks the target's triviality hook, then — if the instruction has an
/// implicit virtual register use — verifies that value is available at every
/// use of li and is not itself about to be spilled.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// trailing 'isLoad' parameter line, the 'if (ImpUse)' wrapper, several
/// 'return false;' bodies and closing braces) — restore from upstream LLVM
/// before building; code left byte-identical here.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  if (!tii_->isTriviallyReMaterializable(MI, aa_))

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      // Only uses reached by this val# matter.
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
890 /// isReMaterializable - Returns true if the definition MI of the specified
891 /// val# of the specified interval is re-materializable.
892 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
893 const VNInfo *ValNo, MachineInstr *MI) {
894 SmallVector<LiveInterval*, 4> Dummy1;
896 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// 'isLoad' parameter line, the loop increment, unused-val# guard, and the
/// trailing return/braces) — restore from upstream LLVM before building;
/// code left byte-identical here.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding; FoldOps receives the surviving operand indexes and MRInfo
/// accumulates VirtRegMap::isMod / isRef flags for the folded access.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// MRInfo reference parameter line, the def/use branch heads, 'return'
/// statements and closing braces) — restore from upstream LLVM before
/// building; code left byte-identical here.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
      MRInfo |= (unsigned)VirtRegMap::isMod;
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
      MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
///
/// NOTE(review): this copy of the file appears to have lines elided (the
/// InstrIdx parameter line, 'return' statements after the delete path and
/// the folding attempt, the MRInfo declaration, and closing braces) —
/// restore from upstream LLVM before building; code left byte-identical
/// here.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))

  // Either fold against the stack slot or against the rematerialized def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Carry split/spill bookkeeping over to the folded instruction.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
1005 /// canFoldMemoryOperand - Returns true if the specified load / store
1006 /// folding is possible.
1007 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1008 SmallVector<unsigned, 2> &Ops,
// NOTE(review): the third parameter (named ReMat below) and an early return
// after FilterFoldedOps are elided in this excerpt; verify against full source.
1010 // Filter the list of operand indexes that are to be folded. Abort if
1011 // any operand will prevent folding.
1012 unsigned MRInfo = 0;
1013 SmallVector<unsigned, 2> FoldOps;
1014 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1017 // It's only legal to remat for a use, not a def.
1018 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final decision to the target's folding hook.
1021 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return whether every range of the given live interval
// lies within a single machine basic block. (The returns comparing mbb and
// mbb2 are elided in this excerpt — TODO confirm against the full source.)
1024 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1025 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Block covering the first range; subsequent ranges are checked against it.
1027 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1032 for (++itr; itr != li.ranges.end(); ++itr) {
1033 MachineBasicBlock *mbb2 =
1034 indexes_->getMBBCoveringRange(itr->start, itr->end);
1043 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1044 /// interval on to-be re-materialized operands of MI) with new register.
1045 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1046 MachineInstr *MI, unsigned NewVReg,
// NOTE(review): the VirtRegMap parameter line and some guard lines
// (operand is-reg check, `continue`s) are elided in this excerpt.
1048 // There is an implicit use. That means one of the other operand is
1049 // being remat'ed and the remat'ed instruction has li.reg as an
1050 // use operand. Make sure we rewrite that as well.
1051 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1052 MachineOperand &MO = MI->getOperand(i);
1055 unsigned Reg = MO.getReg();
// Only rematerialized virtual registers are interesting here.
1056 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1058 if (!vrm.isReMaterialized(Reg))
1060 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
// Rewrite the remat'ed instruction's use of li.reg to the new vreg.
1061 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1063 UseMO->setReg(NewVReg);
1067 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1068 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// NOTE(review): this function is shown with many original lines elided
// (operand guards, `continue`s, else branches, the RestartInstruction label,
// and closing braces). The comments below describe only what the visible
// lines establish; confirm details against the complete source.
1069 bool LiveIntervals::
1070 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1071 bool TrySplit, SlotIndex index, SlotIndex end,
1073 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1074 unsigned Slot, int LdSlot,
1075 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1077 const TargetRegisterClass* rc,
1078 SmallVector<int, 4> &ReMatIds,
1079 const MachineLoopInfo *loopInfo,
1080 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1081 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1082 std::vector<LiveInterval*> &NewLIs) {
// Tracks whether every rewritten def/use of this instruction could be folded.
1083 bool CanFold = false;
1085 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1086 MachineOperand& mop = MI->getOperand(i);
1089 unsigned Reg = mop.getReg();
1090 unsigned RegI = Reg;
1091 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1096 bool TryFold = !DefIsReMat;
1097 bool FoldSS = true; // Default behavior unless it's a remat.
1098 int FoldSlot = Slot;
1100 // If this is the rematerializable definition MI itself and
1101 // all of its uses are rematerialized, simply delete it.
1102 if (MI == ReMatOrigDefMI && CanDelete) {
1103 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1105 RemoveMachineInstrFromMaps(MI);
1106 vrm.RemoveMachineInstrFromMaps(MI);
1107 MI->eraseFromParent();
1111 // If def for this use can't be rematerialized, then try folding.
1112 // If def is rematerializable and it's a load, also try folding.
1113 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1115 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1121 // Scan all of the operands of this instruction rewriting operands
1122 // to use NewVReg instead of li.reg as appropriate. We do this for
1125 // 1. If the instr reads the same spilled vreg multiple times, we
1126 // want to reuse the NewVReg.
1127 // 2. If the instr is a two-addr instruction, we are required to
1128 // keep the src/dst regs pinned.
1130 // Keep track of whether we replace a use and/or def so that we can
1131 // create the spill interval with the appropriate range.
1133 HasUse = mop.isUse();
1134 HasDef = mop.isDef();
// Ops collects the indexes of later operands that also reference this reg.
1135 SmallVector<unsigned, 2> Ops;
1137 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1138 const MachineOperand &MOj = MI->getOperand(j);
1141 unsigned RegJ = MOj.getReg();
1142 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1146 if (!MOj.isUndef()) {
1147 HasUse |= MOj.isUse();
1148 HasDef |= MOj.isDef();
1153 // Create a new virtual register for the spill interval.
1154 // Create the new register now so we can map the fold instruction
1155 // to the new register so when it is unfolded we get the correct
1157 bool CreatedNewVReg = false;
1159 NewVReg = mri_->createVirtualRegister(rc);
1161 CreatedNewVReg = true;
1163 // The new virtual register should get the same allocation hints as the
1165 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1166 if (Hint.first || Hint.second)
1167 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1173 // Do not fold load / store here if we are splitting. We'll find an
1174 // optimal point to insert a load / store later.
1176 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1177 Ops, FoldSS, FoldSlot, NewVReg)) {
1178 // Folding the load/store can completely change the instruction in
1179 // unpredictable ways, rescan it from the beginning.
1182 // We need to give the new vreg the same stack slot as the
1183 // spilled interval.
1184 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// Folding may have deleted the instruction entirely; otherwise restart the
// operand scan on the (possibly replaced) instruction.
1190 if (isNotInMIMap(MI))
1192 goto RestartInstruction;
1195 // We'll try to fold it later if it's profitable.
1196 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and any sibling operands in Ops) to NewVReg.
1200 mop.setReg(NewVReg);
1201 if (mop.isImplicit())
1202 rewriteImplicitOps(li, MI, NewVReg, vrm);
1204 // Reuse NewVReg for other reads.
1205 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1206 MachineOperand &mopj = MI->getOperand(Ops[j]);
1207 mopj.setReg(NewVReg);
1208 if (mopj.isImplicit())
1209 rewriteImplicitOps(li, MI, NewVReg, vrm);
1212 if (CreatedNewVReg) {
// Record remat info / stack slot for the new vreg in the VirtRegMap.
1214 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1215 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1216 // Each valnum may have its own remat id.
1217 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1219 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1221 if (!CanDelete || (HasUse && HasDef)) {
1222 // If this is a two-addr instruction then its use operands are
1223 // rematerializable but its def is not. It should be assigned a
1225 vrm.assignVirt2StackSlot(NewVReg, Slot);
1228 vrm.assignVirt2StackSlot(NewVReg, Slot);
1230 } else if (HasUse && HasDef &&
1231 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1232 // If this interval hasn't been assigned a stack slot (because earlier
1233 // def is a deleted remat def), do it now.
1234 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1235 vrm.assignVirt2StackSlot(NewVReg, Slot);
1238 // Re-matting an instruction with virtual register use. Add the
1239 // register as an implicit use on the use MI.
1240 if (DefIsReMat && ImpUse)
1241 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1243 // Create a new register interval for this spill / remat.
1244 LiveInterval &nI = getOrCreateInterval(NewVReg);
1245 if (CreatedNewVReg) {
1246 NewLIs.push_back(&nI);
1247 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1249 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend / create the live ranges of the new interval around this def/use.
1253 if (CreatedNewVReg) {
1254 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1255 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1256 DEBUG(dbgs() << " +" << LR);
1259 // Extend the split live interval to this def / use.
1260 SlotIndex End = index.getDefIndex();
1261 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1262 nI.getValNumInfo(nI.getNumValNums()-1));
1263 DEBUG(dbgs() << " +" << LR);
1268 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1269 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1270 DEBUG(dbgs() << " +" << LR);
1275 dbgs() << "\t\t\t\tAdded new interval: ";
1276 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if the val# has a (non-PHI) kill inside
// MBB at a slot strictly after Idx. (The VNInfo parameter line, the PHI-kill
// `continue`, and the returns are elided in this excerpt.)
1282 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1284 MachineBasicBlock *MBB,
1285 SlotIndex Idx) const {
1286 SlotIndex End = getMBBEndIdx(MBB);
1287 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are not real instruction kills; they are skipped.
1288 if (VNI->kills[j].isPHI())
1291 SlotIndex KillIdx = VNI->kills[j];
1292 if (KillIdx > Idx && KillIdx <= End)
1298 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1299 /// during spilling.
// Records one def/use occurrence: the instruction, its slot index, and
// whether that occurrence reads (HasUse) and/or writes (HasDef) the register.
1301 struct RewriteInfo {
1306 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1307 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering of RewriteInfo entries by instruction slot index,
// used to sort the rewrite worklist into program order.
1310 struct RewriteInfoCompare {
1311 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1312 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs/uses of li.reg inside the
// live range [I->start, I->end), recording per-MBB spill and restore points.
// NOTE(review): many original lines are elided in this excerpt (iterator
// advancement, `continue`s, else branches, closing braces); the comments
// below only describe what the visible lines establish.
1317 void LiveIntervals::
1318 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1319 LiveInterval::Ranges::const_iterator &I,
1320 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1321 unsigned Slot, int LdSlot,
1322 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1324 const TargetRegisterClass* rc,
1325 SmallVector<int, 4> &ReMatIds,
1326 const MachineLoopInfo *loopInfo,
1327 BitVector &SpillMBBs,
1328 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1329 BitVector &RestoreMBBs,
1330 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1331 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1332 std::vector<LiveInterval*> &NewLIs) {
1333 bool AllCanFold = true;
1334 unsigned NewVReg = 0;
// Normalize the range boundaries to base (instruction) indexes.
1335 SlotIndex start = I->start.getBaseIndex();
1336 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1338 // First collect all the def / use in this live range that will be rewritten.
1339 // Make sure they are sorted according to instruction index.
1340 std::vector<RewriteInfo> RewriteMIs;
1341 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1342 re = mri_->reg_end(); ri != re; ) {
1343 MachineInstr *MI = &*ri;
1344 MachineOperand &O = ri.getOperand();
1346 if (MI->isDebugValue()) {
1347 // Modify DBG_VALUE now that the value is in a spill slot.
1348 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1349 uint64_t Offset = MI->getOperand(1).getImm();
1350 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1351 DebugLoc DL = MI->getDebugLoc();
1352 int FI = isLoadSS ? LdSlot : (int)Slot;
// Try to retarget the DBG_VALUE at the frame index; if the target cannot,
// the debug info is dropped below instead.
1353 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1354 Offset, MDPtr, DL)) {
1355 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1356 ReplaceMachineInstrInMaps(MI, NewDV);
1357 MachineBasicBlock *MBB = MI->getParent();
1358 MBB->insert(MBB->erase(MI), NewDV);
1363 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1364 RemoveMachineInstrFromMaps(MI);
1365 vrm.RemoveMachineInstrFromMaps(MI);
1366 MI->eraseFromParent();
1369 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1370 SlotIndex index = getInstructionIndex(MI);
// Only occurrences inside this live range are rewritten.
1371 if (index < start || index >= end)
1375 // Must be defined by an implicit def. It should not be spilled. Note,
1376 // this is for correctness reason. e.g.
1377 // 8 %reg1024<def> = IMPLICIT_DEF
1378 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1379 // The live range [12, 14) are not part of the r1024 live interval since
1380 // it's defined by an implicit def. It will not conflicts with live
1381 // interval of r1025. Now suppose both registers are spilled, you can
1382 // easily see a situation where both registers are reloaded before
1383 // the INSERT_SUBREG and both target registers that would overlap.
1385 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1387 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat'ed def itself reads a virtual register, that register must be
// kept live (and unspillable) at every rematerialization point.
1389 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1390 // Now rewrite the defs and uses.
1391 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1392 RewriteInfo &rwi = RewriteMIs[i];
1394 SlotIndex index = rwi.Index;
1395 bool MIHasUse = rwi.HasUse;
1396 bool MIHasDef = rwi.HasDef;
1397 MachineInstr *MI = rwi.MI;
1398 // If MI def and/or use the same register multiple times, then there
1399 // are multiple entries.
1400 unsigned NumUses = MIHasUse;
// Coalesce all entries for the same instruction into one rewrite.
1401 while (i != e && RewriteMIs[i].MI == MI) {
1402 assert(RewriteMIs[i].Index == index);
1403 bool isUse = RewriteMIs[i].HasUse;
1404 if (isUse) ++NumUses;
1406 MIHasDef |= RewriteMIs[i].HasDef;
1409 MachineBasicBlock *MBB = MI->getParent();
1411 if (ImpUse && MI != ReMatDefMI) {
1412 // Re-matting an instruction with virtual register use. Prevent interval
1413 // from being spilled.
1414 getInterval(ImpUse).markNotSpillable();
1417 unsigned MBBId = MBB->getNumber();
1418 unsigned ThisVReg = 0;
// Reuse the vreg already created for this MBB, if any.
1420 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1421 if (NVI != MBBVRegsMap.end()) {
1422 ThisVReg = NVI->second;
1429 // It's better to start a new interval to avoid artifically
1430 // extend the new interval.
1431 if (MIHasDef && !MIHasUse) {
1432 MBBVRegsMap.erase(MBB->getNumber());
1438 bool IsNew = ThisVReg == 0;
1440 // This ends the previous live interval. If all of its def / use
1441 // can be folded, give it a low spill weight.
1442 if (NewVReg && TrySplit && AllCanFold) {
1443 LiveInterval &nI = getOrCreateInterval(NewVReg);
1450 bool HasDef = false;
1451 bool HasUse = false;
1452 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1453 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1454 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1455 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1456 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1457 if (!HasDef && !HasUse)
1460 AllCanFold &= CanFold;
1462 // Update weight of spill interval.
1463 LiveInterval &nI = getOrCreateInterval(NewVReg);
1465 // The spill weight is now infinity as it cannot be spilled again.
1466 nI.markNotSpillable();
1470 // Keep track of the last def and first use in each MBB.
1472 if (MI != ReMatOrigDefMI || !CanDelete) {
1473 bool HasKill = false;
1475 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1477 // If this is a two-address code, then this index starts a new VNInfo.
1478 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1480 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1482 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1483 SpillIdxes.find(MBBId);
// Record (or update) the spill point for this vreg in this MBB.
1485 if (SII == SpillIdxes.end()) {
1486 std::vector<SRInfo> S;
1487 S.push_back(SRInfo(index, NewVReg, true));
1488 SpillIdxes.insert(std::make_pair(MBBId, S));
1489 } else if (SII->second.back().vreg != NewVReg) {
1490 SII->second.push_back(SRInfo(index, NewVReg, true));
1491 } else if (index > SII->second.back().index) {
1492 // If there is an earlier def and this is a two-address
1493 // instruction, then it's not possible to fold the store (which
1494 // would also fold the load).
1495 SRInfo &Info = SII->second.back();
1497 Info.canFold = !HasUse;
1499 SpillMBBs.set(MBBId);
1500 } else if (SII != SpillIdxes.end() &&
1501 SII->second.back().vreg == NewVReg &&
1502 index > SII->second.back().index) {
1503 // There is an earlier def that's not killed (must be two-address).
1504 // The spill is no longer needed.
1505 SII->second.pop_back();
1506 if (SII->second.empty()) {
1507 SpillIdxes.erase(MBBId);
1508 SpillMBBs.reset(MBBId);
// Use side: decide whether a restore is needed and whether it can fold.
1515 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1516 SpillIdxes.find(MBBId);
1517 if (SII != SpillIdxes.end() &&
1518 SII->second.back().vreg == NewVReg &&
1519 index > SII->second.back().index)
1520 // Use(s) following the last def, it's not safe to fold the spill.
1521 SII->second.back().canFold = false;
1522 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1523 RestoreIdxes.find(MBBId);
1524 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1525 // If we are splitting live intervals, only fold if it's the first
1526 // use and there isn't another use later in the MBB.
1527 RII->second.back().canFold = false;
1529 // Only need a reload if there isn't an earlier def / use.
1530 if (RII == RestoreIdxes.end()) {
1531 std::vector<SRInfo> Infos;
1532 Infos.push_back(SRInfo(index, NewVReg, true));
1533 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1535 RII->second.push_back(SRInfo(index, NewVReg, true));
1537 RestoreMBBs.set(MBBId);
1541 // Update spill weight.
1542 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1543 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1546 if (NewVReg && TrySplit && AllCanFold) {
1547 // If all of its def / use can be folded, give it a low spill weight.
1548 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if basic block Id has a foldable restore of
// vreg `vr` recorded at `index`. (The early `return false` and the final
// returns are elided in this excerpt.)
1553 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1554 unsigned vr, BitVector &RestoreMBBs,
1555 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1556 if (!RestoreMBBs[Id])
1558 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1559 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1560 if (Restores[i].index == index &&
1561 Restores[i].vreg == vr &&
1562 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore entries of vreg `vr` at `index`
// in block Id by clearing their index (entries with a default SlotIndex are
// skipped by the restore-emission loop).
1567 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1568 unsigned vr, BitVector &RestoreMBBs,
1569 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1570 if (!RestoreMBBs[Id])
1572 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1573 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): the condition tests `Restores[i].vreg` for truthiness rather
// than `== vr`; presumably an equality check was intended — confirm against
// the full source / upstream history before changing.
1574 if (Restores[i].index == index && Restores[i].vreg)
1575 Restores[i].index = SlotIndex();
1578 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1579 /// spilled and create empty intervals for their uses.
// NOTE(review): the `void` return-type line, iterator advancement, and several
// `continue`s / closing braces are elided in this excerpt.
1581 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1582 const TargetRegisterClass* rc,
1583 std::vector<LiveInterval*> &NewLIs) {
1584 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1585 re = mri_->reg_end(); ri != re; ) {
1586 MachineOperand &O = ri.getOperand();
1587 MachineInstr *MI = &*ri;
1589 if (MI->isDebugValue()) {
1590 // Remove debug info for now.
1592 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any remaining def of li.reg at this point must be an IMPLICIT_DEF;
// real defs were rewritten earlier.
1596 assert(MI->isImplicitDef() &&
1597 "Register def was not rewritten?");
1598 RemoveMachineInstrFromMaps(MI);
1599 vrm.RemoveMachineInstrFromMaps(MI);
1600 MI->eraseFromParent();
1602 // This must be an use of an implicit_def so it's not part of the live
1603 // interval. Create a new empty live interval for it.
1604 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1605 unsigned NewVReg = mri_->createVirtualRegister(rc);
1607 vrm.setIsImplicitlyDefined(NewVReg);
1608 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand that references li.reg to the fresh vreg.
1609 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1610 MachineOperand &MO = MI->getOperand(i);
1611 if (MO.isReg() && MO.getReg() == li.reg) {
// getSpillWeight - Compute the spill weight contribution of a single def/use
// at the given loop depth. (The float return-type line and the clamp of
// loopDepth after the `> 200` test are elided in this excerpt.)
1621 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1622 // Limit the loop depth ridiculousness.
1623 if (loopDepth > 200)
1626 // The loop depth is used to roughly estimate the number of times the
1627 // instruction is executed. Something like 10^d is simple, but will quickly
1628 // overflow a float. This expression behaves like 10^d for small d, but is
1629 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1630 // headroom before overflow.
1631 float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are bools: each contributes one unit scaled by the loop factor.
1633 return (isDef + isUse) * lc;
// normalizeSpillWeights - Apply normalizeSpillWeight to every interval
// produced by spilling. (The `void` return-type line is elided here.)
1637 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1638 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1639 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpillsFast - Simple one-vreg-per-instruction spiller used
// when -fast-spill is enabled: assign a stack slot, then for each instruction
// touching li.reg either fold the memory access or create a tiny interval
// with a reload before and/or a store after. NOTE(review): several lines
// (VirtRegMap parameter, iterator advancement to skip rewritten operands,
// closing braces, final return) are elided in this excerpt.
1642 std::vector<LiveInterval*> LiveIntervals::
1643 addIntervalsForSpillsFast(const LiveInterval &li,
1644 const MachineLoopInfo *loopInfo,
1646 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1648 std::vector<LiveInterval*> added;
1650 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1653 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1658 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1660 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1661 while (RI != mri_->reg_end()) {
1662 MachineInstr* MI = &*RI;
// Collect all operand indexes of MI that reference li.reg.
1664 SmallVector<unsigned, 2> Indices;
1665 bool HasUse = false;
1666 bool HasDef = false;
1668 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1669 MachineOperand& mop = MI->getOperand(i);
1670 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1672 HasUse |= MI->getOperand(i).isUse();
1673 HasDef |= MI->getOperand(i).isDef();
1675 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; only on failure do we
// create a new vreg with explicit reload/spill points.
1678 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1679 Indices, true, slot, li.reg)) {
1680 unsigned NewVReg = mri_->createVirtualRegister(rc);
1682 vrm.assignVirt2StackSlot(NewVReg, slot);
1684 // create a new register for this spill
1685 LiveInterval &nI = getOrCreateInterval(NewVReg);
1686 nI.markNotSpillable();
1688 // Rewrite register operands to use the new vreg.
1689 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1690 E = Indices.end(); I != E; ++I) {
1691 MI->getOperand(*I).setReg(NewVReg);
// The new interval ends at this instruction, so uses are kills.
1693 if (MI->getOperand(*I).isUse())
1694 MI->getOperand(*I).setIsKill(true);
1697 // Fill in the new live interval.
1698 SlotIndex index = getInstructionIndex(MI);
// Use side: live from the reload point up to the use slot.
1700 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1701 nI.getNextValue(SlotIndex(), 0, false,
1702 getVNInfoAllocator()));
1703 DEBUG(dbgs() << " +" << LR);
1705 vrm.addRestorePoint(NewVReg, MI);
// Def side: live from the def slot down to the store point.
1708 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1709 nI.getNextValue(SlotIndex(), 0, false,
1710 getVNInfoAllocator()));
1711 DEBUG(dbgs() << " +" << LR);
1713 vrm.addSpillPoint(NewVReg, true, MI);
1716 added.push_back(&nI);
1719 dbgs() << "\t\t\t\tadded new interval: ";
// Restart the reg_iterator: operands of MI were rewritten away from li.reg.
1726 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spiller entry point: rewrite all defs/uses of
// li, decide rematerialization per val#, assign a stack slot if needed, then
// emit (or fold) the required spills and restores. Returns the new intervals
// created for the spill. NOTE(review): this excerpt elides many original
// lines (early returns, else branches, loop-iterator advancement, closing
// braces); comments below only describe what the visible lines establish.
1732 std::vector<LiveInterval*> LiveIntervals::
1733 addIntervalsForSpills(const LiveInterval &li,
1734 SmallVectorImpl<LiveInterval*> &SpillIs,
1735 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1737 if (EnableFastSpilling)
1738 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1740 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1743 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1744 li.print(dbgs(), tri_);
1748 // Each bit specify whether a spill is required in the MBB.
1749 BitVector SpillMBBs(mf_->getNumBlockIDs());
1750 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1751 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1752 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1753 DenseMap<unsigned,unsigned> MBBVRegsMap;
1754 std::vector<LiveInterval*> NewLIs;
1755 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization bookkeeping, indexed by VNInfo id.
1757 unsigned NumValNums = li.getNumValNums();
1758 SmallVector<MachineInstr*, 4> ReMatDefs;
1759 ReMatDefs.resize(NumValNums, NULL);
1760 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1761 ReMatOrigDefs.resize(NumValNums, NULL);
1762 SmallVector<int, 4> ReMatIds;
1763 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1764 BitVector ReMatDelete(NumValNums);
1765 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1767 // Spilling a split live interval. It cannot be split any further. Also,
1768 // it's also guaranteed to be a single val# / range interval.
1769 if (vrm.getPreSplitReg(li.reg)) {
1770 vrm.setIsSplitFromReg(li.reg, 0);
1771 // Unset the split kill marker on the last use.
1772 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1773 if (KillIdx != SlotIndex()) {
1774 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1775 assert(KillMI && "Last use disappeared?");
1776 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1777 assert(KillOp != -1 && "Last use disappeared?");
1778 KillMI->getOperand(KillOp).setIsKill(false);
1780 vrm.removeKillPoint(li.reg);
1781 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1782 Slot = vrm.getStackSlot(li.reg);
1783 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1784 MachineInstr *ReMatDefMI = DefIsReMat ?
1785 vrm.getReMaterializedMI(li.reg) : NULL;
1787 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1788 bool isLoad = isLoadSS ||
1789 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1790 bool IsFirstRange = true;
1791 for (LiveInterval::Ranges::const_iterator
1792 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1793 // If this is a split live interval with multiple ranges, it means there
1794 // are two-address instructions that re-defined the value. Only the
1795 // first def can be rematerialized!
1797 // Note ReMatOrigDefMI has already been deleted.
1798 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1799 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1800 false, vrm, rc, ReMatIds, loopInfo,
1801 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1802 MBBVRegsMap, NewLIs);
1804 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1805 Slot, 0, false, false, false,
1806 false, vrm, rc, ReMatIds, loopInfo,
1807 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1808 MBBVRegsMap, NewLIs);
1810 IsFirstRange = false;
1813 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1814 normalizeSpillWeights(NewLIs);
// Splitting is only worthwhile when the interval spans multiple blocks.
1818 bool TrySplit = !intervalIsInOneMBB(li);
1821 bool NeedStackSlot = false;
1822 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1824 const VNInfo *VNI = *i;
1825 unsigned VN = VNI->id;
1826 if (VNI->isUnused())
1827 continue; // Dead val#.
1828 // Is the def for the val# rematerializable?
1829 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1830 ? getInstructionFromIndex(VNI->def) : 0;
1832 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1833 // Remember how to remat the def of this val#.
1834 ReMatOrigDefs[VN] = ReMatDefMI;
1835 // Original def may be modified so we have to make a copy here.
1836 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1837 CloneMIs.push_back(Clone);
1838 ReMatDefs[VN] = Clone;
1840 bool CanDelete = true;
1841 if (VNI->hasPHIKill()) {
1842 // A kill is a phi node, not all of its uses can be rematerialized.
1843 // It must not be deleted.
1845 // Need a stack slot if there is any live range where uses cannot be
1847 NeedStackSlot = true;
1850 ReMatDelete.set(VN);
1852 // Need a stack slot if there is any live range where uses cannot be
1854 NeedStackSlot = true;
1858 // One stack slot per live interval.
1859 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1860 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1861 Slot = vrm.assignVirt2StackSlot(li.reg);
1863 // This case only occurs when the prealloc splitter has already assigned
1864 // a stack slot to this vreg.
1866 Slot = vrm.getStackSlot(li.reg);
1869 // Create new intervals and rewrite defs and uses.
1870 for (LiveInterval::Ranges::const_iterator
1871 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1872 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1873 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1874 bool DefIsReMat = ReMatDefMI != NULL;
1875 bool CanDelete = ReMatDelete[I->valno->id];
1877 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1878 bool isLoad = isLoadSS ||
1879 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1880 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1881 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1882 CanDelete, vrm, rc, ReMatIds, loopInfo,
1883 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1884 MBBVRegsMap, NewLIs);
1887 // Insert spills / restores if we are splitting.
1889 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1890 normalizeSpillWeights(NewLIs);
1894 SmallPtrSet<LiveInterval*, 4> AddedKill;
1895 SmallVector<unsigned, 2> Ops;
// Emit (or fold) a store after each recorded spill point.
1896 if (NeedStackSlot) {
1897 int Id = SpillMBBs.find_first();
1899 std::vector<SRInfo> &spills = SpillIdxes[Id];
1900 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1901 SlotIndex index = spills[i].index;
1902 unsigned VReg = spills[i].vreg;
1903 LiveInterval &nI = getOrCreateInterval(VReg);
1904 bool isReMat = vrm.isReMaterialized(VReg);
1905 MachineInstr *MI = getInstructionFromIndex(index);
1906 bool CanFold = false;
1907 bool FoundUse = false;
1909 if (spills[i].canFold) {
1911 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1912 MachineOperand &MO = MI->getOperand(j);
1913 if (!MO.isReg() || MO.getReg() != VReg)
1920 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1921 RestoreMBBs, RestoreIdxes))) {
1922 // MI has two-address uses of the same register. If the use
1923 // isn't the first and only use in the BB, then we can't fold
1924 // it. FIXME: Move this to rewriteInstructionsForSpills.
1931 // Fold the store into the def if possible.
1932 bool Folded = false;
1933 if (CanFold && !Ops.empty()) {
1934 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1937 // Also folded uses, do not issue a load.
1938 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1939 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1941 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1945 // Otherwise tell the spiller to issue a spill.
1947 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1948 bool isKill = LR->end == index.getStoreIndex();
1949 if (!MI->registerDefIsDead(nI.reg))
1950 // No need to spill a dead def.
1951 vrm.addSpillPoint(VReg, isKill, MI);
1953 AddedKill.insert(&nI);
1956 Id = SpillMBBs.find_next(Id);
// Emit (or fold) a reload / rematerialization at each recorded restore point.
1960 int Id = RestoreMBBs.find_first();
1962 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1963 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1964 SlotIndex index = restores[i].index;
// A default-constructed index marks an entry erased by eraseRestoreInfo.
1965 if (index == SlotIndex())
1967 unsigned VReg = restores[i].vreg;
1968 LiveInterval &nI = getOrCreateInterval(VReg);
1969 bool isReMat = vrm.isReMaterialized(VReg);
1970 MachineInstr *MI = getInstructionFromIndex(index);
1971 bool CanFold = false;
1973 if (restores[i].canFold) {
1975 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1976 MachineOperand &MO = MI->getOperand(j);
1977 if (!MO.isReg() || MO.getReg() != VReg)
1981 // If this restore were to be folded, it would have been folded
1990 // Fold the load into the use if possible.
1991 bool Folded = false;
1992 if (CanFold && !Ops.empty()) {
1994 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1996 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1998 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1999 // If the rematerializable def is a load, also try to fold it.
2000 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2001 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2002 Ops, isLoadSS, LdSlot, VReg);
2004 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2006 // Re-matting an instruction with virtual register use. Add the
2007 // register as an implicit use on the use MI and mark the register
2008 // interval as unspillable.
2009 LiveInterval &ImpLi = getInterval(ImpUse);
2010 ImpLi.markNotSpillable();
2011 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2016 // If folding is not possible / failed, then tell the spiller to issue a
2017 // load / rematerialization for us.
2019 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2021 vrm.addRestorePoint(VReg, MI);
2023 Id = RestoreMBBs.find_next(Id);
2026 // Finalize intervals: add kills, finalize spill weights, and filter out
2028 std::vector<LiveInterval*> RetNewLIs;
2029 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2030 LiveInterval *LI = NewLIs[i];
2032 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2033 if (!AddedKill.count(LI)) {
// Mark the last use of each new interval as a kill, unless it is tied to a
// def operand (two-address), and record the kill point in the VirtRegMap.
2034 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2035 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2036 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2037 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2038 assert(UseIdx != -1);
2039 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2040 LastUse->getOperand(UseIdx).setIsKill();
2041 vrm.addKillPoint(LI->reg, LastUseIdx);
2044 RetNewLIs.push_back(LI);
2048 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2049 normalizeSpillWeights(RetNewLIs);
2053 /// hasAllocatableSuperReg - Return true if the specified physical register has
2054 /// any super register that's allocatable.
2055 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// Walk the null-terminated super-register list of Reg; a candidate counts
// only if it is both allocatable and tracked with a live interval.
2056 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2057 if (allocatableRegs_[*AS] && hasInterval(*AS))
// NOTE(review): the return statements (presumably "return true;" here and a
// trailing "return false;") fall in elided source lines 2058-2061 — confirm
// against the full file.
2062 /// getRepresentativeReg - Find the largest super register of the specified
2063 /// physical register.
2064 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2065 // Find the largest super-register that is allocatable.
// Start from Reg itself; it is returned unchanged if no better super-register
// is found (see the elided tail, source lines 2070-2076).
2066 unsigned BestReg = Reg;
2067 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2068 unsigned SuperReg = *AS;
// A super-register with an interval but no allocatable super of its own is
// the top of its allocatable chain — the "largest" representative.
2069 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
// NOTE(review): the body that records SuperReg into BestReg and the final
// "return BestReg;" are in elided lines — confirm against the full file.
2077 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2078 /// specified interval that conflicts with the specified physical register.
2079 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2080 unsigned PhysReg) const {
2081 unsigned NumConflicts = 0;
// Conflicts are checked against the representative (largest) super-register's
// interval, not PhysReg's own.
2082 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Iterate over every machine operand (defs and uses) of li.reg.
2083 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2084 E = mri_->reg_end(); I != E; ++I) {
2085 MachineOperand &O = I.getOperand();
2086 MachineInstr *MI = O.getParent();
// DBG_VALUE instructions have no slot index and must not count.
2087 if (MI->isDebugValue())
2089 SlotIndex Index = getInstructionIndex(MI);
// If the physreg interval is live where this instruction sits, it conflicts.
2090 if (pli.liveAt(Index))
// NOTE(review): the "continue;" after the debug-value check and the
// "++NumConflicts;" increment fall in elided source lines (2088, 2091-2092).
2093 return NumConflicts;
2096 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2097 /// around all defs and uses of the specified interval. Return true if it
2098 /// was able to cut its interval.
2099 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2100 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the whole representative super-register, so aliases are covered.
2101 unsigned SpillReg = getRepresentativeReg(PhysReg);
2103 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2104 // If there are registers which alias PhysReg, but which are not a
2105 // sub-register of the chosen representative super register. Assert
2106 // since we can't handle it yet.
2107 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2108 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and every sub-register that has a live interval; these
// are the physreg intervals that must be cut around li's instructions.
2111 SmallVector<unsigned, 4> PRegs;
2112 if (hasInterval(SpillReg))
2113 PRegs.push_back(SpillReg);
2115 SmallSet<unsigned, 4> Added;
2116 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2117 if (Added.insert(*AS) && hasInterval(*AS)) {
2118 PRegs.push_back(*AS);
// Pre-mark nested sub-registers so they are not added twice.
// NOTE(review): the loop body inserting *ASS into Added falls in elided
// source lines (2120-2123) — confirm against the full file.
2119 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit every instruction that defs/uses li.reg exactly once.
2124 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2125 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2126 E = mri_->reg_end(); I != E; ++I) {
2127 MachineOperand &O = I.getOperand();
2128 MachineInstr *MI = O.getParent();
// Skip debug instructions and instructions already processed.
2129 if (MI->isDebugValue() || SeenMIs.count(MI))
2132 SlotIndex Index = getInstructionIndex(MI);
2133 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2134 unsigned PReg = PRegs[i];
2135 LiveInterval &pli = getInterval(PReg);
// Only physreg intervals live across this instruction need a spill here.
2136 if (!pli.liveAt(Index))
// Ask the register allocator machinery to emergency-spill PReg around MI.
2138 vrm.addEmergencySpill(PReg, MI);
// Carve [load-slot of MI, base of next instruction) out of the interval.
2139 SlotIndex StartIdx = Index.getLoadIndex();
2140 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2141 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2142 pli.removeRange(StartIdx, EndIdx);
// Failure path: the range cannot be cut cleanly — report a fatal
// register-allocation error (the "msg" declaration is in elided lines).
2146 raw_string_ostream Msg(msg);
2147 Msg << "Ran out of registers during register allocation!";
// Inline asm with bad constraints is the common trigger; show it to the user.
2148 if (MI->isInlineAsm()) {
2149 Msg << "\nPlease check your inline asm statement for invalid "
2150 << "constraints:\n";
2151 MI->print(Msg, tm_);
2153 report_fatal_error(Msg.str());
// Also trim any live sub-register intervals of PReg at this point.
2155 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2156 if (!hasInterval(*AS))
2158 LiveInterval &spli = getInterval(*AS);
2159 if (spli.liveAt(Index))
2160 spli.removeRange(Index.getLoadIndex(),
2161 Index.getNextIndex().getBaseIndex());
// NOTE(review): the "Cut" result flag and the final return fall in elided
// source lines (2162-2166) — confirm against the full file.
// addLiveRangeToEndOfBlock - Create (or extend) reg's interval with a new
// value defined at startInst and live out to the end of its basic block.
// NOTE(review): the function's tail (presumably "return LR; }") lies beyond
// this excerpt, and the "LiveRange LR(" constructor line (source line 2176)
// is elided between the kills push and the range arguments below.
2168 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2169 MachineInstr* startInst) {
2170 LiveInterval& Interval = getOrCreateInterval(reg);
// New value number defined at startInst's def slot.
2171 VNInfo* VN = Interval.getNextValue(
2172 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2173 startInst, true, getVNInfoAllocator());
// The value is consumed by a PHI; kill it at the block's terminator gap.
2174 VN->setHasPHIKill(true);
2175 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
// Range covers [def of startInst, end of startInst's MBB).
2177 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2178 getMBBEndIdx(startInst->getParent()), VN);
2179 Interval.addRange(LR);