1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
40 // Hidden options for help debugging.
// Kill switch for rematerialization, useful when bisecting spiller bugs.
41 static cl::opt<bool> DisableReMat("disable-rematerialization",
42 cl::init(false), cl::Hidden);
// Enable splitting live intervals at basic-block boundaries (default on).
44 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45 cl::init(true), cl::Hidden);
// Upper bound on the number of interval splits; -1 means no limit.
46 static cl::opt<int> SplitLimit("split-limit",
47 cl::init(-1), cl::Hidden);
// Pass statistics, reported when the driver is run with -stats.
50 STATISTIC(numIntervals, "Number of original intervals");
51 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
53 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration with the PassManager.
55 char LiveIntervals::ID = 0;
57 RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare pass dependencies: LiveVariables is both
// required and preserved, and PHI elimination plus two-address lowering
// must run before live intervals are computed.
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61 AU.addPreserved<LiveVariables>();
62 AU.addRequired<LiveVariables>();
// Loop info, dominators and PHI elimination results survive this pass.
63 AU.addPreservedID(MachineLoopInfoID);
64 AU.addPreservedID(MachineDominatorsID);
65 AU.addPreservedID(PHIEliminationID);
66 AU.addRequiredID(PHIEliminationID);
67 AU.addRequiredID(TwoAddressInstructionPassID);
// Defer to the base class for the common MachineFunctionPass usage.
68 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state so the pass can be rerun
// on another function without leaking the previous function's data.
71 void LiveIntervals::releaseMemory() {
76 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
77 VNInfoAllocator.Reset();
// Free instructions cloned for rematerialization.
// NOTE(review): loop body is not visible here — presumably deletes
// each ClonedMIs[i]; confirm against the full source.
78 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
82 /// runOnMachineFunction - Register allocate the whole function
///
/// Caches target/register info, numbers every instruction and basic block
/// (InstrSlots::NUM slots per instruction), then computes the intervals
/// and updates the pass statistics.
84 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache frequently-used objects from the function and its target.
86 mri_ = &mf_->getRegInfo();
87 tm_ = &fn.getTarget();
88 tri_ = tm_->getRegisterInfo();
89 tii_ = tm_->getInstrInfo();
90 lv_ = &getAnalysis<LiveVariables>();
91 allocatableRegs_ = tri_->getAllocatableSet(fn);
93 // Number MachineInstrs and MachineBasicBlocks.
94 // Initialize MBB indexes to a sentinal.
95 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
98 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
100 unsigned StartIdx = MIIndex;
// Assign each instruction a unique, monotonically increasing index.
102 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
104 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
105 assert(inserted && "multiple MachineInstr -> index mappings");
106 i2miMap_.push_back(I);
107 MIIndex += InstrSlots::NUM;
110 // Set the MBB2IdxMap entry for this MBB.
111 MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
112 ? std::make_pair(StartIdx, StartIdx) // Empty MBB
113 : std::make_pair(StartIdx, MIIndex - 1);
114 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep the index->MBB map sorted so lower_bound lookups work.
116 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
120 numIntervals += getNumIntervals();
// Dump the computed intervals when -debug is enabled.
122 DOUT << "********** INTERVALS **********\n";
123 for (iterator I = begin(), E = end(); I != E; ++I) {
124 I->second.print(DOUT, tri_);
128 numIntervalsAfter += getNumIntervals();
133 /// print - Implement the dump method.
///
/// Prints every live interval followed by every machine instruction with
/// its numeric index, for debugging.
134 void LiveIntervals::print(std::ostream &O, const Module* ) const {
135 O << "********** INTERVALS **********\n";
136 for (const_iterator I = begin(), E = end(); I != E; ++I) {
137 I->second.print(DOUT, tri_);
141 O << "********** MACHINEINSTRS **********\n";
142 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
143 mbbi != mbbe; ++mbbi) {
// Label each block with the name of its originating IR basic block.
144 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
145 for (MachineBasicBlock::iterator mii = mbbi->begin(),
146 mie = mbbi->end(); mii != mie; ++mii) {
147 O << getInstructionIndex(mii) << '\t' << *mii;
152 /// conflictsWithPhysRegDef - Returns true if the specified register
153 /// is defined during the duration of the specified interval.
///
/// Walks every instruction index covered by li's ranges and checks each
/// register operand (mapping virtual registers through the VirtRegMap)
/// for overlap with 'reg'. Copies involving li.reg itself are skipped.
154 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
155 VirtRegMap &vrm, unsigned reg) {
156 for (LiveInterval::Ranges::const_iterator
157 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Step through the range one instruction slot group at a time.
158 for (unsigned index = getBaseIndex(I->start),
159 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
160 index += InstrSlots::NUM) {
161 // skip deleted instructions
162 while (index != end && !getInstructionFromIndex(index))
163 index += InstrSlots::NUM;
164 if (index == end) break;
166 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from li.reg are coalescing candidates, not conflicts.
167 unsigned SrcReg, DstReg;
168 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
169 if (SrcReg == li.reg || DstReg == li.reg)
171 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
172 MachineOperand& mop = MI->getOperand(i);
173 if (!mop.isRegister())
175 unsigned PhysReg = mop.getReg();
176 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual registers conflict through their assigned physreg, if any.
178 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
179 if (!vrm.hasPhys(PhysReg))
181 PhysReg = vrm.getPhys(PhysReg);
183 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Debug helper: print a physical register by its target
// name, or a virtual register as "%reg<N>", to stderr.
192 void LiveIntervals::printRegName(unsigned reg) const {
193 if (TargetRegisterInfo::isPhysicalRegister(reg))
194 cerr << tri_->getName(reg);
196 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend 'interval' for a definition of a
// virtual register by instruction 'mi'. The first definition seen for a
// vreg builds the interval from LiveVariables kill information; later
// definitions come from two-address rewriting or PHI elimination and
// split/extend the existing interval accordingly.
199 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
200 MachineBasicBlock::iterator mi,
202 LiveInterval &interval) {
203 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
204 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
206 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
207 DOUT << "is a implicit_def\n";
211 // Virtual registers may be defined multiple times (due to phi
212 // elimination and 2-addr elimination). Much of what we do only has to be
213 // done once for the vreg. We use an empty interval to detect the first
214 // time we see a vreg.
215 if (interval.empty()) {
216 // Get the Idx of the defining instructions.
217 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy instruction (if any) so later phases can
// identify coalescing/rematerialization sources for this value number.
219 MachineInstr *CopyMI = NULL;
220 unsigned SrcReg, DstReg;
221 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
222 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
223 tii_->isMoveInstr(*mi, SrcReg, DstReg))
225 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
227 assert(ValNo->id == 0 && "First value in interval is not 0?");
229 // Loop over all of the blocks that the vreg is defined in. There are
230 // two cases we have to handle here. The most common case is a vreg
231 // whose lifetime is contained within a basic block. In this case there
232 // will be a single kill, in MBB, which comes after the definition.
233 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
234 // FIXME: what about dead vars?
236 if (vi.Kills[0] != mi)
237 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
// Defining instruction is also the kill: the value is dead-on-def.
239 killIdx = defIndex+1;
241 // If the kill happens after the definition, we have an intra-block
243 if (killIdx > defIndex) {
244 assert(vi.AliveBlocks.none() &&
245 "Shouldn't be alive across any blocks!");
246 LiveRange LR(defIndex, killIdx, ValNo);
247 interval.addRange(LR);
248 DOUT << " +" << LR << "\n";
249 interval.addKill(ValNo, killIdx);
254 // The other case we handle is when a virtual register lives to the end
255 // of the defining block, potentially live across some blocks, then is
256 // live into some number of blocks, but gets killed. Start by adding a
257 // range that goes from this definition to the end of the defining block.
258 LiveRange NewLR(defIndex,
259 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
261 DOUT << " +" << NewLR;
262 interval.addRange(NewLR);
264 // Iterate over all of the blocks that the variable is completely
265 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
267 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
268 if (vi.AliveBlocks[i]) {
269 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
271 LiveRange LR(getMBBStartIdx(i),
272 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
274 interval.addRange(LR);
280 // Finally, this virtual register is live from the start of any killing
281 // block to the 'use' slot of the killing instruction.
282 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
283 MachineInstr *Kill = vi.Kills[i];
284 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
285 LiveRange LR(getMBBStartIdx(Kill->getParent()),
287 interval.addRange(LR);
288 interval.addKill(ValNo, killIdx);
293 // If this is the second time we see a virtual register definition, it
294 // must be due to phi elimination or two addr elimination. If this is
295 // the result of two address elimination, then the vreg is one of the
296 // def-and-use register operand.
297 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
298 // If this is a two-address definition, then we have already processed
299 // the live range. The only problem is that we didn't realize there
300 // are actually two values in the live interval. Because of this we
301 // need to take the LiveRegion that defines this register and split it
303 assert(interval.containsOneValue());
304 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
305 unsigned RedefIndex = getDefIndex(MIIdx);
// The range reaching the instruction just before the redef holds the
// value number that must now end at the redef.
307 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
308 VNInfo *OldValNo = OldLR->valno;
310 // Delete the initial value, which should be short and continuous,
311 // because the 2-addr copy must be in the same MBB as the redef.
312 interval.removeRange(DefIndex, RedefIndex);
314 // Two-address vregs should always only be redefined once. This means
315 // that at this point, there should be exactly one value number in it.
316 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
318 // The new value number (#1) is defined by the instruction we claimed
320 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
323 // Value#0 is now defined by the 2-addr instruction.
324 OldValNo->def = RedefIndex;
327 // Add the new live interval which replaces the range for the input copy.
328 LiveRange LR(DefIndex, RedefIndex, ValNo);
329 DOUT << " replace range with " << LR;
330 interval.addRange(LR);
331 interval.addKill(ValNo, RedefIndex);
333 // If this redefinition is dead, we need to add a dummy unit live
334 // range covering the def slot.
335 if (mi->registerDefIsDead(interval.reg, tri_))
336 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
339 interval.print(DOUT, tri_);
342 // Otherwise, this must be because of phi elimination. If this is the
343 // first redefinition of the vreg that we have seen, go back and change
344 // the live range in the PHI block to be a different value number.
345 if (interval.containsOneValue()) {
346 assert(vi.Kills.size() == 1 &&
347 "PHI elimination vreg should have one kill, the PHI itself!");
349 // Remove the old range that we now know has an incorrect number.
350 VNInfo *VNI = interval.getValNumInfo(0);
351 MachineInstr *Killer = vi.Kills[0];
352 unsigned Start = getMBBStartIdx(Killer->getParent());
353 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
354 DOUT << " Removing [" << Start << "," << End << "] from: ";
355 interval.print(DOUT, tri_); DOUT << "\n";
356 interval.removeRange(Start, End);
357 VNI->hasPHIKill = true;
358 DOUT << " RESULT: "; interval.print(DOUT, tri_);
360 // Replace the interval with one of a NEW value number. Note that this
361 // value number isn't actually defined by an instruction, weird huh? :)
362 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
363 DOUT << " replace range with " << LR;
364 interval.addRange(LR);
365 interval.addKill(LR.valno, End);
366 DOUT << " RESULT: "; interval.print(DOUT, tri_);
369 // In the case of PHI elimination, each variable definition is only
370 // live until the end of the block. We've already taken care of the
371 // rest of the live range.
372 unsigned defIndex = getDefIndex(MIIdx);
// As in the first-def case, record the defining copy (if any).
375 MachineInstr *CopyMI = NULL;
376 unsigned SrcReg, DstReg;
377 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
378 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
379 tii_->isMoveInstr(*mi, SrcReg, DstReg))
381 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
383 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
384 LiveRange LR(defIndex, killIndex, ValNo);
385 interval.addRange(LR);
386 interval.addKill(ValNo, killIndex);
387 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Extend 'interval' for a definition of a
// physical register. Physreg lifetimes never cross block boundaries, so
// the scan for the kill/redef is confined to the defining block.
395 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
396 MachineBasicBlock::iterator mi,
398 LiveInterval &interval,
399 MachineInstr *CopyMI) {
400 // A physical register cannot be live across basic block, so its
401 // lifetime must end somewhere in its defining basic block.
402 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
404 unsigned baseIndex = MIIdx;
405 unsigned start = getDefIndex(baseIndex);
406 unsigned end = start;
408 // If it is not used after definition, it is considered dead at
409 // the instruction defining it. Hence its interval is:
410 // [defSlot(def), defSlot(def)+1)
411 if (mi->registerDefIsDead(interval.reg, tri_)) {
413 end = getDefIndex(start) + 1;
417 // If it is not dead on definition, it must be killed by a
418 // subsequent instruction. Hence its interval is:
419 // [defSlot(def), useSlot(kill)+1)
420 while (++mi != MBB->end()) {
421 baseIndex += InstrSlots::NUM;
422 if (mi->killsRegister(interval.reg, tri_)) {
424 end = getUseIndex(baseIndex) + 1;
426 } else if (mi->modifiesRegister(interval.reg, tri_)) {
427 // Another instruction redefines the register before it is ever read.
428 // Then the register is essentially dead at the instruction that defines
429 // it. Hence its interval is:
430 // [defSlot(def), defSlot(def)+1)
432 end = getDefIndex(start) + 1;
437 // The only case we should have a dead physreg here without a killing or
438 // instruction where we know it's dead is if it is live-in to the function
440 assert(!CopyMI && "physreg was not killed in defining block!");
441 end = getDefIndex(start) + 1; // It's dead.
444 assert(start < end && "did not find end of interval?");
446 // Already exists? Extend old live interval.
447 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
// Reuse the existing value number when the def falls inside a live
// range; otherwise create a fresh one tagged with the defining copy.
448 VNInfo *ValNo = (OldLR != interval.end())
449 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
450 LiveRange LR(start, end, ValNo);
451 interval.addRange(LR);
452 interval.addKill(LR.valno, end);
453 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register definition to the virtual- or
// physical-register handler. For an allocatable physreg, its
// sub-registers are processed too, since defining a register also
// defines every register it contains.
456 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
457 MachineBasicBlock::iterator MI,
460 if (TargetRegisterInfo::isVirtualRegister(reg))
461 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
462 else if (allocatableRegs_[reg]) {
// Tag the physreg's value number with the defining copy, if this is one.
463 MachineInstr *CopyMI = NULL;
464 unsigned SrcReg, DstReg;
465 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
466 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
467 tii_->isMoveInstr(*MI, SrcReg, DstReg))
469 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
470 // Def of a register also defines its sub-registers.
471 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
472 // If MI also modifies the sub-register explicitly, avoid processing it
473 // more than once. Do not pass in TRI here so it checks for exact match.
474 if (!MI->modifiesRegister(*AS))
475 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a live range for a register that is
// live-in to MBB, starting at the block's first index and ending at its
// kill, at a redefinition, or (if neither is found) treated as living
// through / dead depending on the fall-through handling below.
479 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
481 LiveInterval &interval, bool isAlias) {
482 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
484 // Look for kills, if it reaches a def before it's killed, then it shouldn't
485 // be considered a livein.
486 MachineBasicBlock::iterator mi = MBB->begin();
487 unsigned baseIndex = MIIdx;
488 unsigned start = baseIndex;
489 unsigned end = start;
490 while (mi != MBB->end()) {
491 if (mi->killsRegister(interval.reg, tri_)) {
493 end = getUseIndex(baseIndex) + 1;
495 } else if (mi->modifiesRegister(interval.reg, tri_)) {
496 // Another instruction redefines the register before it is ever read.
497 // Then the register is essentially dead at the instruction that defines
498 // it. Hence its interval is:
499 // [defSlot(def), defSlot(def)+1)
501 end = getDefIndex(start) + 1;
505 baseIndex += InstrSlots::NUM;
510 // Live-in register might not be used at all.
514 end = getDefIndex(MIIdx) + 1;
516 DOUT << " live through";
// A live-in has no defining instruction in this function, so its value
// number is created with a null copy.
521 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
522 interval.addRange(LR);
523 interval.addKill(LR.valno, end);
524 DOUT << " +" << LR << '\n';
527 /// computeIntervals - computes the live intervals for virtual
528 /// registers. for some ordering of the machine instructions [1,N] a
529 /// live interval is an interval [i, j) where 1 <= i <= j < N for
530 /// which a variable is live
///
/// Walks every block: first creates intervals for the block's live-in
/// registers (and their sub-registers), then visits each instruction's
/// register defs, advancing the instruction index by InstrSlots::NUM.
531 void LiveIntervals::computeIntervals() {
532 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
533 << "********** Function: "
534 << ((Value*)mf_->getFunction())->getName() << '\n';
535 // Track the index of the current machine instr.
536 unsigned MIIndex = 0;
537 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
539 MachineBasicBlock *MBB = MBBI;
540 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
542 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
544 // Create intervals for live-ins to this BB first.
545 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
546 LE = MBB->livein_end(); LI != LE; ++LI) {
547 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
548 // Multiple live-ins can alias the same register.
549 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
550 if (!hasInterval(*AS))
551 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
555 for (; MI != miEnd; ++MI) {
556 DOUT << MIIndex << "\t" << *MI;
// Visit operands in reverse so operand removal (if any) stays safe.
559 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
560 MachineOperand &MO = MI->getOperand(i);
561 // handle register defs - build intervals
562 if (MO.isRegister() && MO.getReg() && MO.isDef())
563 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
566 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// lies within live range LR, using binary search over the sorted
// Idx2MBBMap built in runOnMachineFunction.
571 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
572 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
573 std::vector<IdxMBBPair>::const_iterator I =
574 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
577 while (I != Idx2MBBMap.end()) {
// Stop once the candidate block starts at or beyond the range's end.
578 if (LR.end <= I->first)
580 MBBs.push_back(I->second);
// createInterval - Construct a new LiveInterval for 'reg' with an
// initial spill weight that depends on whether it is a physical register
// (physregs get a distinguished weight; the value is not visible here).
588 LiveInterval LiveIntervals::createInterval(unsigned reg) {
589 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
591 return LiveInterval(reg, Weight);
594 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
595 /// copy field and returns the source register that defines it.
596 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG reads its source from operand 1; INSERT_SUBREG's
// inserted value is operand 2.
600 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
601 return VNI->copy->getOperand(1).getReg();
602 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
603 return VNI->copy->getOperand(2).getReg();
// Plain register moves are decoded by the target hook.
604 unsigned SrcReg, DstReg;
605 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
607 assert(0 && "Unrecognized copy instruction!");
611 //===----------------------------------------------------------------------===//
612 // Register allocator hooks.
615 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
616 /// allow one) virtual register operand, then its uses are implicitly using
617 /// the register. Returns the virtual register.
618 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
619 MachineInstr *MI) const {
621 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
622 MachineOperand &MO = MI->getOperand(i);
// Only register uses matter; defs and non-register operands are skipped.
623 if (!MO.isRegister() || !MO.isUse())
625 unsigned Reg = MO.getReg();
// Ignore uses of the interval's own register.
626 if (Reg == 0 || Reg == li.reg)
628 // FIXME: For now, only remat MI with at most one register operand.
630 "Can't rematerialize instruction with multiple register operand!");
637 /// isValNoAvailableAt - Return true if the val# of the specified interval
638 /// which reaches the given instruction also reaches the specified use index.
639 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
640 unsigned UseIdx) const {
641 unsigned Index = getInstructionIndex(MI);
// Value number live at MI's own index.
642 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// The same value number must be the one live at UseIdx.
643 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
644 return UI != li.end() && UI->valno == ValNo;
647 /// isReMaterializable - Returns true if the definition MI of the specified
648 /// val# of the specified interval is re-materializable.
///
/// Accepts IMPLICIT_DEFs, loads from immutable stack slots, and targets'
/// trivially-rematerializable instructions — the last only when any
/// implicitly-used virtual register is available at every use of li.
649 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
650 const VNInfo *ValNo, MachineInstr *MI,
656 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
// Loads from fixed (immutable) stack slots can always be re-executed.
660 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
661 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
662 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
663 // this but remember this is not safe to fold into a two-address
665 // This is a load from fixed stack slot. It can be rematerialized.
668 if (tii_->isTriviallyReMaterializable(MI)) {
669 const TargetInstrDesc &TID = MI->getDesc();
670 isLoad = TID.isSimpleLoad();
// If the remat def implicitly uses another vreg, that vreg's value must
// reach every use of li for remat to be legal.
672 unsigned ImpUse = getReMatImplicitUse(li, MI);
674 const LiveInterval &ImpLi = getInterval(ImpUse);
675 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
676 re = mri_->use_end(); ri != re; ++ri) {
677 MachineInstr *UseMI = &*ri;
678 unsigned UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this particular value number are relevant.
679 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
681 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
691 /// isReMaterializable - Returns true if every definition of MI of every
692 /// val# of the specified interval is re-materializable.
693 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
695 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
697 const VNInfo *VNI = *i;
698 unsigned DefIdx = VNI->def;
700 continue; // Dead val#.
701 // Is the def for the val# rematerializable?
704 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
// Every defining instruction must pass the per-val# check above.
705 bool DefIsLoad = false;
707 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
714 /// FilterFoldedOps - Filter out two-address use operands. Return
715 /// true if it finds any issue with the operands that ought to prevent
717 static bool FilterFoldedOps(MachineInstr *MI,
718 SmallVector<unsigned, 2> &Ops,
720 SmallVector<unsigned, 2> &FoldOps) {
721 const TargetInstrDesc &TID = MI->getDesc();
724 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
725 unsigned OpIdx = Ops[i];
726 MachineOperand &MO = MI->getOperand(OpIdx);
727 // FIXME: fold subreg use.
// Defs mark the access as a modification.
731 MRInfo |= (unsigned)VirtRegMap::isMod;
733 // Filter out two-address use operand(s).
734 if (!MO.isImplicit() &&
735 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// Tied use: the register is both read and written by MI.
736 MRInfo = VirtRegMap::isModRef;
739 MRInfo |= (unsigned)VirtRegMap::isRef;
741 FoldOps.push_back(OpIdx);
747 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
748 /// slot / to reg or any rematerialized load into ith operand of specified
749 /// MI. If it is successul, MI is updated with the newly created MI and
751 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
752 VirtRegMap &vrm, MachineInstr *DefMI,
754 SmallVector<unsigned, 2> &Ops,
755 bool isSS, int Slot, unsigned Reg) {
756 // If it is an implicit def instruction, just delete it.
757 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
758 RemoveMachineInstrFromMaps(MI);
759 vrm.RemoveMachineInstrFromMaps(MI);
760 MI->eraseFromParent();
765 // Filter the list of operand indexes that are to be folded. Abort if
766 // any operand will prevent folding.
768 SmallVector<unsigned, 2> FoldOps;
769 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
772 // The only time it's safe to fold into a two address instruction is when
773 // it's folding reload and spill from / into a spill stack slot.
774 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack-slot access or the remat def.
777 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
778 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
780 // Remember this instruction uses the spill slot.
781 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
783 // Attempt to fold the memory reference into the instruction. If
784 // we can do this, we don't need to insert spill code.
786 lv_->instructionChanged(MI, fmi);
788 fmi->copyKillDeadInfo(MI, tri_);
789 MachineBasicBlock &MBB = *MI->getParent();
790 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
791 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Carry over the spiller's bookkeeping to the folded instruction.
792 vrm.transferSpillPts(MI, fmi);
793 vrm.transferRestorePts(MI, fmi);
794 vrm.transferEmergencySpills(MI, fmi);
// Rebind the old instruction's index to the replacement.
796 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
797 mi2iMap_[fmi] = InstrIdx;
798 MI = MBB.insert(MBB.erase(MI), fmi);
805 /// canFoldMemoryOperand - Returns true if the specified load / store
806 /// folding is possible.
807 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
808 SmallVector<unsigned, 2> &Ops,
810 // Filter the list of operand indexes that are to be folded. Abort if
811 // any operand will prevent folding.
813 SmallVector<unsigned, 2> FoldOps;
814 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
817 // It's only legal to remat for a use, not a def.
818 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final decision to the target.
821 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if all of li's live ranges fall
// within a single basic block, found by binary search in Idx2MBBMap.
824 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
825 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
826 for (LiveInterval::Ranges::const_iterator
827 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
828 std::vector<IdxMBBPair>::const_iterator II =
829 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
830 if (II == Idx2MBBMap.end())
// A range that extends past the next block's start spans two blocks.
832 if (I->end > II->first) // crossing a MBB.
834 MBBs.insert(II->second);
841 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
842 /// interval on to-be re-materialized operands of MI) with new register.
843 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
844 MachineInstr *MI, unsigned NewVReg,
846 // There is an implicit use. That means one of the other operand is
847 // being remat'ed and the remat'ed instruction has li.reg as an
848 // use operand. Make sure we rewrite that as well.
849 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
850 MachineOperand &MO = MI->getOperand(i);
851 if (!MO.isRegister())
853 unsigned Reg = MO.getReg();
// Only virtual registers that were rematerialized are of interest.
854 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
856 if (!vrm.isReMaterialized(Reg))
// Redirect the remat'ed instruction's use of li.reg to the new vreg.
858 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
859 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
861 UseMO->setReg(NewVReg);
865 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
866 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// For one instruction MI touching li.reg, this either deletes a fully
/// rematerialized def, folds the memory access into MI, or rewrites the
/// operands to a (possibly new) virtual register NewVReg and records the
/// corresponding spill/remat bookkeeping in the VirtRegMap. Returns
/// CanFold (whether folding remains possible) to the caller.
868 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
869 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
870 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
871 unsigned Slot, int LdSlot,
872 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
874 const TargetRegisterClass* rc,
875 SmallVector<int, 4> &ReMatIds,
876 const MachineLoopInfo *loopInfo,
877 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
878 std::map<unsigned,unsigned> &MBBVRegsMap,
879 std::vector<LiveInterval*> &NewLIs) {
880 bool CanFold = false;
882 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
883 MachineOperand& mop = MI->getOperand(i);
884 if (!mop.isRegister())
886 unsigned Reg = mop.getReg();
888 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
893 bool TryFold = !DefIsReMat;
894 bool FoldSS = true; // Default behavior unless it's a remat.
897 // If this is the rematerializable definition MI itself and
898 // all of its uses are rematerialized, simply delete it.
899 if (MI == ReMatOrigDefMI && CanDelete) {
900 DOUT << "\t\t\t\tErasing re-materlizable def: ";
902 RemoveMachineInstrFromMaps(MI);
903 vrm.RemoveMachineInstrFromMaps(MI);
904 MI->eraseFromParent();
908 // If def for this use can't be rematerialized, then try folding.
909 // If def is rematerializable and it's a load, also try folding.
910 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
912 // Try fold loads (from stack slot, constant pool, etc.) into uses.
918 // Scan all of the operands of this instruction rewriting operands
919 // to use NewVReg instead of li.reg as appropriate. We do this for
922 // 1. If the instr reads the same spilled vreg multiple times, we
923 // want to reuse the NewVReg.
924 // 2. If the instr is a two-addr instruction, we are required to
925 // keep the src/dst regs pinned.
927 // Keep track of whether we replace a use and/or def so that we can
928 // create the spill interval with the appropriate range.
930 HasUse = mop.isUse();
931 HasDef = mop.isDef();
932 SmallVector<unsigned, 2> Ops;
// Gather all later operands of MI that also reference li.reg so they
// can be rewritten to the same NewVReg in one pass.
934 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
935 const MachineOperand &MOj = MI->getOperand(j);
936 if (!MOj.isRegister())
938 unsigned RegJ = MOj.getReg();
939 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
943 HasUse |= MOj.isUse();
944 HasDef |= MOj.isDef();
949 // Do not fold load / store here if we are splitting. We'll find an
950 // optimal point to insert a load / store later.
952 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
953 Ops, FoldSS, FoldSlot, Reg)) {
954 // Folding the load/store can completely change the instruction in
955 // unpredictable ways, rescan it from the beginning.
961 goto RestartInstruction;
964 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
969 // Create a new virtual register for the spill interval.
970 bool CreatedNewVReg = false;
972 NewVReg = mri_->createVirtualRegister(rc);
974 CreatedNewVReg = true;
977 if (mop.isImplicit())
978 rewriteImplicitOps(li, MI, NewVReg, vrm);
980 // Reuse NewVReg for other reads.
981 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
982 MachineOperand &mopj = MI->getOperand(Ops[j]);
983 mopj.setReg(NewVReg);
984 if (mopj.isImplicit())
985 rewriteImplicitOps(li, MI, NewVReg, vrm);
988 if (CreatedNewVReg) {
990 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
991 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
992 // Each valnum may have its own remat id.
993 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
995 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
997 if (!CanDelete || (HasUse && HasDef)) {
998 // If this is a two-addr instruction then its use operands are
999 // rematerializable but its def is not. It should be assigned a
1001 vrm.assignVirt2StackSlot(NewVReg, Slot);
1004 vrm.assignVirt2StackSlot(NewVReg, Slot);
1006 } else if (HasUse && HasDef &&
1007 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1008 // If this interval hasn't been assigned a stack slot (because earlier
1009 // def is a deleted remat def), do it now.
1010 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1011 vrm.assignVirt2StackSlot(NewVReg, Slot);
1014 // Re-matting an instruction with virtual register use. Add the
1015 // register as an implicit use on the use MI.
1016 if (DefIsReMat && ImpUse)
1017 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1019 // create a new register interval for this spill / remat.
1020 LiveInterval &nI = getOrCreateInterval(NewVReg);
1021 if (CreatedNewVReg) {
1022 NewLIs.push_back(&nI);
1023 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1025 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the new interval a range covering this use (load slot through
// use slot) with a fresh, instruction-less value number.
1029 if (CreatedNewVReg) {
1030 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1031 nI.getNextValue(~0U, 0, VNInfoAllocator));
1035 // Extend the split live interval to this def / use.
1036 unsigned End = getUseIndex(index)+1;
1037 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1038 nI.getValNumInfo(nI.getNumValNums()-1));
// Defs get a range from the def slot to the store slot.
1044 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1045 nI.getNextValue(~0U, 0, VNInfoAllocator));
1050 DOUT << "\t\t\t\tAdded new interval: ";
1051 nI.print(DOUT, tri_);
1056 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1058 MachineBasicBlock *MBB, unsigned Idx) const {
1059 unsigned End = getMBBEndIdx(MBB);
1060 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1061 unsigned KillIdx = VNI->kills[j];
1062 if (KillIdx > Idx && KillIdx < End)
1068 static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1069 const VNInfo *VNI = NULL;
1070 for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1071 e = li.vni_end(); i != e; ++i)
1072 if ((*i)->def == DefIdx) {
1079 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1080 /// during spilling.
1081 struct RewriteInfo {
1086 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1087 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1090 struct RewriteInfoCompare {
1091 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1092 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg that
/// falls inside the live range [I->start, I->end). Each rewritten
/// instruction gets a spill vreg (reused within an MBB when splitting via
/// MBBVRegsMap), and per-MBB spill / restore points are recorded in
/// SpillIdxes / RestoreIdxes (with their MBB bits in SpillMBBs /
/// RestoreMBBs) for the spiller to materialize later.
/// NOTE(review): this listing has elided lines (gaps in the embedded line
/// numbering); comments below describe only the code that is visible.
1096 void LiveIntervals::
1097 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1098 LiveInterval::Ranges::const_iterator &I,
1099 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1100 unsigned Slot, int LdSlot,
1101 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1103 const TargetRegisterClass* rc,
1104 SmallVector<int, 4> &ReMatIds,
1105 const MachineLoopInfo *loopInfo,
1106 BitVector &SpillMBBs,
1107 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1108 BitVector &RestoreMBBs,
1109 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1110 std::map<unsigned,unsigned> &MBBVRegsMap,
1111 std::vector<LiveInterval*> &NewLIs) {
1112 bool AllCanFold = true;
1113 unsigned NewVReg = 0;
// Round the range down/up to whole-instruction boundaries.
1114 unsigned start = getBaseIndex(I->start);
1115 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1117 // First collect all the def / use in this live range that will be rewritten.
1118 // Make sure they are sorted according to instruction index.
1119 std::vector<RewriteInfo> RewriteMIs;
1120 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1121 re = mri_->reg_end(); ri != re; ) {
1122 MachineInstr *MI = &*ri;
1123 MachineOperand &O = ri.getOperand();
1125 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1126 unsigned index = getInstructionIndex(MI);
// Only operands inside this range are candidates.
1127 if (index < start || index >= end)
1129 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1131 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat def reads another virtual register, remember it so its
// interval can be pinned (weight set to HUGE_VALF below).
1133 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1134 // Now rewrite the defs and uses.
1135 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1136 RewriteInfo &rwi = RewriteMIs[i];
1138 unsigned index = rwi.Index;
1139 bool MIHasUse = rwi.HasUse;
1140 bool MIHasDef = rwi.HasDef;
1141 MachineInstr *MI = rwi.MI;
1142 // If MI def and/or use the same register multiple times, then there
1143 // are multiple entries.
1144 unsigned NumUses = MIHasUse;
// Coalesce all entries that refer to the same instruction.
1145 while (i != e && RewriteMIs[i].MI == MI) {
1146 assert(RewriteMIs[i].Index == index);
1147 bool isUse = RewriteMIs[i].HasUse;
1148 if (isUse) ++NumUses;
1150 MIHasDef |= RewriteMIs[i].HasDef;
1153 MachineBasicBlock *MBB = MI->getParent();
1155 if (ImpUse && MI != ReMatDefMI) {
1156 // Re-matting an instruction with virtual register use. Update the
1157 // register interval's spill weight to HUGE_VALF to prevent it from
1159 LiveInterval &ImpLi = getInterval(ImpUse);
1160 ImpLi.weight = HUGE_VALF;
// When splitting, reuse the spill vreg already created for this MBB (if any).
1163 unsigned MBBId = MBB->getNumber();
1164 unsigned ThisVReg = 0;
1166 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1167 if (NVI != MBBVRegsMap.end()) {
1168 ThisVReg = NVI->second;
1175 // It's better to start a new interval to avoid artifically
1176 // extend the new interval.
1177 if (MIHasDef && !MIHasUse) {
1178 MBBVRegsMap.erase(MBB->getNumber());
1184 bool IsNew = ThisVReg == 0;
1186 // This ends the previous live interval. If all of its def / use
1187 // can be folded, give it a low spill weight.
1188 if (NewVReg && TrySplit && AllCanFold) {
1189 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction's operands; HasDef/HasUse report what was
// actually rewritten, CanFold whether a fold is still possible.
1196 bool HasDef = false;
1197 bool HasUse = false;
1198 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1199 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1200 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1201 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1202 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1203 if (!HasDef && !HasUse)
1206 AllCanFold &= CanFold;
1208 // Update weight of spill interval.
1209 LiveInterval &nI = getOrCreateInterval(NewVReg);
1211 // The spill weight is now infinity as it cannot be spilled again.
1212 nI.weight = HUGE_VALF;
1216 // Keep track of the last def and first use in each MBB.
1218 if (MI != ReMatOrigDefMI || !CanDelete) {
1219 bool HasKill = false;
1221 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1223 // If this is a two-address code, then this index starts a new VNInfo.
1224 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1226 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record (or update) this MBB's pending spill point for NewVReg.
1228 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1229 SpillIdxes.find(MBBId);
1231 if (SII == SpillIdxes.end()) {
1232 std::vector<SRInfo> S;
1233 S.push_back(SRInfo(index, NewVReg, true));
1234 SpillIdxes.insert(std::make_pair(MBBId, S));
1235 } else if (SII->second.back().vreg != NewVReg) {
1236 SII->second.push_back(SRInfo(index, NewVReg, true));
1237 } else if ((int)index > SII->second.back().index) {
1238 // If there is an earlier def and this is a two-address
1239 // instruction, then it's not possible to fold the store (which
1240 // would also fold the load).
1241 SRInfo &Info = SII->second.back();
1243 Info.canFold = !HasUse;
1245 SpillMBBs.set(MBBId);
1246 } else if (SII != SpillIdxes.end() &&
1247 SII->second.back().vreg == NewVReg &&
1248 (int)index > SII->second.back().index) {
1249 // There is an earlier def that's not killed (must be two-address).
1250 // The spill is no longer needed.
1251 SII->second.pop_back();
1252 if (SII->second.empty()) {
1253 SpillIdxes.erase(MBBId);
1254 SpillMBBs.reset(MBBId);
// Use side: uses after the last recorded def make the spill unfoldable.
1261 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1262 SpillIdxes.find(MBBId);
1263 if (SII != SpillIdxes.end() &&
1264 SII->second.back().vreg == NewVReg &&
1265 (int)index > SII->second.back().index)
1266 // Use(s) following the last def, it's not safe to fold the spill.
1267 SII->second.back().canFold = false;
1268 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1269 RestoreIdxes.find(MBBId);
1270 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1271 // If we are splitting live intervals, only fold if it's the first
1272 // use and there isn't another use later in the MBB.
1273 RII->second.back().canFold = false;
1275 // Only need a reload if there isn't an earlier def / use.
1276 if (RII == RestoreIdxes.end()) {
1277 std::vector<SRInfo> Infos;
1278 Infos.push_back(SRInfo(index, NewVReg, true));
1279 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1281 RII->second.push_back(SRInfo(index, NewVReg, true));
1283 RestoreMBBs.set(MBBId);
1287 // Update spill weight.
1288 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1289 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Last interval of the loop: same all-foldable bonus as above.
1292 if (NewVReg && TrySplit && AllCanFold) {
1293 // If all of its def / use can be folded, give it a low spill weight.
1294 LiveInterval &nI = getOrCreateInterval(NewVReg);
1299 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1300 BitVector &RestoreMBBs,
1301 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1302 if (!RestoreMBBs[Id])
1304 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1305 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1306 if (Restores[i].index == index &&
1307 Restores[i].vreg == vr &&
1308 Restores[i].canFold)
1313 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1314 BitVector &RestoreMBBs,
1315 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1316 if (!RestoreMBBs[Id])
1318 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1319 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1320 if (Restores[i].index == index && Restores[i].vreg)
1321 Restores[i].index = -1;
1324 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1325 /// spilled and create empty intervals for their uses.
/// Defs of li.reg must be IMPLICIT_DEFs here (asserted below) and are
/// erased; each remaining use is rewritten onto a fresh vreg with a new
/// empty interval pushed onto NewLIs.
/// NOTE(review): lines are elided in this listing (gaps in the embedded
/// numbering) — e.g. the def/use branch and the iterator advance are not
/// visible; comments describe only the visible code.
1327 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1328 const TargetRegisterClass* rc,
1329 std::vector<LiveInterval*> &NewLIs) {
// Walk all operands referencing li.reg; the iterator is advanced manually
// because instructions may be erased during the walk.
1330 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1331 re = mri_->reg_end(); ri != re; ) {
1332 MachineOperand &O = ri.getOperand();
1333 MachineInstr *MI = &*ri;
// Def case: the only defs left at this point must be IMPLICIT_DEFs, and
// they can simply be deleted (maps updated first).
1336 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1337 "Register def was not rewritten?");
1338 RemoveMachineInstrFromMaps(MI);
1339 vrm.RemoveMachineInstrFromMaps(MI);
1340 MI->eraseFromParent();
1342 // This must be an use of an implicit_def so it's not part of the live
1343 // interval. Create a new empty live interval for it.
1344 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1345 unsigned NewVReg = mri_->createVirtualRegister(rc);
1347 vrm.setIsImplicitlyDefined(NewVReg);
1348 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of this instruction that names li.reg onto the
// freshly created vreg.
1349 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1350 MachineOperand &MO = MI->getOperand(i);
1351 if (MO.isReg() && MO.getReg() == li.reg)
/// addIntervalsForSpills - Create new intervals for spilled defs / uses of
/// interval li: decide per-val# rematerializability, assign a stack slot
/// if needed, rewrite all instructions, then insert (or fold) the actual
/// spill and restore points collected in SpillIdxes / RestoreIdxes.
/// Returns the list of new intervals for the register allocator to process.
/// NOTE(review): this listing has elided lines (gaps in the embedded line
/// numbering); comments below describe only the visible code.
1359 std::vector<LiveInterval*> LiveIntervals::
1360 addIntervalsForSpills(const LiveInterval &li,
1361 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1362 // Since this is called after the analysis is done we don't know if
1363 // LiveVariables is available
1364 lv_ = getAnalysisToUpdate<LiveVariables>();
1366 assert(li.weight != HUGE_VALF &&
1367 "attempt to spill already spilled interval!");
1369 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1370 li.print(DOUT, tri_);
1373 // Each bit specify whether it a spill is required in the MBB.
1374 BitVector SpillMBBs(mf_->getNumBlockIDs());
1375 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1376 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1377 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1378 std::map<unsigned,unsigned> MBBVRegsMap;
1379 std::vector<LiveInterval*> NewLIs;
1380 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization bookkeeping, indexed by VNInfo id.
1382 unsigned NumValNums = li.getNumValNums();
1383 SmallVector<MachineInstr*, 4> ReMatDefs;
1384 ReMatDefs.resize(NumValNums, NULL);
1385 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1386 ReMatOrigDefs.resize(NumValNums, NULL);
1387 SmallVector<int, 4> ReMatIds;
1388 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1389 BitVector ReMatDelete(NumValNums);
1390 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1392 // Spilling a split live interval. It cannot be split any further. Also,
1393 // it's also guaranteed to be a single val# / range interval.
1394 if (vrm.getPreSplitReg(li.reg)) {
1395 vrm.setIsSplitFromReg(li.reg, 0);
1396 // Unset the split kill marker on the last use.
1397 unsigned KillIdx = vrm.getKillPoint(li.reg);
1399 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1400 assert(KillMI && "Last use disappeared?");
1401 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1402 assert(KillOp != -1 && "Last use disappeared?");
1403 KillMI->getOperand(KillOp).setIsKill(false);
1405 vrm.removeKillPoint(li.reg);
// A split interval inherits its original's stack slot / remat info.
1406 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1407 Slot = vrm.getStackSlot(li.reg);
1408 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1409 MachineInstr *ReMatDefMI = DefIsReMat ?
1410 vrm.getReMaterializedMI(li.reg) : NULL;
1412 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1413 bool isLoad = isLoadSS ||
1414 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1415 bool IsFirstRange = true;
1416 for (LiveInterval::Ranges::const_iterator
1417 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1418 // If this is a split live interval with multiple ranges, it means there
1419 // are two-address instructions that re-defined the value. Only the
1420 // first def can be rematerialized!
1422 // Note ReMatOrigDefMI has already been deleted.
1423 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1424 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1425 false, vrm, rc, ReMatIds, loopInfo,
1426 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1427 MBBVRegsMap, NewLIs);
// Non-first ranges of a split interval: spill without remat.
1429 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1430 Slot, 0, false, false, false,
1431 false, vrm, rc, ReMatIds, loopInfo,
1432 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1433 MBBVRegsMap, NewLIs);
1435 IsFirstRange = false;
1438 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Non-split path: maybe split across basic blocks (bounded by -split-limit).
1442 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1443 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1447 bool NeedStackSlot = false;
// Decide, per val#, whether its def is rematerializable and deletable.
1448 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1450 const VNInfo *VNI = *i;
1451 unsigned VN = VNI->id;
1452 unsigned DefIdx = VNI->def;
1454 continue; // Dead val#.
1455 // Is the def for the val# rematerializable?
1456 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1457 ? 0 : getInstructionFromIndex(DefIdx);
1459 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1460 // Remember how to remat the def of this val#.
1461 ReMatOrigDefs[VN] = ReMatDefMI;
1462 // Original def may be modified so we have to make a copy here. vrm must
1464 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1466 bool CanDelete = true;
1467 if (VNI->hasPHIKill) {
1468 // A kill is a phi node, not all of its uses can be rematerialized.
1469 // It must not be deleted.
1471 // Need a stack slot if there is any live range where uses cannot be
1473 NeedStackSlot = true;
1476 ReMatDelete.set(VN);
1478 // Need a stack slot if there is any live range where uses cannot be
1480 NeedStackSlot = true;
1484 // One stack slot per live interval.
1485 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1486 Slot = vrm.assignVirt2StackSlot(li.reg);
1488 // Create new intervals and rewrite defs and uses.
1489 for (LiveInterval::Ranges::const_iterator
1490 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1491 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1492 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1493 bool DefIsReMat = ReMatDefMI != NULL;
1494 bool CanDelete = ReMatDelete[I->valno->id];
1496 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1497 bool isLoad = isLoadSS ||
1498 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1499 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1500 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1501 CanDelete, vrm, rc, ReMatIds, loopInfo,
1502 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1503 MBBVRegsMap, NewLIs);
1506 // Insert spills / restores if we are splitting.
1508 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Walk the MBBs that need spills; fold the store into the def when legal,
// otherwise record a spill point for the spiller.
1512 SmallPtrSet<LiveInterval*, 4> AddedKill;
1513 SmallVector<unsigned, 2> Ops;
1514 if (NeedStackSlot) {
1515 int Id = SpillMBBs.find_first();
1517 std::vector<SRInfo> &spills = SpillIdxes[Id];
1518 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1519 int index = spills[i].index;
1520 unsigned VReg = spills[i].vreg;
1521 LiveInterval &nI = getOrCreateInterval(VReg);
1522 bool isReMat = vrm.isReMaterialized(VReg);
1523 MachineInstr *MI = getInstructionFromIndex(index);
1524 bool CanFold = false;
1525 bool FoundUse = false;
1527 if (spills[i].canFold) {
// Collect the operand indices of VReg within MI for folding.
1529 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1530 MachineOperand &MO = MI->getOperand(j);
1531 if (!MO.isRegister() || MO.getReg() != VReg)
1538 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1539 RestoreMBBs, RestoreIdxes))) {
1540 // MI has two-address uses of the same register. If the use
1541 // isn't the first and only use in the BB, then we can't fold
1542 // it. FIXME: Move this to rewriteInstructionsForSpills.
1549 // Fold the store into the def if possible.
1550 bool Folded = false;
1551 if (CanFold && !Ops.empty()) {
1552 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1555 // Also folded uses, do not issue a load.
1556 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1557 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1559 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1563 // Otherwise tell the spiller to issue a spill.
1565 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1566 bool isKill = LR->end == getStoreIndex(index);
1567 vrm.addSpillPoint(VReg, isKill, MI);
1569 AddedKill.insert(&nI);
1572 Id = SpillMBBs.find_next(Id);
// Walk the MBBs that need restores; fold the load (or the remat'ed def)
// into the use when legal, otherwise record a restore point.
1576 int Id = RestoreMBBs.find_first();
1578 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1579 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1580 int index = restores[i].index;
1583 unsigned VReg = restores[i].vreg;
1584 LiveInterval &nI = getOrCreateInterval(VReg);
1585 MachineInstr *MI = getInstructionFromIndex(index);
1586 bool CanFold = false;
1588 if (restores[i].canFold) {
1590 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1591 MachineOperand &MO = MI->getOperand(j);
1592 if (!MO.isRegister() || MO.getReg() != VReg)
1596 // If this restore were to be folded, it would have been folded
1605 // Fold the load into the use if possible.
1606 bool Folded = false;
1607 if (CanFold && !Ops.empty()) {
1608 if (!vrm.isReMaterialized(VReg))
1609 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialized def: try folding the remat instruction itself.
1611 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1613 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1614 // If the rematerializable def is a load, also try to fold it.
1615 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1616 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1617 Ops, isLoadSS, LdSlot, VReg);
1618 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1620 // Re-matting an instruction with virtual register use. Add the
1621 // register as an implicit use on the use MI and update the register
1622 // interval's spill weight to HUGE_VALF to prevent it from being
1624 LiveInterval &ImpLi = getInterval(ImpUse);
1625 ImpLi.weight = HUGE_VALF;
1626 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1630 // If folding is not possible / failed, then tell the spiller to issue a
1631 // load / rematerialization for us.
1633 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1635 vrm.addRestorePoint(VReg, MI);
1637 Id = RestoreMBBs.find_next(Id);
1640 // Finalize intervals: add kills, finalize spill weights, and filter out
1642 std::vector<LiveInterval*> RetNewLIs;
1643 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1644 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by the interval's size.
1646 LI->weight /= LI->getSize();
1647 if (!AddedKill.count(LI)) {
// Mark the last use of this new interval as a kill and tell vrm.
1648 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1649 unsigned LastUseIdx = getBaseIndex(LR->end);
1650 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1651 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1652 assert(UseIdx != -1);
1653 if (LastUse->getOperand(UseIdx).isImplicit() ||
1654 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1655 LastUse->getOperand(UseIdx).setIsKill();
1656 vrm.addKillPoint(LI->reg, LastUseIdx);
1659 RetNewLIs.push_back(LI);
1663 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1667 /// hasAllocatableSuperReg - Return true if the specified physical register has
1668 /// any super register that's allocatable.
1669 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1670 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1671 if (allocatableRegs_[*AS] && hasInterval(*AS))
1676 /// getRepresentativeReg - Find the largest super register of the specified
1677 /// physical register.
1678 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1679 // Find the largest super-register that is allocatable.
1680 unsigned BestReg = Reg;
1681 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1682 unsigned SuperReg = *AS;
1683 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1691 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1692 /// specified interval that conflicts with the specified physical register.
1693 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1694 unsigned PhysReg) const {
1695 unsigned NumConflicts = 0;
1696 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1697 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1698 E = mri_->reg_end(); I != E; ++I) {
1699 MachineOperand &O = I.getOperand();
1700 MachineInstr *MI = O.getParent();
1701 unsigned Index = getInstructionIndex(MI);
1702 if (pli.liveAt(Index))
1705 return NumConflicts;
1708 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1709 /// around all defs and uses of the specified interval.
1710 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1711 unsigned PhysReg, VirtRegMap &vrm) {
1712 unsigned SpillReg = getRepresentativeReg(PhysReg);
1714 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1715 // If there are registers which alias PhysReg, but which are not a
1716 // sub-register of the chosen representative super register. Assert
1717 // since we can't handle it yet.
1718 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1719 tri_->isSuperRegister(*AS, SpillReg));
1721 LiveInterval &pli = getInterval(SpillReg);
1722 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1723 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1724 E = mri_->reg_end(); I != E; ++I) {
1725 MachineOperand &O = I.getOperand();
1726 MachineInstr *MI = O.getParent();
1727 if (SeenMIs.count(MI))
1730 unsigned Index = getInstructionIndex(MI);
1731 if (pli.liveAt(Index)) {
1732 vrm.addEmergencySpill(SpillReg, MI);
1733 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1734 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1735 if (!hasInterval(*AS))
1737 LiveInterval &spli = getInterval(*AS);
1738 if (spli.liveAt(Index))
1739 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);