1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
40 // Hidden options for help debugging.
// When set, disables rematerialization of values during spilling —
// presumably forcing stack reloads instead; confirm against the spiller.
41 cl::opt<bool> DisableReMat("disable-rematerialization",
42 cl::init(false), cl::Hidden);
// Allow live intervals to be split at basic-block boundaries (default on).
44 cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45 cl::init(true), cl::Hidden);
// Cap on interval splits; -1 appears to mean "no limit" — confirm at use site.
46 cl::opt<int> SplitLimit("split-limit",
47 cl::init(-1), cl::Hidden);
// Pass statistics (reported with -stats; see llvm/ADT/Statistic.h).
50 STATISTIC(numIntervals, "Number of original intervals");
51 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
53 STATISTIC(numSplits , "Number of intervals split");
// Pass identification; registers this pass under the name "liveintervals".
55 char LiveIntervals::ID = 0;
57 RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Record the pass dependencies: LiveVariables, PHI
// elimination and two-address lowering must run before this pass; loop
// info, dominators, PHI elimination and LiveVariables are preserved by it.
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61 AU.addPreserved<LiveVariables>();
62 AU.addRequired<LiveVariables>();
63 AU.addPreservedID(MachineLoopInfoID);
64 AU.addPreservedID(MachineDominatorsID);
65 AU.addPreservedID(PHIEliminationID);
66 AU.addRequiredID(PHIEliminationID);
67 AU.addRequiredID(TwoAddressInstructionPassID);
// Delegate to the base class so common machine-function usage is recorded.
68 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free per-function state between runs of the pass.
71 void LiveIntervals::releaseMemory() {
76 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
77 VNInfoAllocator.Reset()
// Walk instructions cloned for rematerialization; the loop body
// (elided in this view) presumably deletes each one — confirm upstream.
78 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
82 /// runOnMachineFunction - Register allocate the whole function
84 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache target hooks and the LiveVariables analysis for the helpers below.
86 tm_ = &fn.getTarget();
87 tri_ = tm_->getRegisterInfo();
88 tii_ = tm_->getInstrInfo();
89 lv_ = &getAnalysis<LiveVariables>();
90 allocatableRegs_ = tri_->getAllocatableSet(fn);
92 // Number MachineInstrs and MachineBasicBlocks.
93 // Initialize MBB indexes to a sentinel.
94 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
97 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
99 unsigned StartIdx = MIIndex;
// Assign each instruction a unique, monotonically increasing index
// (spaced InstrSlots::NUM apart) and record both directions of the map.
101 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
103 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
104 assert(inserted && "multiple MachineInstr -> index mappings");
105 i2miMap_.push_back(I);
106 MIIndex += InstrSlots::NUM;
109 // Set the MBB2IdxMap entry for this MBB.
110 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
111 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
113 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
117 numIntervals += getNumIntervals();
// Dump the computed intervals (debug builds only — DOUT).
119 DOUT << "********** INTERVALS **********\n";
120 for (iterator I = begin(), E = end(); I != E; ++I) {
121 I->second.print(DOUT, tri_);
125 numIntervalsAfter += getNumIntervals();
130 /// print - Implement the dump method.
131 void LiveIntervals::print(std::ostream &O, const Module* ) const {
// First dump every live interval currently held by the analysis.
132 O << "********** INTERVALS **********\n";
133 for (const_iterator I = begin(), E = end(); I != E; ++I) {
134 I->second.print(DOUT, tri_);
// Then dump each machine basic block with per-instruction indices.
138 O << "********** MACHINEINSTRS **********\n";
139 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
140 mbbi != mbbe; ++mbbi) {
141 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
142 for (MachineBasicBlock::iterator mii = mbbi->begin(),
143 mie = mbbi->end(); mii != mie; ++mii) {
144 O << getInstructionIndex(mii) << '\t' << *mii;
149 /// conflictsWithPhysRegDef - Returns true if the specified register
150 /// is defined during the duration of the specified interval.
151 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
152 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction index covered by each range of the interval.
153 for (LiveInterval::Ranges::const_iterator
154 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
155 for (unsigned index = getBaseIndex(I->start),
156 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
157 index += InstrSlots::NUM) {
158 // skip deleted instructions
159 while (index != end && !getInstructionFromIndex(index))
160 index += InstrSlots::NUM;
161 if (index == end) break;
163 MachineInstr *MI = getInstructionFromIndex(index);
// Copies involving li.reg itself are not treated as conflicts here.
164 unsigned SrcReg, DstReg;
165 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
166 if (SrcReg == li.reg || DstReg == li.reg)
// Check each register operand of the instruction for overlap with 'reg'.
168 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
169 MachineOperand& mop = MI->getOperand(i);
170 if (!mop.isRegister())
172 unsigned PhysReg = mop.getReg();
173 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands are mapped through the VirtRegMap to their
// assigned physical register (if any) before the overlap test.
175 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
176 if (!vrm.hasPhys(PhysReg))
178 PhysReg = vrm.getPhys(PhysReg);
180 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Debug helper: print a physical register by its target
// name, otherwise print a virtual register as "%reg<N>", to cerr.
189 void LiveIntervals::printRegName(unsigned reg) const {
190 if (TargetRegisterInfo::isPhysicalRegister(reg))
191 cerr << tri_->getName(reg);
193 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend/create the live interval for a virtual
// register defined by the instruction 'mi' (at index MIIdx) in block 'mbb'.
// Handles the first definition, two-address redefinitions, and PHI-
// elimination redefinitions as separate cases.
196 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
197 MachineBasicBlock::iterator mi,
199 LiveInterval &interval) {
200 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
201 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
203 // Virtual registers may be defined multiple times (due to phi
204 // elimination and 2-addr elimination). Much of what we do only has to be
205 // done once for the vreg. We use an empty interval to detect the first
206 // time we see a vreg.
207 if (interval.empty()) {
208 // Get the Idx of the defining instructions.
209 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy (if this def is a copy/extract_subreg)
// so the value number can later be traced back to its source register.
211 MachineInstr *CopyMI = NULL;
212 unsigned SrcReg, DstReg;
213 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
214 tii_->isMoveInstr(*mi, SrcReg, DstReg))
216 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
218 assert(ValNo->id == 0 && "First value in interval is not 0?");
220 // Loop over all of the blocks that the vreg is defined in. There are
221 // two cases we have to handle here. The most common case is a vreg
222 // whose lifetime is contained within a basic block. In this case there
223 // will be a single kill, in MBB, which comes after the definition.
224 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
225 // FIXME: what about dead vars?
227 if (vi.Kills[0] != mi)
228 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
230 killIdx = defIndex+1;
232 // If the kill happens after the definition, we have an intra-block
234 if (killIdx > defIndex) {
235 assert(vi.AliveBlocks.none() &&
236 "Shouldn't be alive across any blocks!");
237 LiveRange LR(defIndex, killIdx, ValNo);
238 interval.addRange(LR);
239 DOUT << " +" << LR << "\n";
240 interval.addKill(ValNo, killIdx);
245 // The other case we handle is when a virtual register lives to the end
246 // of the defining block, potentially live across some blocks, then is
247 // live into some number of blocks, but gets killed. Start by adding a
248 // range that goes from this definition to the end of the defining block.
249 LiveRange NewLR(defIndex,
250 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
252 DOUT << " +" << NewLR;
253 interval.addRange(NewLR);
255 // Iterate over all of the blocks that the variable is completely
256 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
258 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
259 if (vi.AliveBlocks[i]) {
260 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
262 LiveRange LR(getMBBStartIdx(i),
263 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
265 interval.addRange(LR);
271 // Finally, this virtual register is live from the start of any killing
272 // block to the 'use' slot of the killing instruction.
273 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
274 MachineInstr *Kill = vi.Kills[i];
275 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
276 LiveRange LR(getMBBStartIdx(Kill->getParent()),
278 interval.addRange(LR);
279 interval.addKill(ValNo, killIdx);
284 // If this is the second time we see a virtual register definition, it
285 // must be due to phi elimination or two addr elimination. If this is
286 // the result of two address elimination, then the vreg is one of the
287 // def-and-use register operand.
288 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
289 // If this is a two-address definition, then we have already processed
290 // the live range. The only problem is that we didn't realize there
291 // are actually two values in the live interval. Because of this we
292 // need to take the LiveRegion that defines this register and split it
294 assert(interval.containsOneValue());
295 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
296 unsigned RedefIndex = getDefIndex(MIIdx);
298 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
299 VNInfo *OldValNo = OldLR->valno;
301 // Delete the initial value, which should be short and continuous,
302 // because the 2-addr copy must be in the same MBB as the redef.
303 interval.removeRange(DefIndex, RedefIndex);
305 // Two-address vregs should always only be redefined once. This means
306 // that at this point, there should be exactly one value number in it.
307 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
309 // The new value number (#1) is defined by the instruction we claimed
311 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
314 // Value#0 is now defined by the 2-addr instruction.
315 OldValNo->def = RedefIndex;
318 // Add the new live interval which replaces the range for the input copy.
319 LiveRange LR(DefIndex, RedefIndex, ValNo);
320 DOUT << " replace range with " << LR;
321 interval.addRange(LR);
322 interval.addKill(ValNo, RedefIndex);
324 // If this redefinition is dead, we need to add a dummy unit live
325 // range covering the def slot.
326 if (lv_->RegisterDefIsDead(mi, interval.reg))
327 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
330 interval.print(DOUT, tri_);
333 // Otherwise, this must be because of phi elimination. If this is the
334 // first redefinition of the vreg that we have seen, go back and change
335 // the live range in the PHI block to be a different value number.
336 if (interval.containsOneValue()) {
337 assert(vi.Kills.size() == 1 &&
338 "PHI elimination vreg should have one kill, the PHI itself!");
340 // Remove the old range that we now know has an incorrect number.
341 VNInfo *VNI = interval.getValNumInfo(0);
342 MachineInstr *Killer = vi.Kills[0];
343 unsigned Start = getMBBStartIdx(Killer->getParent());
344 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
345 DOUT << " Removing [" << Start << "," << End << "] from: ";
346 interval.print(DOUT, tri_); DOUT << "\n";
347 interval.removeRange(Start, End);
348 VNI->hasPHIKill = true;
349 DOUT << " RESULT: "; interval.print(DOUT, tri_);
351 // Replace the interval with one of a NEW value number. Note that this
352 // value number isn't actually defined by an instruction, weird huh? :)
353 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
354 DOUT << " replace range with " << LR;
355 interval.addRange(LR);
356 interval.addKill(LR.valno, End);
357 DOUT << " RESULT: "; interval.print(DOUT, tri_);
360 // In the case of PHI elimination, each variable definition is only
361 // live until the end of the block. We've already taken care of the
362 // rest of the live range.
363 unsigned defIndex = getDefIndex(MIIdx);
// As for the first-definition case, record the defining copy (if any)
// so getVNInfoSourceReg can recover the copy source later.
366 MachineInstr *CopyMI = NULL;
367 unsigned SrcReg, DstReg;
368 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
369 tii_->isMoveInstr(*mi, SrcReg, DstReg))
371 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
373 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
374 LiveRange LR(defIndex, killIndex, ValNo);
375 interval.addRange(LR);
376 interval.addKill(ValNo, killIndex);
377 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Create/extend the live range for a physical
// register defined by 'mi' (at index MIIdx). CopyMI is the defining copy
// instruction when the def is a copy, otherwise null.
385 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
386 MachineBasicBlock::iterator mi,
388 LiveInterval &interval,
389 MachineInstr *CopyMI) {
390 // A physical register cannot be live across basic block, so its
391 // lifetime must end somewhere in its defining basic block.
392 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
394 unsigned baseIndex = MIIdx;
395 unsigned start = getDefIndex(baseIndex);
396 unsigned end = start;
398 // If it is not used after definition, it is considered dead at
399 // the instruction defining it. Hence its interval is:
400 // [defSlot(def), defSlot(def)+1)
401 if (lv_->RegisterDefIsDead(mi, interval.reg)) {
403 end = getDefIndex(start) + 1;
407 // If it is not dead on definition, it must be killed by a
408 // subsequent instruction. Hence its interval is:
409 // [defSlot(def), useSlot(kill)+1)
// Scan forward through the rest of the block looking for the kill
// or an intervening redefinition.
410 while (++mi != MBB->end()) {
411 baseIndex += InstrSlots::NUM;
412 if (lv_->KillsRegister(mi, interval.reg)) {
414 end = getUseIndex(baseIndex) + 1;
416 } else if (lv_->ModifiesRegister(mi, interval.reg)) {
417 // Another instruction redefines the register before it is ever read.
418 // Then the register is essentially dead at the instruction that defines
419 // it. Hence its interval is:
420 // [defSlot(def), defSlot(def)+1)
422 end = getDefIndex(start) + 1;
427 // The only case we should have a dead physreg here without a killing or
428 // instruction where we know it's dead is if it is live-in to the function
430 assert(!CopyMI && "physreg was not killed in defining block!");
431 end = getDefIndex(start) + 1; // It's dead.
434 assert(start < end && "did not find end of interval?");
436 // Already exists? Extend old live interval.
437 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
438 VNInfo *ValNo = (OldLR != interval.end())
439 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
440 LiveRange LR(start, end, ValNo);
441 interval.addRange(LR);
442 interval.addKill(LR.valno, end);
443 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register definition to the virtual- or
// physical-register handler. Non-allocatable physregs are ignored.
446 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
447 MachineBasicBlock::iterator MI,
450 if (TargetRegisterInfo::isVirtualRegister(reg))
451 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
452 else if (allocatableRegs_[reg]) {
// Track the defining copy (if this def is a copy/extract_subreg) so the
// physreg value number records where it came from.
453 MachineInstr *CopyMI = NULL;
454 unsigned SrcReg, DstReg;
455 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
456 tii_->isMoveInstr(*MI, SrcReg, DstReg))
458 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
459 // Def of a register also defines its sub-registers.
460 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
461 // Avoid processing some defs more than once.
462 if (!MI->findRegisterDefOperand(*AS))
463 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a live range for a register that is live-in
// to basic block MBB, starting at index MIIdx. isAlias indicates the
// register was added because it aliases an explicit live-in.
467 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
469 LiveInterval &interval, bool isAlias) {
470 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
472 // Look for kills, if it reaches a def before it's killed, then it shouldn't
473 // be considered a livein.
474 MachineBasicBlock::iterator mi = MBB->begin();
475 unsigned baseIndex = MIIdx;
476 unsigned start = baseIndex;
477 unsigned end = start;
478 while (mi != MBB->end()) {
479 if (lv_->KillsRegister(mi, interval.reg)) {
481 end = getUseIndex(baseIndex) + 1;
483 } else if (lv_->ModifiesRegister(mi, interval.reg)) {
484 // Another instruction redefines the register before it is ever read.
485 // Then the register is essentially dead at the instruction that defines
486 // it. Hence its interval is:
487 // [defSlot(def), defSlot(def)+1)
489 end = getDefIndex(start) + 1;
493 baseIndex += InstrSlots::NUM;
498 // Live-in register might not be used at all.
502 end = getDefIndex(MIIdx) + 1;
504 DOUT << " live through";
// Record the computed range with a fresh value number (no defining copy).
509 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
510 interval.addRange(LR);
511 interval.addKill(LR.valno, end);
512 DOUT << " +" << LR << '\n';
515 /// computeIntervals - computes the live intervals for virtual
516 /// registers. for some ordering of the machine instructions [1,N] a
517 /// live interval is an interval [i, j) where 1 <= i <= j < N for
518 /// which a variable is live
519 void LiveIntervals::computeIntervals() {
520 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
521 << "********** Function: "
522 << ((Value*)mf_->getFunction())->getName() << '\n';
523 // Track the index of the current machine instr.
524 unsigned MIIndex = 0;
525 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
527 MachineBasicBlock *MBB = MBBI;
528 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
530 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
532 // Create intervals for live-ins to this BB first.
533 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
534 LE = MBB->livein_end(); LI != LE; ++LI) {
535 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
536 // Multiple live-ins can alias the same register.
537 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
538 if (!hasInterval(*AS))
539 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// Then walk the instructions, building an interval for every register def.
543 for (; MI != miEnd; ++MI) {
544 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse; only register defs are of interest here.
547 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
548 MachineOperand &MO = MI->getOperand(i);
549 // handle register defs - build intervals
550 if (MO.isRegister() && MO.getReg() && MO.isDef())
551 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
554 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect (into MBBs) the basic blocks whose start index
// falls inside the live range LR, using a binary search over the sorted
// Idx2MBBMap built in runOnMachineFunction.
559 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
560 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
561 std::vector<IdxMBBPair>::const_iterator I =
562 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
// Walk forward until a block starts at or beyond the range's end.
565 while (I != Idx2MBBMap.end()) {
566 if (LR.end <= I->first)
568 MBBs.push_back(I->second);
// createInterval - Construct a fresh LiveInterval for 'reg' with an initial
// spill weight that depends on whether the register is physical or virtual
// (the weight expressions are elided in this view — confirm upstream).
576 LiveInterval LiveIntervals::createInterval(unsigned reg) {
577 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
579 return LiveInterval(reg, Weight);
582 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
583 /// copy field and returns the source register that defines it.
584 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// For extract_subreg the source is operand 1 of the copy instruction.
588 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
589 return VNI->copy->getOperand(1).getReg();
// Otherwise ask the target to decompose a plain register move.
590 unsigned SrcReg, DstReg;
591 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
593 assert(0 && "Unrecognized copy instruction!");
597 //===----------------------------------------------------------------------===//
598 // Register allocator hooks.
601 /// isReMaterializable - Returns true if the definition MI of the specified
602 /// val# of the specified interval is re-materializable.
603 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
604 const VNInfo *ValNo, MachineInstr *MI,
// Trivially rematerializable (or implicit_def) instructions qualify
// immediately; record whether the def is a simple load.
610 const TargetInstrDesc &TID = MI->getDesc();
611 if (TID.isImplicitDef() || tii_->isTriviallyReMaterializable(MI)) {
612 isLoad = TID.isSimpleLoad();
// Otherwise only loads from immutable (fixed) stack slots are candidates.
617 if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
618 !mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
621 // This is a load from fixed stack slot. It can be rematerialized unless it's
622 // re-defined by a two-address instruction.
624 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
626 const VNInfo *VNI = *i;
629 unsigned DefIdx = VNI->def;
631 continue; // Dead val#.
632 MachineInstr *DefMI = (DefIdx == ~0u)
633 ? NULL : getInstructionFromIndex(DefIdx);
634 if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
642 /// isReMaterializable - Returns true if every definition of MI of every
643 /// val# of the specified interval is re-materializable.
644 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
// Check each value number's defining instruction via the per-val# overload.
646 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
648 const VNInfo *VNI = *i;
649 unsigned DefIdx = VNI->def;
651 continue; // Dead val#.
652 // Is the def for the val# rematerializable?
655 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
656 bool DefIsLoad = false;
657 if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
664 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
665 /// slot / to reg or any rematerialized load into ith operand of specified
666 /// MI. If it is successful, MI is updated with the newly created MI and
668 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
669 VirtRegMap &vrm, MachineInstr *DefMI,
671 SmallVector<unsigned, 2> &Ops,
672 bool isSS, int Slot, unsigned Reg) {
674 const TargetInstrDesc &TID = MI->getDesc();
675 // If it is an implicit def instruction, just delete it.
676 if (TID.isImplicitDef()) {
677 RemoveMachineInstrFromMaps(MI);
678 vrm.RemoveMachineInstrFromMaps(MI);
679 MI->eraseFromParent();
// Classify each candidate operand as mod/ref and collect foldable ones.
684 SmallVector<unsigned, 2> FoldOps;
685 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
686 unsigned OpIdx = Ops[i];
687 // FIXME: fold subreg use.
688 if (MI->getOperand(OpIdx).getSubReg())
690 if (MI->getOperand(OpIdx).isDef())
691 MRInfo |= (unsigned)VirtRegMap::isMod;
693 // Filter out two-address use operand(s).
694 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
695 MRInfo = VirtRegMap::isModRef;
698 MRInfo |= (unsigned)VirtRegMap::isRef;
700 FoldOps.push_back(OpIdx);
// Fold either from a stack slot (isSS) or from the rematerialized DefMI.
703 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
704 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
706 // Attempt to fold the memory reference into the instruction. If
707 // we can do this, we don't need to insert spill code.
709 lv_->instructionChanged(MI, fmi);
711 fmi->copyKillDeadInfo(MI, tri_);
712 MachineBasicBlock &MBB = *MI->getParent();
713 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
714 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
715 vrm.transferSpillPts(MI, fmi);
716 vrm.transferRestorePts(MI, fmi);
// Keep the index<->instruction maps consistent with the replacement,
// then splice the folded instruction in place of the original.
718 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
719 mi2iMap_[fmi] = InstrIdx;
720 MI = MBB.insert(MBB.erase(MI), fmi);
727 /// canFoldMemoryOperand - Returns true if the specified load / store
728 /// folding is possible.
729 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
730 SmallVector<unsigned, 2> &Ops) const {
// Collect the candidate operands, skipping sub-register uses (not yet
// supported — see the FIXME), then ask the target whether it can fold.
731 SmallVector<unsigned, 2> FoldOps;
732 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
733 unsigned OpIdx = Ops[i];
734 // FIXME: fold subreg use.
735 if (MI->getOperand(OpIdx).getSubReg())
737 FoldOps.push_back(OpIdx);
740 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Returns true if the entire live interval is contained
// within a single basic block (no range crosses a block boundary).
743 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
744 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
745 for (LiveInterval::Ranges::const_iterator
746 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Find the block containing this range's start via the sorted index map.
747 std::vector<IdxMBBPair>::const_iterator II =
748 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
749 if (II == Idx2MBBMap.end())
751 if (I->end > II->first) // crossing a MBB.
753 MBBs.insert(II->second);
760 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
761 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
763 rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
764 unsigned id, unsigned index, unsigned end, MachineInstr *MI,
765 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
766 unsigned Slot, int LdSlot,
767 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
768 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
769 const TargetRegisterClass* rc,
770 SmallVector<int, 4> &ReMatIds,
771 unsigned &NewVReg, bool &HasDef, bool &HasUse,
772 const MachineLoopInfo *loopInfo,
773 std::map<unsigned,unsigned> &MBBVRegsMap,
774 std::vector<LiveInterval*> &NewLIs) {
775 bool CanFold = false;
// Scan MI's operands looking for references to the spilled register li.reg.
777 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
778 MachineOperand& mop = MI->getOperand(i);
779 if (!mop.isRegister())
781 unsigned Reg = mop.getReg();
783 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
788 bool TryFold = !DefIsReMat;
789 bool FoldSS = true; // Default behavior unless it's a remat.
792 // If this is the rematerializable definition MI itself and
793 // all of its uses are rematerialized, simply delete it.
794 if (MI == ReMatOrigDefMI && CanDelete) {
795 DOUT << "\t\t\t\tErasing re-materlizable def: ";
797 RemoveMachineInstrFromMaps(MI);
798 vrm.RemoveMachineInstrFromMaps(MI);
799 MI->eraseFromParent();
803 // If def for this use can't be rematerialized, then try folding.
804 // If def is rematerializable and it's a load, also try folding.
805 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
807 // Try fold loads (from stack slot, constant pool, etc.) into uses.
813 // Scan all of the operands of this instruction rewriting operands
814 // to use NewVReg instead of li.reg as appropriate. We do this for
817 // 1. If the instr reads the same spilled vreg multiple times, we
818 // want to reuse the NewVReg.
819 // 2. If the instr is a two-addr instruction, we are required to
820 // keep the src/dst regs pinned.
822 // Keep track of whether we replace a use and/or def so that we can
823 // create the spill interval with the appropriate range.
825 HasUse = mop.isUse();
826 HasDef = mop.isDef();
827 SmallVector<unsigned, 2> Ops;
// Gather all other operands of MI that reference the same register.
829 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
830 const MachineOperand &MOj = MI->getOperand(j);
831 if (!MOj.isRegister())
833 unsigned RegJ = MOj.getReg();
834 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
838 HasUse |= MOj.isUse();
839 HasDef |= MOj.isDef();
844 // Do not fold load / store here if we are splitting. We'll find an
845 // optimal point to insert a load / store later.
847 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
848 Ops, FoldSS, FoldSlot, Reg)) {
849 // Folding the load/store can completely change the instruction in
850 // unpredictable ways, rescan it from the beginning.
854 goto RestartInstruction;
857 CanFold = canFoldMemoryOperand(MI, Ops);
862 // Create a new virtual register for the spill interval.
863 bool CreatedNewVReg = false;
865 NewVReg = RegInfo.createVirtualRegister(rc);
867 CreatedNewVReg = true;
871 // Reuse NewVReg for other reads.
872 for (unsigned j = 0, e = Ops.size(); j != e; ++j)
873 MI->getOperand(Ops[j]).setReg(NewVReg);
875 if (CreatedNewVReg) {
// Register the new vreg with the VirtRegMap: either as a remat (with a
// per-val# remat id) or with an assigned stack slot.
877 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
878 if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
879 // Each valnum may have its own remat id.
880 ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
882 vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
884 if (!CanDelete || (HasUse && HasDef)) {
885 // If this is a two-addr instruction then its use operands are
886 // rematerializable but its def is not. It should be assigned a
888 vrm.assignVirt2StackSlot(NewVReg, Slot);
891 vrm.assignVirt2StackSlot(NewVReg, Slot);
893 } else if (HasUse && HasDef &&
894 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
895 // If this interval hasn't been assigned a stack slot (because earlier
896 // def is a deleted remat def), do it now.
897 assert(Slot != VirtRegMap::NO_STACK_SLOT);
898 vrm.assignVirt2StackSlot(NewVReg, Slot);
901 // create a new register interval for this spill / remat.
902 LiveInterval &nI = getOrCreateInterval(NewVReg);
903 if (CreatedNewVReg) {
904 NewLIs.push_back(&nI);
905 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
907 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the spill interval a range covering the use (load slot to use slot)
// and/or the def (def slot to store slot) at this instruction.
911 if (CreatedNewVReg) {
912 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
913 nI.getNextValue(~0U, 0, VNInfoAllocator));
917 // Extend the split live interval to this def / use.
918 unsigned End = getUseIndex(index)+1;
919 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
920 nI.getValNumInfo(nI.getNumValNums()-1));
926 LiveRange LR(getDefIndex(index), getStoreIndex(index),
927 nI.getNextValue(~0U, 0, VNInfoAllocator));
932 DOUT << "\t\t\t\tAdded new interval: ";
933 nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - Returns true if the given value number has a kill
// inside MBB at an index strictly after Idx (and before the block's end).
938 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
940 MachineBasicBlock *MBB, unsigned Idx) const {
941 unsigned End = getMBBEndIdx(MBB);
942 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
943 unsigned KillIdx = VNI->kills[j];
944 if (KillIdx > Idx && KillIdx < End)
// findDefinedVNInfo - Return the value number of 'li' whose def index is
// exactly DefIdx (the body's return statements are elided in this view).
950 static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
951 const VNInfo *VNI = NULL;
952 for (LiveInterval::const_vni_iterator i = li.vni_begin(),
953 e = li.vni_end(); i != e; ++i)
954 if ((*i)->def == DefIdx) {
961 /// RewriteInfo - Keep track of machine instrs that will be rewritten
// Constructor: capture the instruction index, the instruction, and whether
// it uses and/or defines the register being rewritten.
968 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
969 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by instruction index, used to sort RewriteInfos.
972 struct RewriteInfoCompare {
973 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
974 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite every def / use of li.reg that falls
// inside the live range [I->start, I->end) for spilling or (when TrySplit is
// set) per-basic-block interval splitting.  Spill points are accumulated in
// SpillMBBs / SpillIdxes and reload points in RestoreMBBs / RestoreIdxes
// (both keyed by MBB number); intervals created for the rewritten operands
// are appended to NewLIs.  Spill weights of the new intervals are updated as
// instructions are rewritten.
// NOTE(review): this listing is elided — the embedded source line numbers
// skip values, so some statements, `continue`s and closing braces are
// missing from this view.  Treat gaps accordingly.
979 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
980 LiveInterval::Ranges::const_iterator &I,
981 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
982 unsigned Slot, int LdSlot,
983 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
984 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
985 const TargetRegisterClass* rc,
986 SmallVector<int, 4> &ReMatIds,
987 const MachineLoopInfo *loopInfo,
988 BitVector &SpillMBBs,
989 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
990 BitVector &RestoreMBBs,
991 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
992 std::map<unsigned,unsigned> &MBBVRegsMap,
993 std::vector<LiveInterval*> &NewLIs) {
// True while every def / use rewritten so far could be folded into its
// instruction; used below to lower the spill weight of fully-foldable
// intervals.
994 bool AllCanFold = true;
995 unsigned NewVReg = 0;
// Round the range endpoints to whole-instruction indices
// (InstrSlots::NUM slots per instruction).
996 unsigned start = getBaseIndex(I->start);
997 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
999 // First collect all the def / use in this live range that will be rewritten.
1000 // Make sure they are sorted according instruction index.
1001 std::vector<RewriteInfo> RewriteMIs;
1002 for (MachineRegisterInfo::reg_iterator ri = RegInfo.reg_begin(li.reg),
1003 re = RegInfo.reg_end(); ri != re; ) {
1004 MachineInstr *MI = &(*ri);
1005 MachineOperand &O = ri.getOperand();
1007 unsigned index = getInstructionIndex(MI);
// Only operands whose instruction index falls inside [start, end) are
// collected (the skip for out-of-range indices is elided in this listing).
1008 if (index < start || index >= end)
1010 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1012 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1014 // Now rewrite the defs and uses.
1015 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1016 RewriteInfo &rwi = RewriteMIs[i];
1018 unsigned index = rwi.Index;
1019 bool MIHasUse = rwi.HasUse;
1020 bool MIHasDef = rwi.HasDef;
1021 MachineInstr *MI = rwi.MI;
1022 // If MI def and/or use the same register multiple times, then there
1023 // are multiple entries.  Coalesce them into a single HasUse/HasDef pair.
1024 while (i != e && RewriteMIs[i].MI == MI) {
1025 assert(RewriteMIs[i].Index == index);
1026 MIHasUse |= RewriteMIs[i].HasUse;
1027 MIHasDef |= RewriteMIs[i].HasDef;
// When splitting, each MBB reuses one new vreg; MBBVRegsMap remembers the
// vreg last used for this block.
1030 MachineBasicBlock *MBB = MI->getParent();
1031 unsigned MBBId = MBB->getNumber();
1032 unsigned ThisVReg = 0;
1034 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1035 if (NVI != MBBVRegsMap.end()) {
1036 ThisVReg = NVI->second;
1043 // It's better to start a new interval to avoid artifically
1044 // extend the new interval.
1045 if (MIHasDef && !MIHasUse) {
// Def-only instruction: drop the per-MBB mapping so a fresh vreg is used.
1046 MBBVRegsMap.erase(MBB->getNumber());
1052 bool IsNew = ThisVReg == 0;
1054 // This ends the previous live interval. If all of its def / use
1055 // can be folded, give it a low spill weight.
1056 if (NewVReg && TrySplit && AllCanFold) {
1057 LiveInterval &nI = getOrCreateInterval(NewVReg);
1064 bool HasDef = false;
1065 bool HasUse = false;
// Rewrite this single instruction; reports back (via HasDef/HasUse) which
// operand kinds were actually rewritten and whether folding was possible.
1066 bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
1067 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1068 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1069 CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
1070 HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
// Nothing rewritten for this instruction (skip is elided in this listing).
1071 if (!HasDef && !HasUse)
1074 AllCanFold &= CanFold;
1076 // Update weight of spill interval.
1077 LiveInterval &nI = getOrCreateInterval(NewVReg);
1079 // The spill weight is now infinity as it cannot be spilled again.
1080 nI.weight = HUGE_VALF;
1084 // Keep track of the last def and first use in each MBB.
// Record a spill point after this def — unless the def is the original
// rematerializable def that is going to be deleted.
1086 if (MI != ReMatOrigDefMI || !CanDelete) {
1087 bool HasKill = false;
1089 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1091 // If this is a two-address code, then this index starts a new VNInfo.
1092 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1094 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1096 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1097 SpillIdxes.find(MBBId);
1099 if (SII == SpillIdxes.end()) {
// First spill recorded for this MBB.
1100 std::vector<SRInfo> S;
1101 S.push_back(SRInfo(index, NewVReg, true));
1102 SpillIdxes.insert(std::make_pair(MBBId, S));
1103 } else if (SII->second.back().vreg != NewVReg) {
1104 SII->second.push_back(SRInfo(index, NewVReg, true));
1105 } else if ((int)index > SII->second.back().index) {
1106 // If there is an earlier def and this is a two-address
1107 // instruction, then it's not possible to fold the store (which
1108 // would also fold the load).
1109 SRInfo &Info = SII->second.back();
1111 Info.canFold = !HasUse;
1113 SpillMBBs.set(MBBId);
1114 } else if (SII != SpillIdxes.end() &&
1115 SII->second.back().vreg == NewVReg &&
1116 (int)index > SII->second.back().index) {
1117 // There is an earlier def that's not killed (must be two-address).
1118 // The spill is no longer needed.
1119 SII->second.pop_back();
1120 if (SII->second.empty()) {
1121 SpillIdxes.erase(MBBId);
1122 SpillMBBs.reset(MBBId);
// Use handling: decide whether earlier spills / restores in this MBB are
// still foldable, and record a reload point if one is needed.
1129 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1130 SpillIdxes.find(MBBId);
1131 if (SII != SpillIdxes.end() &&
1132 SII->second.back().vreg == NewVReg &&
1133 (int)index > SII->second.back().index)
1134 // Use(s) following the last def, it's not safe to fold the spill.
1135 SII->second.back().canFold = false;
1136 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1137 RestoreIdxes.find(MBBId);
1138 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1139 // If we are splitting live intervals, only fold if it's the first
1140 // use and there isn't another use later in the MBB.
1141 RII->second.back().canFold = false;
1143 // Only need a reload if there isn't an earlier def / use.
1144 if (RII == RestoreIdxes.end()) {
1145 std::vector<SRInfo> Infos;
1146 Infos.push_back(SRInfo(index, NewVReg, true));
1147 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1149 RII->second.push_back(SRInfo(index, NewVReg, true));
1151 RestoreMBBs.set(MBBId);
1155 // Update spill weight.
1156 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1157 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// Final interval of the loop: same low-spill-weight treatment as the
// per-iteration case above when everything was foldable.
1160 if (NewVReg && TrySplit && AllCanFold) {
1161 // If all of its def / use can be folded, give it a low spill weight.
1162 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if a foldable restore of register vr has
// been recorded at instruction `index` in basic block `Id`, i.e. folding the
// spill would also let us fold (and elide) the matching reload.
// NOTE(review): the `return` statements (early false after the guard, true
// inside the loop, false at the end) are elided from this listing.
1167 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1168 BitVector &RestoreMBBs,
1169 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this MBB at all.
1170 if (!RestoreMBBs[Id])
1172 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Scan for a restore entry that matches both the instruction index and the
// virtual register, and is still marked foldable.
1173 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1174 if (Restores[i].index == index &&
1175 Restores[i].vreg == vr &&
1176 Restores[i].canFold)
1181 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1182 BitVector &RestoreMBBs,
1183 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1184 if (!RestoreMBBs[Id])
1186 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1187 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1188 if (Restores[i].index == index && Restores[i].vreg)
1189 Restores[i].index = -1;
/// addIntervalsForSpills - Spill (or split) the live interval li: rewrite
/// its defs / uses onto fresh virtual registers, record per-MBB spill and
/// restore points, try to fold the resulting stores / loads into the
/// instructions themselves, and return the newly created intervals.
/// NOTE(review): this listing is elided (the embedded source line numbers
/// skip values) — several statements, else-branches and closing braces,
/// including the final `return`, are missing from this view.
1193 std::vector<LiveInterval*> LiveIntervals::
1194 addIntervalsForSpills(const LiveInterval &li,
1195 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1196 // Since this is called after the analysis is done we don't know if
1197 // LiveVariables is available
1198 lv_ = getAnalysisToUpdate<LiveVariables>();
// HUGE_VALF weight marks an interval produced by a previous spill; those
// must never be spilled again.
1200 assert(li.weight != HUGE_VALF &&
1201 "attempt to spill already spilled interval!");
1203 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1204 li.print(DOUT, tri_);
1207 // Each bit specify whether it a spill is required in the MBB.
1208 BitVector SpillMBBs(mf_->getNumBlockIDs());
1209 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1210 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1211 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1212 std::map<unsigned,unsigned> MBBVRegsMap;
1213 std::vector<LiveInterval*> NewLIs;
1214 MachineRegisterInfo &RegInfo = mf_->getRegInfo();
1215 const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);
// Per-value-number rematerialization state, indexed by VNInfo id.
1217 unsigned NumValNums = li.getNumValNums();
1218 SmallVector<MachineInstr*, 4> ReMatDefs;
1219 ReMatDefs.resize(NumValNums, NULL);
1220 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1221 ReMatOrigDefs.resize(NumValNums, NULL);
1222 SmallVector<int, 4> ReMatIds;
1223 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1224 BitVector ReMatDelete(NumValNums);
1225 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1227 // Spilling a split live interval. It cannot be split any further. Also,
1228 // it's also guaranteed to be a single val# / range interval.
1229 if (vrm.getPreSplitReg(li.reg)) {
1230 vrm.setIsSplitFromReg(li.reg, 0);
1231 // Unset the split kill marker on the last use.
1232 unsigned KillIdx = vrm.getKillPoint(li.reg);
1234 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1235 assert(KillMI && "Last use disappeared?");
1236 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1237 assert(KillOp != -1 && "Last use disappeared?");
1238 KillMI->getOperand(KillOp).setIsKill(false);
1240 vrm.removeKillPoint(li.reg);
1241 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1242 Slot = vrm.getStackSlot(li.reg);
1243 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1244 MachineInstr *ReMatDefMI = DefIsReMat ?
1245 vrm.getReMaterializedMI(li.reg) : NULL;
1247 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1248 bool isLoad = isLoadSS ||
1249 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1250 bool IsFirstRange = true;
1251 for (LiveInterval::Ranges::const_iterator
1252 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1253 // If this is a split live interval with multiple ranges, it means there
1254 // are two-address instructions that re-defined the value. Only the
1255 // first def can be rematerialized!
1257 // Note ReMatOrigDefMI has already been deleted.
1258 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1259 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1260 false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1261 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1262 MBBVRegsMap, NewLIs);
// Non-first ranges: plain spill rewrite, no rematerialization.
1264 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1265 Slot, 0, false, false, false,
1266 false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1267 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1268 MBBVRegsMap, NewLIs);
1270 IsFirstRange = false;
// Normal (non-pre-split) path: only split intervals that span multiple
// MBBs, and respect the -split-limit debugging cap.
1275 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1276 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1280 bool NeedStackSlot = false;
// Decide, per value number, whether its def is rematerializable and
// whether the original def instruction can be deleted outright.
1281 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1283 const VNInfo *VNI = *i;
1284 unsigned VN = VNI->id;
1285 unsigned DefIdx = VNI->def;
1287 continue; // Dead val#.
1288 // Is the def for the val# rematerializable?
1289 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1290 ? 0 : getInstructionFromIndex(DefIdx);
1292 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1293 // Remember how to remat the def of this val#.
1294 ReMatOrigDefs[VN] = ReMatDefMI;
1295 // Original def may be modified so we have to make a copy here. vrm must
1297 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1299 bool CanDelete = true;
1300 if (VNI->hasPHIKill) {
1301 // A kill is a phi node, not all of its uses can be rematerialized.
1302 // It must not be deleted.
1304 // Need a stack slot if there is any live range where uses cannot be
1306 NeedStackSlot = true;
1309 ReMatDelete.set(VN);
1311 // Need a stack slot if there is any live range where uses cannot be
1313 NeedStackSlot = true;
1317 // One stack slot per live interval.
1318 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1319 Slot = vrm.assignVirt2StackSlot(li.reg);
1321 // Create new intervals and rewrite defs and uses.
1322 for (LiveInterval::Ranges::const_iterator
1323 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1324 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1325 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1326 bool DefIsReMat = ReMatDefMI != NULL;
1327 bool CanDelete = ReMatDelete[I->valno->id];
1329 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1330 bool isLoad = isLoadSS ||
1331 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1332 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1333 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1334 CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
1335 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1336 MBBVRegsMap, NewLIs);
1339 // Insert spills / restores if we are splitting.
1343 SmallPtrSet<LiveInterval*, 4> AddedKill;
1344 SmallVector<unsigned, 2> Ops;
1345 if (NeedStackSlot) {
// Walk every MBB that needs a spill (BitVector find_first/find_next scan).
1346 int Id = SpillMBBs.find_first();
1348 std::vector<SRInfo> &spills = SpillIdxes[Id];
1349 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1350 int index = spills[i].index;
1351 unsigned VReg = spills[i].vreg;
1352 LiveInterval &nI = getOrCreateInterval(VReg);
1353 bool isReMat = vrm.isReMaterialized(VReg);
1354 MachineInstr *MI = getInstructionFromIndex(index);
1355 bool CanFold = false;
1356 bool FoundUse = false;
1358 if (spills[i].canFold) {
// Collect the operand indices of VReg within MI (into Ops) to attempt
// a store fold.
1360 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1361 MachineOperand &MO = MI->getOperand(j);
1362 if (!MO.isRegister() || MO.getReg() != VReg)
1369 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1370 RestoreMBBs, RestoreIdxes))) {
1371 // MI has two-address uses of the same register. If the use
1372 // isn't the first and only use in the BB, then we can't fold
1373 // it. FIXME: Move this to rewriteInstructionsForSpills.
1380 // Fold the store into the def if possible.
1381 bool Folded = false;
1382 if (CanFold && !Ops.empty()) {
1383 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1386 // Also folded uses, do not issue a load.
1387 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1388 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1390 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1394 // Else tell the spiller to issue a spill.
1396 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1397 bool isKill = LR->end == getStoreIndex(index);
1398 vrm.addSpillPoint(VReg, isKill, MI);
1400 AddedKill.insert(&nI);
1403 Id = SpillMBBs.find_next(Id);
// Second pass: walk every MBB that needs a restore (reload).
1407 int Id = RestoreMBBs.find_first();
1409 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1410 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
// index == -1 marks an entry invalidated by eraseRestoreInfo (the skip
// for that case is elided in this listing).
1411 int index = restores[i].index;
1414 unsigned VReg = restores[i].vreg;
1415 LiveInterval &nI = getOrCreateInterval(VReg);
1416 MachineInstr *MI = getInstructionFromIndex(index);
1417 bool CanFold = false;
1419 if (restores[i].canFold) {
1421 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1422 MachineOperand &MO = MI->getOperand(j);
1423 if (!MO.isRegister() || MO.getReg() != VReg)
1427 // If this restore were to be folded, it would have been folded
1436 // Fold the load into the use if possible.
1437 bool Folded = false;
1438 if (CanFold && !Ops.empty()) {
1439 if (!vrm.isReMaterialized(VReg))
1440 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialized value: try folding the remat def instead.
1442 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1444 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1445 // If the rematerializable def is a load, also try to fold it.
1446 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1447 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1448 Ops, isLoadSS, LdSlot, VReg);
1451 // If folding is not possible / failed, then tell the spiller to issue a
1452 // load / rematerialization for us.
1454 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1456 vrm.addRestorePoint(VReg, MI);
1458 Id = RestoreMBBs.find_next(Id);
1461 // Finalize intervals: add kills, finalize spill weights, and filter out
1463 std::vector<LiveInterval*> RetNewLIs;
1464 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1465 LiveInterval *LI = NewLIs[i];
// Normalize accumulated weight by the interval's size.
1467 LI->weight /= LI->getSize();
1468 if (!AddedKill.count(LI)) {
// No spill point marked a kill for this interval: mark the last use as
// the kill and tell the VirtRegMap about it.
1469 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1470 unsigned LastUseIdx = getBaseIndex(LR->end);
1471 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1472 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
1473 assert(UseIdx != -1);
1474 if (LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) ==
1476 LastUse->getOperand(UseIdx).setIsKill();
1477 vrm.addKillPoint(LI->reg, LastUseIdx);
1480 RetNewLIs.push_back(LI);