1 //===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "virtregrewriter"
11 #include "VirtRegRewriter.h"
12 #include "llvm/Function.h"
13 #include "llvm/CodeGen/MachineFrameInfo.h"
14 #include "llvm/CodeGen/MachineInstrBuilder.h"
15 #include "llvm/CodeGen/MachineRegisterInfo.h"
16 #include "llvm/Support/CommandLine.h"
17 #include "llvm/Support/Debug.h"
18 #include "llvm/Support/ErrorHandling.h"
19 #include "llvm/Support/raw_ostream.h"
20 #include "llvm/Target/TargetInstrInfo.h"
21 #include "llvm/Target/TargetLowering.h"
22 #include "llvm/ADT/DepthFirstIterator.h"
23 #include "llvm/ADT/Statistic.h"
27 STATISTIC(NumDSE , "Number of dead stores elided");
28 STATISTIC(NumDSS , "Number of dead spill slots removed");
29 STATISTIC(NumCommutes, "Number of instructions commuted");
30 STATISTIC(NumDRM , "Number of re-materializable defs elided");
31 STATISTIC(NumStores , "Number of stores added");
32 STATISTIC(NumPSpills , "Number of physical register spills");
33 STATISTIC(NumOmitted , "Number of reloads omited");
34 STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
35 STATISTIC(NumCopified, "Number of available reloads turned into copies");
36 STATISTIC(NumReMats , "Number of re-materialization");
37 STATISTIC(NumLoads , "Number of loads added");
38 STATISTIC(NumReused , "Number of values reused");
39 STATISTIC(NumDCE , "Number of copies elided");
40 STATISTIC(NumSUnfold , "Number of stores unfolded");
41 STATISTIC(NumModRefUnfold, "Number of modref unfolded");
44 enum RewriterName { local, trivial };
47 static cl::opt<RewriterName>
48 RewriterOpt("rewriter",
49 cl::desc("Rewriter to use (default=local)"),
51 cl::values(clEnumVal(local, "local rewriter"),
52 clEnumVal(trivial, "trivial rewriter"),
57 ScheduleSpills("schedule-spills",
58 cl::desc("Schedule spill code"),
61 VirtRegRewriter::~VirtRegRewriter() {}
63 /// substitutePhysReg - Replace virtual register in MachineOperand with a
64 /// physical register. Do the right thing with the sub-register index.
65 /// Note that operands may be added, so the MO reference is no longer valid.
66 static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
67 const TargetRegisterInfo &TRI) {
68 if (unsigned SubIdx = MO.getSubReg()) {
69 // Insert the physical subreg and reset the subreg field.
70 MO.setReg(TRI.getSubReg(Reg, SubIdx));
73 // Any def, dead, and kill flags apply to the full virtual register, so they
74 // also apply to the full physical register. Add imp-def/dead and imp-kill
76 MachineInstr &MI = *MO.getParent();
79 MI.addRegisterDead(Reg, &TRI, /*AddIfNotFound=*/ true);
81 MI.addRegisterDefined(Reg, &TRI);
82 else if (!MO.isUndef() &&
84 MI.isRegTiedToDefOperand(&MO-&MI.getOperand(0))))
85 MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
93 /// This class is intended for use with the new spilling framework only. It
94 /// rewrites vreg def/uses to use the assigned preg, but does not insert any
96 struct TrivialRewriter : public VirtRegRewriter {
98 bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
100 DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
101 DEBUG(dbgs() << "********** Function: "
102 << MF.getFunction()->getName() << '\n');
103 DEBUG(dbgs() << "**** Machine Instrs"
104 << "(NOTE! Does not include spills and reloads!) ****\n");
107 MachineRegisterInfo *mri = &MF.getRegInfo();
108 const TargetRegisterInfo *tri = MF.getTarget().getRegisterInfo();
110 bool changed = false;
112 for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
113 liItr != liEnd; ++liItr) {
115 const LiveInterval *li = liItr->second;
116 unsigned reg = li->reg;
118 if (TargetRegisterInfo::isPhysicalRegister(reg)) {
120 mri->setPhysRegUsed(reg);
123 if (!VRM.hasPhys(reg))
125 unsigned pReg = VRM.getPhys(reg);
126 mri->setPhysRegUsed(pReg);
127 // Copy the register use-list before traversing it.
128 SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
129 for (MachineRegisterInfo::reg_iterator I = mri->reg_begin(reg),
130 E = mri->reg_end(); I != E; ++I)
131 reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
132 for (unsigned N=0; N != reglist.size(); ++N)
133 substitutePhysReg(reglist[N].first->getOperand(reglist[N].second),
135 changed |= !reglist.empty();
139 DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
149 // ************************************************************************ //
153 /// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
154 /// from top down, keep track of which spill slots or remat are available in
157 /// Note that not all physregs are created equal here. In particular, some
158 /// physregs are reloads that we are allowed to clobber or ignore at any time.
159 /// Other physregs are values that the register allocated program is using
160 /// that we cannot CHANGE, but we can read if we like. We keep track of this
161 /// on a per-stack-slot / remat id basis as the low bit in the value of the
162 /// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
163 /// this bit and addAvailable sets it if.
164 class AvailableSpills {
165 const TargetRegisterInfo *TRI;
166 const TargetInstrInfo *TII;
168 // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
169 // or remat'ed virtual register values that are still available, due to
170 // being loaded or stored to, but not invalidated yet.
171 std::map<int, unsigned> SpillSlotsOrReMatsAvailable;
173 // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
174 // indicating which stack slot values are currently held by a physreg. This
175 // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
176 // physreg is modified.
177 std::multimap<unsigned, int> PhysRegsAvailable;
179 void disallowClobberPhysRegOnly(unsigned PhysReg);
181 void ClobberPhysRegOnly(unsigned PhysReg);
183 AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
184 : TRI(tri), TII(tii) {
187 /// clear - Reset the state.
189 SpillSlotsOrReMatsAvailable.clear();
190 PhysRegsAvailable.clear();
193 const TargetRegisterInfo *getRegInfo() const { return TRI; }
195 /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
196 /// available in a physical register, return that PhysReg, otherwise
198 unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
199 std::map<int, unsigned>::const_iterator I =
200 SpillSlotsOrReMatsAvailable.find(Slot);
201 if (I != SpillSlotsOrReMatsAvailable.end()) {
202 return I->second >> 1; // Remove the CanClobber bit.
207 /// addAvailable - Mark that the specified stack slot / remat is available
208 /// in the specified physreg. If CanClobber is true, the physreg can be
209 /// modified at any time without changing the semantics of the program.
210 void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
211 // If this stack slot is thought to be available in some other physreg,
212 // remove its record.
213 ModifyStackSlotOrReMat(SlotOrReMat);
215 PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
216 SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) |
217 (unsigned)CanClobber;
219 if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
220 DEBUG(dbgs() << "Remembering RM#"
221 << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
223 DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
224 DEBUG(dbgs() << " in physreg " << TRI->getName(Reg) << "\n");
227 /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
228 /// the value of the specified stackslot register if it desires. The
229 /// specified stack slot must be available in a physreg for this query to
231 bool canClobberPhysRegForSS(int SlotOrReMat) const {
232 assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
233 "Value not available!");
234 return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
237 /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
238 /// physical register where values for some stack slot(s) might be
240 bool canClobberPhysReg(unsigned PhysReg) const {
241 std::multimap<unsigned, int>::const_iterator I =
242 PhysRegsAvailable.lower_bound(PhysReg);
243 while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
244 int SlotOrReMat = I->second;
246 if (!canClobberPhysRegForSS(SlotOrReMat))
252 /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
253 /// stackslot register. The register is still available but is no longer
254 /// allowed to be modifed.
255 void disallowClobberPhysReg(unsigned PhysReg);
257 /// ClobberPhysReg - This is called when the specified physreg changes
258 /// value. We use this to invalidate any info about stuff that lives in
259 /// it and any of its aliases.
260 void ClobberPhysReg(unsigned PhysReg);
262 /// ModifyStackSlotOrReMat - This method is called when the value in a stack
263 /// slot changes. This removes information about which register the
264 /// previous value for this slot lives in (as the previous value is dead
266 void ModifyStackSlotOrReMat(int SlotOrReMat);
268 /// AddAvailableRegsToLiveIn - Availability information is being kept coming
269 /// into the specified MBB. Add available physical registers as potential
270 /// live-in's. If they are reused in the MBB, they will be added to the
271 /// live-in set to make register scavenger and post-allocation scheduler.
272 void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
273 std::vector<MachineOperand*> &KillOps);
278 // ************************************************************************ //
280 // Given a location where a reload of a spilled register or a remat of
281 // a constant is to be inserted, attempt to find a safe location to
282 // insert the load at an earlier point in the basic-block, to hide
283 // latency of the load and to avoid address-generation interlock
285 static MachineBasicBlock::iterator
286 ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
287 MachineBasicBlock::iterator const Begin,
289 const TargetRegisterInfo *TRI,
292 const TargetInstrInfo *TII,
293 const MachineFunction &MF)
298 // Spill backscheduling is of primary interest to addresses, so
299 // don't do anything if the register isn't in the register class
300 // used for pointers.
302 const TargetLowering *TL = MF.getTarget().getTargetLowering();
304 if (!TL->isTypeLegal(TL->getPointerTy()))
305 // Believe it or not, this is true on PIC16.
308 const TargetRegisterClass *ptrRegClass =
309 TL->getRegClassFor(TL->getPointerTy());
310 if (!ptrRegClass->contains(PhysReg))
313 // Scan upwards through the preceding instructions. If an instruction doesn't
314 // reference the stack slot or the register we're loading, we can
315 // backschedule the reload up past it.
316 MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
317 while (NewInsertLoc != Begin) {
318 MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
319 for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
320 MachineOperand &Op = Prev->getOperand(i);
321 if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
324 if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
325 Prev->findRegisterDefOperand(PhysReg))
327 for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
328 if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
329 Prev->findRegisterDefOperand(*Alias))
335 // If we made it to the beginning of the block, turn around and move back
336 // down just past any existing reloads. They're likely to be reloads/remats
337 // for instructions earlier than what our current reload/remat is for, so
338 // they should be scheduled earlier.
339 if (NewInsertLoc == Begin) {
341 while (InsertLoc != NewInsertLoc &&
342 (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
343 TII->isTriviallyReMaterializable(NewInsertLoc)))
352 // ReusedOp - For each reused operand, we keep track of a bit of information,
353 // in case we need to rollback upon processing a new operand. See comments
356 // The MachineInstr operand that reused an available value.
359 // StackSlotOrReMat - The spill slot or remat id of the value being reused.
360 unsigned StackSlotOrReMat;
362 // PhysRegReused - The physical register the value was available in.
363 unsigned PhysRegReused;
365 // AssignedPhysReg - The physreg that was assigned for use by the reload.
366 unsigned AssignedPhysReg;
368 // VirtReg - The virtual register itself.
371 ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
373 : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
374 AssignedPhysReg(apr), VirtReg(vreg) {}
377 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
378 /// is reused instead of reloaded.
381 std::vector<ReusedOp> Reuses;
382 BitVector PhysRegsClobbered;
384 ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
385 PhysRegsClobbered.resize(tri->getNumRegs());
388 bool hasReuses() const {
389 return !Reuses.empty();
392 /// addReuse - If we choose to reuse a virtual register that is already
393 /// available instead of reloading it, remember that we did so.
394 void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
395 unsigned PhysRegReused, unsigned AssignedPhysReg,
397 // If the reload is to the assigned register anyway, no undo will be
399 if (PhysRegReused == AssignedPhysReg) return;
401 // Otherwise, remember this.
402 Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
403 AssignedPhysReg, VirtReg));
406 void markClobbered(unsigned PhysReg) {
407 PhysRegsClobbered.set(PhysReg);
410 bool isClobbered(unsigned PhysReg) const {
411 return PhysRegsClobbered.test(PhysReg);
414 /// GetRegForReload - We are about to emit a reload into PhysReg. If there
415 /// is some other operand that is using the specified register, either pick
416 /// a new register to use, or evict the previous reload and use this reg.
417 unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
418 MachineFunction &MF, MachineInstr *MI,
419 AvailableSpills &Spills,
420 std::vector<MachineInstr*> &MaybeDeadStores,
421 SmallSet<unsigned, 8> &Rejected,
423 std::vector<MachineOperand*> &KillOps,
426 /// GetRegForReload - Helper for the above GetRegForReload(). Add a
427 /// 'Rejected' set to remember which registers have been considered and
428 /// rejected for the reload. This avoids infinite looping in case like
431 /// t2 <- assigned r0 for use by the reload but ended up reuse r1
432 /// t3 <- assigned r1 for use by the reload but ended up reuse r0
434 /// sees r1 is taken by t2, tries t2's reload register r0
435 /// sees r0 is taken by t3, tries t3's reload register r1
436 /// sees r1 is taken by t2, tries t2's reload register r0 ...
437 unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
438 AvailableSpills &Spills,
439 std::vector<MachineInstr*> &MaybeDeadStores,
441 std::vector<MachineOperand*> &KillOps,
443 SmallSet<unsigned, 8> Rejected;
444 MachineFunction &MF = *MI->getParent()->getParent();
445 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
446 return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
447 Rejected, RegKills, KillOps, VRM);
453 // ****************** //
454 // Utility Functions //
455 // ****************** //
457 /// findSinglePredSuccessor - Return via reference a vector of machine basic
458 /// blocks each of which is a successor of the specified BB and has no other
460 static void findSinglePredSuccessor(MachineBasicBlock *MBB,
461 SmallVectorImpl<MachineBasicBlock *> &Succs) {
462 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
463 SE = MBB->succ_end(); SI != SE; ++SI) {
464 MachineBasicBlock *SuccMBB = *SI;
465 if (SuccMBB->pred_size() == 1)
466 Succs.push_back(SuccMBB);
470 /// InvalidateKill - Invalidate register kill information for a specific
471 /// register. This also unsets the kills marker on the last kill operand.
472 static void InvalidateKill(unsigned Reg,
473 const TargetRegisterInfo* TRI,
475 std::vector<MachineOperand*> &KillOps) {
477 KillOps[Reg]->setIsKill(false);
478 // KillOps[Reg] might be a def of a super-register.
479 unsigned KReg = KillOps[Reg]->getReg();
480 KillOps[KReg] = NULL;
481 RegKills.reset(KReg);
482 for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
484 KillOps[*SR]->setIsKill(false);
492 /// InvalidateKills - MI is going to be deleted. If any of its operands are
493 /// marked kill, then invalidate the information.
494 static void InvalidateKills(MachineInstr &MI,
495 const TargetRegisterInfo* TRI,
497 std::vector<MachineOperand*> &KillOps,
498 SmallVector<unsigned, 2> *KillRegs = NULL) {
499 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
500 MachineOperand &MO = MI.getOperand(i);
501 if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
503 unsigned Reg = MO.getReg();
504 if (TargetRegisterInfo::isVirtualRegister(Reg))
507 KillRegs->push_back(Reg);
508 assert(Reg < KillOps.size());
509 if (KillOps[Reg] == &MO) {
512 for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
522 /// InvalidateRegDef - If the def operand of the specified def MI is now dead
523 /// (since its spill instruction is removed), mark it isDead. Also checks if
524 /// the def MI has other definition operands that are not dead. Returns it by
526 static bool InvalidateRegDef(MachineBasicBlock::iterator I,
527 MachineInstr &NewDef, unsigned Reg,
529 const TargetRegisterInfo *TRI) {
530 // Due to remat, it's possible this reg isn't being reused. That is,
531 // the def of this reg (by prev MI) is now dead.
532 MachineInstr *DefMI = I;
533 MachineOperand *DefOp = NULL;
534 for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
535 MachineOperand &MO = DefMI->getOperand(i);
536 if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
538 if (MO.getReg() == Reg)
540 else if (!MO.isDead())
546 bool FoundUse = false, Done = false;
547 MachineBasicBlock::iterator E = &NewDef;
549 for (; !Done && I != E; ++I) {
550 MachineInstr *NMI = I;
551 for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
552 MachineOperand &MO = NMI->getOperand(j);
553 if (!MO.isReg() || MO.getReg() == 0 ||
554 (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
558 Done = true; // Stop after scanning all the operands of this MI.
569 /// UpdateKills - Track and update kill info. If a MI reads a register that is
570 /// marked kill, then it must be due to register reuse. Transfer the kill info
572 static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
574 std::vector<MachineOperand*> &KillOps) {
575 // These do not affect kill info at all.
576 if (MI.isDebugValue())
578 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
579 MachineOperand &MO = MI.getOperand(i);
580 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
582 unsigned Reg = MO.getReg();
586 if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
587 // That can't be right. Register is killed but not re-defined and it's
588 // being reused. Let's fix that.
589 KillOps[Reg]->setIsKill(false);
590 // KillOps[Reg] might be a def of a super-register.
591 unsigned KReg = KillOps[Reg]->getReg();
592 KillOps[KReg] = NULL;
593 RegKills.reset(KReg);
595 // Must be a def of a super-register. Its other sub-regsters are no
596 // longer killed as well.
597 for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
602 // Check for subreg kills as well.
608 // = d4 <avoiding reload>
609 for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
611 if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI) {
612 KillOps[SReg]->setIsKill(false);
613 unsigned KReg = KillOps[SReg]->getReg();
614 KillOps[KReg] = NULL;
615 RegKills.reset(KReg);
617 for (const unsigned *SSR = TRI->getSubRegisters(KReg); *SSR; ++SSR) {
618 KillOps[*SSR] = NULL;
619 RegKills.reset(*SSR);
628 for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
635 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
636 const MachineOperand &MO = MI.getOperand(i);
637 if (!MO.isReg() || !MO.getReg() || !MO.isDef())
639 unsigned Reg = MO.getReg();
642 // It also defines (or partially define) aliases.
643 for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
647 for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
654 /// ReMaterialize - Re-materialize definition for Reg targetting DestReg.
656 static void ReMaterialize(MachineBasicBlock &MBB,
657 MachineBasicBlock::iterator &MII,
658 unsigned DestReg, unsigned Reg,
659 const TargetInstrInfo *TII,
660 const TargetRegisterInfo *TRI,
662 MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
664 const TargetInstrDesc &TID = ReMatDefMI->getDesc();
665 assert(TID.getNumDefs() == 1 &&
666 "Don't know how to remat instructions that define > 1 values!");
668 TII->reMaterialize(MBB, MII, DestReg,
669 ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI, TRI);
670 MachineInstr *NewMI = prior(MII);
671 for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
672 MachineOperand &MO = NewMI->getOperand(i);
673 if (!MO.isReg() || MO.getReg() == 0)
675 unsigned VirtReg = MO.getReg();
676 if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
679 unsigned Phys = VRM.getPhys(VirtReg);
680 assert(Phys && "Virtual register is not assigned a register?");
681 substitutePhysReg(MO, Phys, *TRI);
686 /// findSuperReg - Find the SubReg's super-register of given register class
687 /// where its SubIdx sub-register is SubReg.
688 static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
689 unsigned SubIdx, const TargetRegisterInfo *TRI) {
690 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
693 if (TRI->getSubReg(Reg, SubIdx) == SubReg)
699 // ******************************** //
700 // Available Spills Implementation //
701 // ******************************** //
703 /// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
704 /// stackslot register. The register is still available but is no longer
705 /// allowed to be modifed.
706 void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
707 std::multimap<unsigned, int>::iterator I =
708 PhysRegsAvailable.lower_bound(PhysReg);
709 while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
710 int SlotOrReMat = I->second;
712 assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
713 "Bidirectional map mismatch!");
714 SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
715 DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
716 << " copied, it is available for use but can no longer be modified\n");
720 /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
721 /// stackslot register and its aliases. The register and its aliases may
722 /// still available but is no longer allowed to be modifed.
723 void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
724 for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
725 disallowClobberPhysRegOnly(*AS);
726 disallowClobberPhysRegOnly(PhysReg);
729 /// ClobberPhysRegOnly - This is called when the specified physreg changes
730 /// value. We use this to invalidate any info about stuff we thing lives in it.
731 void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
732 std::multimap<unsigned, int>::iterator I =
733 PhysRegsAvailable.lower_bound(PhysReg);
734 while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
735 int SlotOrReMat = I->second;
736 PhysRegsAvailable.erase(I++);
737 assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
738 "Bidirectional map mismatch!");
739 SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
740 DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
741 << " clobbered, invalidating ");
742 if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
743 DEBUG(dbgs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 <<"\n");
745 DEBUG(dbgs() << "SS#" << SlotOrReMat << "\n");
749 /// ClobberPhysReg - This is called when the specified physreg changes
750 /// value. We use this to invalidate any info about stuff we thing lives in
751 /// it and any of its aliases.
752 void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
753 for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
754 ClobberPhysRegOnly(*AS);
755 ClobberPhysRegOnly(PhysReg);
758 /// AddAvailableRegsToLiveIn - Availability information is being kept coming
759 /// into the specified MBB. Add available physical registers as potential
760 /// live-in's. If they are reused in the MBB, they will be added to the
761 /// live-in set to make register scavenger and post-allocation scheduler.
762 void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
764 std::vector<MachineOperand*> &KillOps) {
765 std::set<unsigned> NotAvailable;
766 for (std::multimap<unsigned, int>::iterator
767 I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
769 unsigned Reg = I->first;
770 const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
771 // FIXME: A temporary workaround. We can't reuse available value if it's
772 // not safe to move the def of the virtual register's class. e.g.
773 // X86::RFP* register classes. Do not add it as a live-in.
774 if (!TII->isSafeToMoveRegClassDefs(RC))
775 // This is no longer available.
776 NotAvailable.insert(Reg);
779 InvalidateKill(Reg, TRI, RegKills, KillOps);
782 // Skip over the same register.
783 std::multimap<unsigned, int>::iterator NI = llvm::next(I);
784 while (NI != E && NI->first == Reg) {
790 for (std::set<unsigned>::iterator I = NotAvailable.begin(),
791 E = NotAvailable.end(); I != E; ++I) {
793 for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
795 ClobberPhysReg(*SubRegs);
799 /// ModifyStackSlotOrReMat - This method is called when the value in a stack
800 /// slot changes. This removes information about which register the previous
801 /// value for this slot lives in (as the previous value is dead now).
802 void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
803 std::map<int, unsigned>::iterator It =
804 SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
805 if (It == SpillSlotsOrReMatsAvailable.end()) return;
806 unsigned Reg = It->second >> 1;
807 SpillSlotsOrReMatsAvailable.erase(It);
809 // This register may hold the value of multiple stack slots, only remove this
810 // stack slot from the set of values the register contains.
811 std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
813 assert(I != PhysRegsAvailable.end() && I->first == Reg &&
814 "Map inverse broken!");
815 if (I->second == SlotOrReMat) break;
817 PhysRegsAvailable.erase(I);
820 // ************************** //
821 // Reuse Info Implementation //
822 // ************************** //
824 /// GetRegForReload - We are about to emit a reload into PhysReg. If there
825 /// is some other operand that is using the specified register, either pick
826 /// a new register to use, or evict the previous reload and use this reg.
827 unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
830 MachineInstr *MI, AvailableSpills &Spills,
831 std::vector<MachineInstr*> &MaybeDeadStores,
832 SmallSet<unsigned, 8> &Rejected,
834 std::vector<MachineOperand*> &KillOps,
836 const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
837 const TargetRegisterInfo *TRI = Spills.getRegInfo();
839 if (Reuses.empty()) return PhysReg; // This is most often empty.
841 for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
842 ReusedOp &Op = Reuses[ro];
843 // If we find some other reuse that was supposed to use this register
844 // exactly for its reload, we can change this reload to use ITS reload
845 // register. That is, unless its reload register has already been
846 // considered and subsequently rejected because it has also been reused
847 // by another operand.
848 if (Op.PhysRegReused == PhysReg &&
849 Rejected.count(Op.AssignedPhysReg) == 0 &&
850 RC->contains(Op.AssignedPhysReg)) {
851 // Yup, use the reload register that we didn't use before.
852 unsigned NewReg = Op.AssignedPhysReg;
853 Rejected.insert(PhysReg);
854 return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores, Rejected,
855 RegKills, KillOps, VRM);
857 // Otherwise, we might also have a problem if a previously reused
858 // value aliases the new register. If so, codegen the previous reload
860 unsigned PRRU = Op.PhysRegReused;
861 if (TRI->regsOverlap(PRRU, PhysReg)) {
862 // Okay, we found out that an alias of a reused register
863 // was used. This isn't good because it means we have
864 // to undo a previous reuse.
865 MachineBasicBlock *MBB = MI->getParent();
866 const TargetRegisterClass *AliasRC =
867 MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);
869 // Copy Op out of the vector and remove it, we're going to insert an
870 // explicit load for it.
872 Reuses.erase(Reuses.begin()+ro);
874 // MI may be using only a sub-register of PhysRegUsed.
875 unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
877 assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
878 "A reuse cannot be a virtual register");
879 if (PRRU != RealPhysRegUsed) {
880 // What was the sub-register index?
881 SubIdx = TRI->getSubRegIndex(PRRU, RealPhysRegUsed);
883 "Operand physreg is not a sub-register of PhysRegUsed");
886 // Ok, we're going to try to reload the assigned physreg into the
887 // slot that we were supposed to in the first place. However, that
888 // register could hold a reuse. Check to see if it conflicts or
889 // would prefer us to use a different register.
890 unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
891 MF, MI, Spills, MaybeDeadStores,
892 Rejected, RegKills, KillOps, VRM);
894 bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
895 int SSorRMId = DoReMat
896 ? VRM.getReMatId(NewOp.VirtReg) : NewOp.StackSlotOrReMat;
898 // Back-schedule reloads and remats.
899 MachineBasicBlock::iterator InsertLoc =
900 ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
901 DoReMat, SSorRMId, TII, MF);
904 ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
907 TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
908 NewOp.StackSlotOrReMat, AliasRC);
909 MachineInstr *LoadMI = prior(InsertLoc);
910 VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
911 // Any stores to this stack slot are not dead anymore.
912 MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
915 Spills.ClobberPhysReg(NewPhysReg);
916 Spills.ClobberPhysReg(NewOp.PhysRegReused);
918 unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) :NewPhysReg;
919 MI->getOperand(NewOp.Operand).setReg(RReg);
920 MI->getOperand(NewOp.Operand).setSubReg(0);
922 Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
923 UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
924 DEBUG(dbgs() << '\t' << *prior(InsertLoc));
926 DEBUG(dbgs() << "Reuse undone!\n");
929 // Finally, PhysReg is now available, go ahead and use it.
937 // ************************************************************************ //
939 /// FoldsStackSlotModRef - Return true if the specified MI folds the specified
940 /// stack slot mod/ref. It also checks if it's possible to unfold the
941 /// instruction by having it define a specified physical register instead.
942 static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
943 const TargetInstrInfo *TII,
944 const TargetRegisterInfo *TRI,
946 if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
950 VirtRegMap::MI2VirtMapTy::const_iterator I, End;
951 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
952 unsigned VirtReg = I->second.first;
953 VirtRegMap::ModRef MR = I->second.second;
954 if (MR & VirtRegMap::isModRef)
955 if (VRM.getStackSlot(VirtReg) == SS) {
956 Found= TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
963 // Does the instruction uses a register that overlaps the scratch register?
964 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
965 MachineOperand &MO = MI.getOperand(i);
966 if (!MO.isReg() || MO.getReg() == 0)
968 unsigned Reg = MO.getReg();
969 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
970 if (!VRM.hasPhys(Reg))
972 Reg = VRM.getPhys(Reg);
974 if (TRI->regsOverlap(PhysReg, Reg))
980 /// FindFreeRegister - Find a free register of a given register class by looking
981 /// at (at most) the last two machine instructions.
982 static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
983 MachineBasicBlock &MBB,
984 const TargetRegisterClass *RC,
985 const TargetRegisterInfo *TRI,
986 BitVector &AllocatableRegs) {
987 BitVector Defs(TRI->getNumRegs());
988 BitVector Uses(TRI->getNumRegs());
989 SmallVector<unsigned, 4> LocalUses;
990 SmallVector<unsigned, 4> Kills;
992 // Take a look at 2 instructions at most.
995 if (MII == MBB.begin())
997 MachineInstr *PrevMI = prior(MII);
1000 if (PrevMI->isDebugValue())
1001 continue; // Skip over dbg_value instructions.
1004 for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
1005 MachineOperand &MO = PrevMI->getOperand(i);
1006 if (!MO.isReg() || MO.getReg() == 0)
1008 unsigned Reg = MO.getReg();
1011 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
1014 LocalUses.push_back(Reg);
1015 if (MO.isKill() && AllocatableRegs[Reg])
1016 Kills.push_back(Reg);
1020 for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
1021 unsigned Kill = Kills[i];
1022 if (!Defs[Kill] && !Uses[Kill] &&
1023 TRI->getPhysicalRegisterRegClass(Kill) == RC)
1026 for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
1027 unsigned Reg = LocalUses[i];
1029 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
1038 void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
1039 const TargetRegisterInfo &TRI) {
1040 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1041 MachineOperand &MO = MI->getOperand(i);
1042 if (MO.isReg() && MO.getReg() == VirtReg)
1043 substitutePhysReg(MO, PhysReg, TRI);
1050 bool operator()(const std::pair<MachineInstr*, int> &A,
1051 const std::pair<MachineInstr*, int> &B) {
1052 return A.second < B.second;
1056 // ***************************** //
1057 // Local Spiller Implementation //
1058 // ***************************** //
1060 class LocalRewriter : public VirtRegRewriter {
1061 MachineRegisterInfo *MRI;
1062 const TargetRegisterInfo *TRI;
1063 const TargetInstrInfo *TII;
1065 BitVector AllocatableRegs;
1066 DenseMap<MachineInstr*, unsigned> DistanceMap;
1068 MachineBasicBlock *MBB; // Basic block currently being processed.
1072 bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
1073 LiveIntervals* LIs);
1077 bool OptimizeByUnfold2(unsigned VirtReg, int SS,
1078 MachineBasicBlock::iterator &MII,
1079 std::vector<MachineInstr*> &MaybeDeadStores,
1080 AvailableSpills &Spills,
1081 BitVector &RegKills,
1082 std::vector<MachineOperand*> &KillOps);
1084 bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
1085 std::vector<MachineInstr*> &MaybeDeadStores,
1086 AvailableSpills &Spills,
1087 BitVector &RegKills,
1088 std::vector<MachineOperand*> &KillOps);
1090 bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
1091 unsigned VirtReg, unsigned SrcReg, int SS,
1092 AvailableSpills &Spills,
1093 BitVector &RegKills,
1094 std::vector<MachineOperand*> &KillOps,
1095 const TargetRegisterInfo *TRI);
1097 void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
1098 int Idx, unsigned PhysReg, int StackSlot,
1099 const TargetRegisterClass *RC,
1100 bool isAvailable, MachineInstr *&LastStore,
1101 AvailableSpills &Spills,
1102 SmallSet<MachineInstr*, 4> &ReMatDefs,
1103 BitVector &RegKills,
1104 std::vector<MachineOperand*> &KillOps);
1106 void TransferDeadness(unsigned Reg, BitVector &RegKills,
1107 std::vector<MachineOperand*> &KillOps);
1109 bool InsertEmergencySpills(MachineInstr *MI);
1111 bool InsertRestores(MachineInstr *MI,
1112 AvailableSpills &Spills,
1113 BitVector &RegKills,
1114 std::vector<MachineOperand*> &KillOps);
1116 bool InsertSpills(MachineInstr *MI);
1118 void RewriteMBB(LiveIntervals *LIs,
1119 AvailableSpills &Spills, BitVector &RegKills,
1120 std::vector<MachineOperand*> &KillOps);
1124 bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
1125 LiveIntervals* LIs) {
1126 MRI = &MF.getRegInfo();
1127 TRI = MF.getTarget().getRegisterInfo();
1128 TII = MF.getTarget().getInstrInfo();
1130 AllocatableRegs = TRI->getAllocatableSet(MF);
1131 DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
1132 << MF.getFunction()->getName() << "':\n");
1133 DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
1134 " reloads!) ****\n");
1137 // Spills - Keep track of which spilled values are available in physregs
1138 // so that we can choose to reuse the physregs instead of emitting
1139 // reloads. This is usually refreshed per basic block.
1140 AvailableSpills Spills(TRI, TII);
1142 // Keep track of kill information.
1143 BitVector RegKills(TRI->getNumRegs());
1144 std::vector<MachineOperand*> KillOps;
1145 KillOps.resize(TRI->getNumRegs(), NULL);
1147 // SingleEntrySuccs - Successor blocks which have a single predecessor.
1148 SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
1149 SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
1151 // Traverse the basic blocks depth first.
1152 MachineBasicBlock *Entry = MF.begin();
1153 SmallPtrSet<MachineBasicBlock*,16> Visited;
1154 for (df_ext_iterator<MachineBasicBlock*,
1155 SmallPtrSet<MachineBasicBlock*,16> >
1156 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
1159 if (!EarlyVisited.count(MBB))
1160 RewriteMBB(LIs, Spills, RegKills, KillOps);
1162 // If this MBB is the only predecessor of a successor. Keep the
1163 // availability information and visit it next.
1165 // Keep visiting single predecessor successor as long as possible.
1166 SinglePredSuccs.clear();
1167 findSinglePredSuccessor(MBB, SinglePredSuccs);
1168 if (SinglePredSuccs.empty())
1171 // FIXME: More than one successors, each of which has MBB has
1172 // the only predecessor.
1173 MBB = SinglePredSuccs[0];
1174 if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
1175 Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
1176 RewriteMBB(LIs, Spills, RegKills, KillOps);
1181 // Clear the availability info.
1185 DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
1188 // Mark unused spill slots.
1189 MachineFrameInfo *MFI = MF.getFrameInfo();
1190 int SS = VRM->getLowSpillSlot();
1191 if (SS != VirtRegMap::NO_STACK_SLOT)
1192 for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS)
1193 if (!VRM->isSpillSlotUsed(SS)) {
1194 MFI->RemoveStackObject(SS);
1201 /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
1202 /// a scratch register is available.
1203 /// xorq %r12<kill>, %r13
1204 /// addq %rax, -184(%rbp)
1205 /// addq %r13, -184(%rbp)
1207 /// xorq %r12<kill>, %r13
1208 /// movq -184(%rbp), %r12
1211 /// movq %r12, -184(%rbp)
1212 bool LocalRewriter::
1213 OptimizeByUnfold2(unsigned VirtReg, int SS,
1214 MachineBasicBlock::iterator &MII,
1215 std::vector<MachineInstr*> &MaybeDeadStores,
1216 AvailableSpills &Spills,
1217 BitVector &RegKills,
1218 std::vector<MachineOperand*> &KillOps) {
1220 MachineBasicBlock::iterator NextMII = llvm::next(MII);
1221 // Skip over dbg_value instructions.
1222 while (NextMII != MBB->end() && NextMII->isDebugValue())
1223 NextMII = llvm::next(NextMII);
1224 if (NextMII == MBB->end())
1227 if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
1230 // Now let's see if the last couple of instructions happens to have freed up
1232 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1233 unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
1237 MachineFunction &MF = *MBB->getParent();
1238 TRI = MF.getTarget().getRegisterInfo();
1239 MachineInstr &MI = *MII;
1240 if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
1243 // If the next instruction also folds the same SS modref and can be unfoled,
1244 // then it's worthwhile to issue a load from SS into the free register and
1245 // then unfold these instructions.
1246 if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
1249 // Back-schedule reloads and remats.
1250 ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
1252 // Load from SS to the spare physical register.
1253 TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC);
1254 // This invalidates Phys.
1255 Spills.ClobberPhysReg(PhysReg);
1256 // Remember it's available.
1257 Spills.addAvailable(SS, PhysReg);
1258 MaybeDeadStores[SS] = NULL;
1260 // Unfold current MI.
1261 SmallVector<MachineInstr*, 4> NewMIs;
1262 if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
1263 llvm_unreachable("Unable unfold the load / store folding instruction!");
1264 assert(NewMIs.size() == 1);
1265 AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
1266 VRM->transferRestorePts(&MI, NewMIs[0]);
1267 MII = MBB->insert(MII, NewMIs[0]);
1268 InvalidateKills(MI, TRI, RegKills, KillOps);
1269 VRM->RemoveMachineInstrFromMaps(&MI);
1273 // Unfold next instructions that fold the same SS.
1275 MachineInstr &NextMI = *NextMII;
1276 NextMII = llvm::next(NextMII);
1278 if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
1279 llvm_unreachable("Unable unfold the load / store folding instruction!");
1280 assert(NewMIs.size() == 1);
1281 AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
1282 VRM->transferRestorePts(&NextMI, NewMIs[0]);
1283 MBB->insert(NextMII, NewMIs[0]);
1284 InvalidateKills(NextMI, TRI, RegKills, KillOps);
1285 VRM->RemoveMachineInstrFromMaps(&NextMI);
1286 MBB->erase(&NextMI);
1288 // Skip over dbg_value instructions.
1289 while (NextMII != MBB->end() && NextMII->isDebugValue())
1290 NextMII = llvm::next(NextMII);
1291 if (NextMII == MBB->end())
1293 } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));
1295 // Store the value back into SS.
1296 TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC);
1297 MachineInstr *StoreMI = prior(NextMII);
1298 VRM->addSpillSlotUse(SS, StoreMI);
1299 VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1304 /// OptimizeByUnfold - Turn a store folding instruction into a load folding
1305 /// instruction. e.g.
1307 /// movl %eax, -32(%ebp)
1308 /// movl -36(%ebp), %eax
1309 /// orl %eax, -32(%ebp)
1312 /// orl -36(%ebp), %eax
1313 /// mov %eax, -32(%ebp)
1314 /// This enables unfolding optimization for a subsequent instruction which will
1315 /// also eliminate the newly introduced store instruction.
1316 bool LocalRewriter::
1317 OptimizeByUnfold(MachineBasicBlock::iterator &MII,
1318 std::vector<MachineInstr*> &MaybeDeadStores,
1319 AvailableSpills &Spills,
1320 BitVector &RegKills,
1321 std::vector<MachineOperand*> &KillOps) {
1322 MachineFunction &MF = *MBB->getParent();
1323 MachineInstr &MI = *MII;
1324 unsigned UnfoldedOpc = 0;
1325 unsigned UnfoldPR = 0;
1326 unsigned UnfoldVR = 0;
1327 int FoldedSS = VirtRegMap::NO_STACK_SLOT;
1328 VirtRegMap::MI2VirtMapTy::const_iterator I, End;
1329 for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
1330 // Only transform a MI that folds a single register.
1333 UnfoldVR = I->second.first;
1334 VirtRegMap::ModRef MR = I->second.second;
1335 // MI2VirtMap be can updated which invalidate the iterator.
1336 // Increment the iterator first.
1338 if (VRM->isAssignedReg(UnfoldVR))
1340 // If this reference is not a use, any previous store is now dead.
1341 // Otherwise, the store to this stack slot is not dead anymore.
1342 FoldedSS = VRM->getStackSlot(UnfoldVR);
1343 MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
1344 if (DeadStore && (MR & VirtRegMap::isModRef)) {
1345 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
1346 if (!PhysReg || !DeadStore->readsRegister(PhysReg))
1349 UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
1358 // Look for other unfolding opportunities.
1359 return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
1363 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1364 MachineOperand &MO = MI.getOperand(i);
1365 if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
1367 unsigned VirtReg = MO.getReg();
1368 if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
1370 if (VRM->isAssignedReg(VirtReg)) {
1371 unsigned PhysReg = VRM->getPhys(VirtReg);
1372 if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
1374 } else if (VRM->isReMaterialized(VirtReg))
1376 int SS = VRM->getStackSlot(VirtReg);
1377 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
1379 if (TRI->regsOverlap(PhysReg, UnfoldPR))
1383 if (VRM->hasPhys(VirtReg)) {
1384 PhysReg = VRM->getPhys(VirtReg);
1385 if (!TRI->regsOverlap(PhysReg, UnfoldPR))
1389 // Ok, we'll need to reload the value into a register which makes
1390 // it impossible to perform the store unfolding optimization later.
1391 // Let's see if it is possible to fold the load if the store is
1392 // unfolded. This allows us to perform the store unfolding
1394 SmallVector<MachineInstr*, 4> NewMIs;
1395 if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
1396 assert(NewMIs.size() == 1);
1397 MachineInstr *NewMI = NewMIs.back();
1399 int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
1401 SmallVector<unsigned, 1> Ops;
1403 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
1405 VRM->addSpillSlotUse(SS, FoldedMI);
1406 if (!VRM->hasPhys(UnfoldVR))
1407 VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
1408 VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
1409 MII = MBB->insert(MII, FoldedMI);
1410 InvalidateKills(MI, TRI, RegKills, KillOps);
1411 VRM->RemoveMachineInstrFromMaps(&MI);
1413 MF.DeleteMachineInstr(NewMI);
1416 MF.DeleteMachineInstr(NewMI);
1423 /// CommuteChangesDestination - We are looking for r0 = op r1, r2 and
1424 /// where SrcReg is r1 and it is tied to r0. Return true if after
1425 /// commuting this instruction it will be r0 = op r2, r1.
1426 static bool CommuteChangesDestination(MachineInstr *DefMI,
1427 const TargetInstrDesc &TID,
1429 const TargetInstrInfo *TII,
1431 if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
1433 if (!DefMI->getOperand(1).isReg() ||
1434 DefMI->getOperand(1).getReg() != SrcReg)
1437 if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
1439 unsigned SrcIdx1, SrcIdx2;
1440 if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
1442 if (SrcIdx1 == 1 && SrcIdx2 == 2) {
1449 /// CommuteToFoldReload -
1452 /// r1 = op r1, r2<kill>
1455 /// If op is commutable and r2 is killed, then we can xform these to
1456 /// r2 = op r2, fi#1
1458 bool LocalRewriter::
1459 CommuteToFoldReload(MachineBasicBlock::iterator &MII,
1460 unsigned VirtReg, unsigned SrcReg, int SS,
1461 AvailableSpills &Spills,
1462 BitVector &RegKills,
1463 std::vector<MachineOperand*> &KillOps,
1464 const TargetRegisterInfo *TRI) {
1465 if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
1468 MachineFunction &MF = *MBB->getParent();
1469 MachineInstr &MI = *MII;
1470 MachineBasicBlock::iterator DefMII = prior(MII);
1471 MachineInstr *DefMI = DefMII;
1472 const TargetInstrDesc &TID = DefMI->getDesc();
1474 if (DefMII != MBB->begin() &&
1475 TID.isCommutable() &&
1476 CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
1477 MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
1478 unsigned NewReg = NewDstMO.getReg();
1479 if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
1481 MachineInstr *ReloadMI = prior(DefMII);
1483 unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
1484 if (DestReg != SrcReg || FrameIdx != SS)
1486 int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
1490 if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
1492 assert(DefMI->getOperand(DefIdx).isReg() &&
1493 DefMI->getOperand(DefIdx).getReg() == SrcReg);
1495 // Now commute def instruction.
1496 MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
1499 SmallVector<unsigned, 1> Ops;
1500 Ops.push_back(NewDstIdx);
1501 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
1502 // Not needed since foldMemoryOperand returns new MI.
1503 MF.DeleteMachineInstr(CommutedMI);
1507 VRM->addSpillSlotUse(SS, FoldedMI);
1508 VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
1509 // Insert new def MI and spill MI.
1510 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1511 TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC);
1513 MachineInstr *StoreMI = MII;
1514 VRM->addSpillSlotUse(SS, StoreMI);
1515 VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1516 MII = MBB->insert(MII, FoldedMI); // Update MII to backtrack.
1518 // Delete all 3 old instructions.
1519 InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
1520 VRM->RemoveMachineInstrFromMaps(ReloadMI);
1521 MBB->erase(ReloadMI);
1522 InvalidateKills(*DefMI, TRI, RegKills, KillOps);
1523 VRM->RemoveMachineInstrFromMaps(DefMI);
1525 InvalidateKills(MI, TRI, RegKills, KillOps);
1526 VRM->RemoveMachineInstrFromMaps(&MI);
1529 // If NewReg was previously holding value of some SS, it's now clobbered.
1530 // This has to be done now because it's a physical register. When this
1531 // instruction is re-visited, it's ignored.
1532 Spills.ClobberPhysReg(NewReg);
1541 /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
1542 /// the last store to the same slot is now dead. If so, remove the last store.
1543 void LocalRewriter::
1544 SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
1545 int Idx, unsigned PhysReg, int StackSlot,
1546 const TargetRegisterClass *RC,
1547 bool isAvailable, MachineInstr *&LastStore,
1548 AvailableSpills &Spills,
1549 SmallSet<MachineInstr*, 4> &ReMatDefs,
1550 BitVector &RegKills,
1551 std::vector<MachineOperand*> &KillOps) {
1553 MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
1554 TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC);
1555 MachineInstr *StoreMI = prior(oldNextMII);
1556 VRM->addSpillSlotUse(StackSlot, StoreMI);
1557 DEBUG(dbgs() << "Store:\t" << *StoreMI);
1559 // If there is a dead store to this stack slot, nuke it now.
1561 DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
1563 SmallVector<unsigned, 2> KillRegs;
1564 InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
1565 MachineBasicBlock::iterator PrevMII = LastStore;
1566 bool CheckDef = PrevMII != MBB->begin();
1569 VRM->RemoveMachineInstrFromMaps(LastStore);
1570 MBB->erase(LastStore);
1572 // Look at defs of killed registers on the store. Mark the defs
1573 // as dead since the store has been deleted and they aren't
1575 for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
1576 bool HasOtherDef = false;
1577 if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
1578 MachineInstr *DeadDef = PrevMII;
1579 if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
1580 // FIXME: This assumes a remat def does not have side effects.
1581 VRM->RemoveMachineInstrFromMaps(DeadDef);
1582 MBB->erase(DeadDef);
1590 // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
1591 // the last of multiple instructions is the actual store.
1592 LastStore = prior(oldNextMII);
1594 // If the stack slot value was previously available in some other
1595 // register, change it now. Otherwise, make the register available,
1597 Spills.ModifyStackSlotOrReMat(StackSlot);
1598 Spills.ClobberPhysReg(PhysReg);
1599 Spills.addAvailable(StackSlot, PhysReg, isAvailable);
1603 /// isSafeToDelete - Return true if this instruction doesn't produce any side
1604 /// effect and all of its defs are dead.
1605 static bool isSafeToDelete(MachineInstr &MI) {
1606 const TargetInstrDesc &TID = MI.getDesc();
1607 if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
1608 TID.isCall() || TID.isBarrier() || TID.isReturn() ||
1609 TID.hasUnmodeledSideEffects())
1611 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1612 MachineOperand &MO = MI.getOperand(i);
1613 if (!MO.isReg() || !MO.getReg())
1615 if (MO.isDef() && !MO.isDead())
1617 if (MO.isUse() && MO.isKill())
1618 // FIXME: We can't remove kill markers or else the scavenger will assert.
1619 // An alternative is to add a ADD pseudo instruction to replace kill
1626 /// TransferDeadness - A identity copy definition is dead and it's being
1627 /// removed. Find the last def or use and mark it as dead / kill.
1628 void LocalRewriter::
1629 TransferDeadness(unsigned Reg, BitVector &RegKills,
1630 std::vector<MachineOperand*> &KillOps) {
1631 SmallPtrSet<MachineInstr*, 4> Seens;
1632 SmallVector<std::pair<MachineInstr*, int>,8> Refs;
1633 for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
1634 RE = MRI->reg_end(); RI != RE; ++RI) {
1635 MachineInstr *UDMI = &*RI;
1636 if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
1638 DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
1639 if (DI == DistanceMap.end())
1641 if (Seens.insert(UDMI))
1642 Refs.push_back(std::make_pair(UDMI, DI->second));
1647 std::sort(Refs.begin(), Refs.end(), RefSorter());
1649 while (!Refs.empty()) {
1650 MachineInstr *LastUDMI = Refs.back().first;
1653 MachineOperand *LastUD = NULL;
1654 for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
1655 MachineOperand &MO = LastUDMI->getOperand(i);
1656 if (!MO.isReg() || MO.getReg() != Reg)
1658 if (!LastUD || (LastUD->isUse() && MO.isDef()))
1660 if (LastUDMI->isRegTiedToDefOperand(i))
1663 if (LastUD->isDef()) {
1664 // If the instruction has no side effect, delete it and propagate
1665 // backward further. Otherwise, mark is dead and we are done.
1666 if (!isSafeToDelete(*LastUDMI)) {
1667 LastUD->setIsDead();
1670 VRM->RemoveMachineInstrFromMaps(LastUDMI);
1671 MBB->erase(LastUDMI);
1673 LastUD->setIsKill();
1675 KillOps[Reg] = LastUD;
1681 /// InsertEmergencySpills - Insert emergency spills before MI if requested by
1682 /// VRM. Return true if spills were inserted.
1683 bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
1684 if (!VRM->hasEmergencySpills(MI))
1686 MachineBasicBlock::iterator MII = MI;
1687 SmallSet<int, 4> UsedSS;
1688 std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
1689 for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
1690 unsigned PhysReg = EmSpills[i];
1691 const TargetRegisterClass *RC = TRI->getPhysicalRegisterRegClass(PhysReg);
1692 assert(RC && "Unable to determine register class!");
1693 int SS = VRM->getEmergencySpillSlot(RC);
1694 if (UsedSS.count(SS))
1695 llvm_unreachable("Need to spill more than one physical registers!");
1697 TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC);
1698 MachineInstr *StoreMI = prior(MII);
1699 VRM->addSpillSlotUse(SS, StoreMI);
1701 // Back-schedule reloads and remats.
1702 MachineBasicBlock::iterator InsertLoc =
1703 ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
1704 TII, *MBB->getParent());
1706 TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC);
1708 MachineInstr *LoadMI = prior(InsertLoc);
1709 VRM->addSpillSlotUse(SS, LoadMI);
1711 DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
1716 /// InsertRestores - Restore registers before MI is requested by VRM. Return
1717 /// true is any instructions were inserted.
1718 bool LocalRewriter::InsertRestores(MachineInstr *MI,
1719 AvailableSpills &Spills,
1720 BitVector &RegKills,
1721 std::vector<MachineOperand*> &KillOps) {
1722 if (!VRM->isRestorePt(MI))
1724 MachineBasicBlock::iterator MII = MI;
1725 std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
1726 for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
1727 unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
1728 if (!VRM->getPreSplitReg(VirtReg))
1729 continue; // Split interval spilled again.
1730 unsigned Phys = VRM->getPhys(VirtReg);
1731 MRI->setPhysRegUsed(Phys);
1733 // Check if the value being restored if available. If so, it must be
1734 // from a predecessor BB that fallthrough into this BB. We do not
1740 // ... # r1 not clobbered
1743 bool DoReMat = VRM->isReMaterialized(VirtReg);
1744 int SSorRMId = DoReMat
1745 ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
1746 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1747 unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1748 if (InReg == Phys) {
1749 // If the value is already available in the expected register, save
1750 // a reload / remat.
1752 DEBUG(dbgs() << "Reusing RM#"
1753 << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
1755 DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
1756 DEBUG(dbgs() << " from physreg "
1757 << TRI->getName(InReg) << " for vreg"
1758 << VirtReg <<" instead of reloading into physreg "
1759 << TRI->getName(Phys) << '\n');
1762 } else if (InReg && InReg != Phys) {
1764 DEBUG(dbgs() << "Reusing RM#"
1765 << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
1767 DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
1768 DEBUG(dbgs() << " from physreg "
1769 << TRI->getName(InReg) << " for vreg"
1770 << VirtReg <<" by copying it into physreg "
1771 << TRI->getName(Phys) << '\n');
1773 // If the reloaded / remat value is available in another register,
1774 // copy it to the desired register.
1776 // Back-schedule reloads and remats.
1777 MachineBasicBlock::iterator InsertLoc =
1778 ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
1781 TII->copyRegToReg(*MBB, InsertLoc, Phys, InReg, RC, RC);
1783 // This invalidates Phys.
1784 Spills.ClobberPhysReg(Phys);
1785 // Remember it's available.
1786 Spills.addAvailable(SSorRMId, Phys);
1789 MachineInstr *CopyMI = prior(InsertLoc);
1790 CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1791 MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
1792 KillOpnd->setIsKill();
1793 UpdateKills(*CopyMI, TRI, RegKills, KillOps);
1795 DEBUG(dbgs() << '\t' << *CopyMI);
1800 // Back-schedule reloads and remats.
1801 MachineBasicBlock::iterator InsertLoc =
1802 ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
1805 if (VRM->isReMaterialized(VirtReg)) {
1806 ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
1808 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1809 TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC);
1810 MachineInstr *LoadMI = prior(InsertLoc);
1811 VRM->addSpillSlotUse(SSorRMId, LoadMI);
1813 DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
1816 // This invalidates Phys.
1817 Spills.ClobberPhysReg(Phys);
1818 // Remember it's available.
1819 Spills.addAvailable(SSorRMId, Phys);
1821 UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
1822 DEBUG(dbgs() << '\t' << *prior(MII));
1827 /// InsertSpills - Insert spills after MI if requested by VRM. Return
1828 /// true if spills were inserted.
1829 bool LocalRewriter::InsertSpills(MachineInstr *MI) {
1830 if (!VRM->isSpillPt(MI))
1832 MachineBasicBlock::iterator MII = MI;
1833 std::vector<std::pair<unsigned,bool> > &SpillRegs =
1834 VRM->getSpillPtSpills(MI);
1835 for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
1836 unsigned VirtReg = SpillRegs[i].first;
1837 bool isKill = SpillRegs[i].second;
1838 if (!VRM->getPreSplitReg(VirtReg))
1839 continue; // Split interval spilled again.
1840 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
1841 unsigned Phys = VRM->getPhys(VirtReg);
1842 int StackSlot = VRM->getStackSlot(VirtReg);
1843 MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
1844 TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
1846 MachineInstr *StoreMI = prior(oldNextMII);
1847 VRM->addSpillSlotUse(StackSlot, StoreMI);
1848 DEBUG(dbgs() << "Store:\t" << *StoreMI);
1849 VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1855 /// RewriteMBB - Keep track of which spills are available even after the
1856 /// register allocator is done with them. If possible, avoid reloading vregs.
1858 LocalRewriter::RewriteMBB(LiveIntervals *LIs,
1859 AvailableSpills &Spills, BitVector &RegKills,
1860 std::vector<MachineOperand*> &KillOps) {
1862 DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
1863 << MBB->getName() << "':\n");
1865 MachineFunction &MF = *MBB->getParent();
1867 // MaybeDeadStores - When we need to write a value back into a stack slot,
1868 // keep track of the inserted store. If the stack slot value is never read
1869 // (because the value was used from some available register, for example), and
1870 // subsequently stored to, the original store is dead. This map keeps track
1871 // of inserted stores that are not used. If we see a subsequent store to the
1872 // same stack slot, the original store is deleted.
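// A hypothetical example (not from the original code) of the pattern this
// map catches:
//   store r0 -> fi#3      ; recorded in MaybeDeadStores[3]
//   r1 = op r0, ...       ; value used from r0, fi#3 itself is never read
//   store r2 -> fi#3      ; the first store to fi#3 was dead; delete it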
1873 std::vector<MachineInstr*> MaybeDeadStores;
1874 MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
1876 // ReMatDefs - These are rematerializable def MIs which are not deleted.
1877 SmallSet<MachineInstr*, 4> ReMatDefs;
1880 SmallSet<unsigned, 2> KilledMIRegs;
1883 KillOps.resize(TRI->getNumRegs(), NULL);
1885 DistanceMap.clear();
1886 for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
1888 MachineBasicBlock::iterator NextMII = llvm::next(MII);
1890 if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
1891 NextMII = llvm::next(MII);
1893 if (InsertEmergencySpills(MII))
1894 NextMII = llvm::next(MII);
1896 InsertRestores(MII, Spills, RegKills, KillOps);
1898 if (InsertSpills(MII))
1899 NextMII = llvm::next(MII);
1901 VirtRegMap::MI2VirtMapTy::const_iterator I, End;
1902 bool Erased = false;
1903 bool BackTracked = false;
1904 MachineInstr &MI = *MII;
1906 /// ReusedOperands - Keep track of operand reuse in case we need to undo reuse.
1908 ReuseInfo ReusedOperands(MI, TRI);
1909 SmallVector<unsigned, 4> VirtUseOps;
1910 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1911 MachineOperand &MO = MI.getOperand(i);
1912 if (!MO.isReg() || MO.getReg() == 0)
1913 continue; // Ignore non-register operands.
1915 unsigned VirtReg = MO.getReg();
1916 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
1917 // Ignore physregs for spilling, but remember that it is used by this MI.
1919 MRI->setPhysRegUsed(VirtReg);
1923 // We want to process implicit virtual register uses first.
1924 if (MO.isImplicit())
1925 // If the virtual register is implicitly defined, emit an implicit_def
1926 // before it so the scavenger knows it's "defined".
1927 // FIXME: This is a horrible hack done by the register allocator to
1928 // remat a definition with a virtual register operand.
1929 VirtUseOps.insert(VirtUseOps.begin(), i);
1931 VirtUseOps.push_back(i);
1934 // Process all of the spilled uses and all non-spilled reg references.
1935 SmallVector<int, 2> PotentialDeadStoreSlots;
1936 KilledMIRegs.clear();
1937 for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
1938 unsigned i = VirtUseOps[j];
1939 unsigned VirtReg = MI.getOperand(i).getReg();
1940 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
1941 "Not a virtual register?");
1943 unsigned SubIdx = MI.getOperand(i).getSubReg();
1944 if (VRM->isAssignedReg(VirtReg)) {
1945 // This virtual register was assigned a physreg!
1946 unsigned Phys = VRM->getPhys(VirtReg);
1947 MRI->setPhysRegUsed(Phys);
1948 if (MI.getOperand(i).isDef())
1949 ReusedOperands.markClobbered(Phys);
1950 substitutePhysReg(MI.getOperand(i), Phys, *TRI);
1951 if (VRM->isImplicitlyDefined(VirtReg))
1952 // FIXME: Is this needed?
1953 BuildMI(*MBB, &MI, MI.getDebugLoc(),
1954 TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
1958 // This virtual register is now known to be a spilled value.
1959 if (!MI.getOperand(i).isUse())
1960 continue; // Handle defs in the loop below (handle use&def here though)
1962 bool AvoidReload = MI.getOperand(i).isUndef();
1963 // Check if it is defined by an implicit def. It should not be spilled.
1964 // Note, this is for correctness reasons, e.g.
1965 // 8 %reg1024<def> = IMPLICIT_DEF
1966 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1967 // The live range [12, 14) is not part of the r1024 live interval since
1968 // it's defined by an implicit def. It will not conflict with the live
1969 // interval of r1025. Now suppose both registers are spilled; you can
1970 // easily see a situation where both registers are reloaded before
1971 // the INSERT_SUBREG into target registers that would overlap.
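// Illustrative only (register assignments are hypothetical): reloading both
// spilled values right before the INSERT_SUBREG could produce
//   %EAX = load fi#0              ; reload of %reg1024
//   %AX  = load fi#1              ; reload of %reg1025 overlaps %EAX
//   %EAX = INSERT_SUBREG %EAX<kill>, %AX, 2
// which is why a use marked <undef> sets AvoidReload above and is not reloaded.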
1972 bool DoReMat = VRM->isReMaterialized(VirtReg);
1973 int SSorRMId = DoReMat
1974 ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
1975 int ReuseSlot = SSorRMId;
1977 // Check to see if this stack slot is available.
1978 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1980 // If this is a sub-register use, make sure the reuse register is in the
1981 // right register class. For example, for x86 not all of the 32-bit
1982 // registers have accessible sub-registers.
1983 // Similarly so for EXTRACT_SUBREG. Consider this:
1985 // MOV32_mr fi#1, EDI
1987 // = EXTRACT_SUBREG fi#1
1988 // fi#1 is available in EDI, but it cannot be reused because it's not in
1989 // the right register file.
1990 if (PhysReg && !AvoidReload && (SubIdx || MI.isExtractSubreg())) {
1991 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1992 if (!RC->contains(PhysReg))
1996 if (PhysReg && !AvoidReload) {
1997 // This spilled operand might be part of a two-address operand. If this
1998 // is the case, then changing it will necessarily require changing the
1999 // def part of the instruction as well. However, in some cases, we
2000 // aren't allowed to modify the reused register. If none of these cases apply, reuse it.
2002 bool CanReuse = true;
2003 bool isTied = MI.isRegTiedToDefOperand(i);
2005 // Okay, we have a two address operand. We can reuse this physreg as
2006 // long as we are allowed to clobber the value and there isn't an
2007 // earlier def that has already clobbered the physreg.
2008 CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
2009 Spills.canClobberPhysReg(PhysReg);
2013 // If this stack slot value is already available, reuse it!
2014 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
2015 DEBUG(dbgs() << "Reusing RM#"
2016 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
2018 DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
2019 DEBUG(dbgs() << " from physreg "
2020 << TRI->getName(PhysReg) << " for vreg"
2021 << VirtReg <<" instead of reloading into physreg "
2022 << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
2023 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2024 MI.getOperand(i).setReg(RReg);
2025 MI.getOperand(i).setSubReg(0);
2027 // The only technical detail we have is that we don't know that
2028 // PhysReg won't be clobbered by a reloaded stack slot that occurs
2029 // later in the instruction. In particular, consider 'op V1, V2'.
2030 // If V1 is available in physreg R0, we would choose to reuse it
2031 // here, instead of reloading it into the register the allocator
2032 // indicated (say R1). However, V2 might have to be reloaded
2033 // later, and it might indicate that it needs to live in R0. When
2034 // this occurs, we need to have information available that
2035 // indicates it is safe to use R1 for the reload instead of R0.
2037 // To further complicate matters, we might conflict with an alias,
2038 // or R0 and R1 might not be compatible with each other. In this
2039 // case, we actually insert a reload for V1 in R1, ensuring that
2040 // we can get at R0 or its alias.
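// A hypothetical sequence of events (not from the original code):
//   op V1, V2          ; V1 is available in R0, allocator chose R1 for it
//   use R0 for V1      ; reuse recorded via addReuse() below
//   ...                ; later V2 turns out to need R0
//   -> GetRegForReload() undoes the reuse and reloads V1 into R1 instead.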
2041 ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
2042 VRM->getPhys(VirtReg), VirtReg);
2044 // Only mark it clobbered if this is a use&def operand.
2045 ReusedOperands.markClobbered(PhysReg);
2048 if (MI.getOperand(i).isKill() &&
2049 ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
2051 // The store of this spilled value is potentially dead, but we
2052 // won't know for certain until we've confirmed that the re-use
2053 // above is valid, which means waiting until the other operands
2054 // are processed. For now we just track the spill slot, we'll
2055 // remove it after the other operands are processed if valid.
2057 PotentialDeadStoreSlots.push_back(ReuseSlot);
2060 // Mark it isKill if there are no other uses of the same virtual
2061 // register and it's not a two-address operand. IsKill will be
2062 // unset if reg is reused.
2063 if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
2064 MI.getOperand(i).setIsKill();
2065 KilledMIRegs.insert(VirtReg);
2071 // Otherwise we have a situation where we have a two-address instruction
2072 // whose mod/ref operand needs to be reloaded. This reload is already
2073 // available in some register "PhysReg", but if we used PhysReg as the
2074 // operand to our 2-addr instruction, the instruction would modify
2075 // PhysReg. This isn't cool if something later uses PhysReg and expects
2076 // to get its initial value.
2078 // To avoid this problem, and to avoid doing a load right after a store,
2079 // we emit a copy from PhysReg into the designated register for this operand.
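// A hypothetical example (not from the original code): for a two-address
// 'R1 = add R1, imm' whose tied use value is currently available in R0,
// emit 'R1 = copy R0' first and rewrite the instruction to use R1, leaving
// the value in R0 intact for later users.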
2081 unsigned DesignatedReg = VRM->getPhys(VirtReg);
2082 assert(DesignatedReg && "Must map virtreg to physreg!");
2084 // Note that, if we reused a register for a previous operand, the
2085 // register we want to reload into might not actually be
2086 // available. If this occurs, use the register indicated by the reuser.
2088 if (ReusedOperands.hasReuses())
2089 DesignatedReg = ReusedOperands.
2090 GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
2091 MaybeDeadStores, RegKills, KillOps, *VRM);
2093 // If the mapped designated register is actually the physreg we have
2094 // incoming, we don't need to insert a dead copy.
2095 if (DesignatedReg == PhysReg) {
2096 // If this stack slot value is already available, reuse it!
2097 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
2098 DEBUG(dbgs() << "Reusing RM#"
2099 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
2101 DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
2102 DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
2103 << " for vreg" << VirtReg
2104 << " instead of reloading into same physreg.\n");
2105 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2106 MI.getOperand(i).setReg(RReg);
2107 MI.getOperand(i).setSubReg(0);
2108 ReusedOperands.markClobbered(RReg);
2113 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
2114 MRI->setPhysRegUsed(DesignatedReg);
2115 ReusedOperands.markClobbered(DesignatedReg);
2117 // Back-schedule reloads and remats.
2118 MachineBasicBlock::iterator InsertLoc =
2119 ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
2122 TII->copyRegToReg(*MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);
2124 MachineInstr *CopyMI = prior(InsertLoc);
2125 CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
2126 UpdateKills(*CopyMI, TRI, RegKills, KillOps);
2128 // This invalidates DesignatedReg.
2129 Spills.ClobberPhysReg(DesignatedReg);
2131 Spills.addAvailable(ReuseSlot, DesignatedReg);
2133 SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
2134 MI.getOperand(i).setReg(RReg);
2135 MI.getOperand(i).setSubReg(0);
2136 DEBUG(dbgs() << '\t' << *prior(MII));
2141 // Otherwise, reload it and remember that we have it.
2142 PhysReg = VRM->getPhys(VirtReg);
2143 assert(PhysReg && "Must map virtreg to physreg!");
2145 // Note that, if we reused a register for a previous operand, the
2146 // register we want to reload into might not actually be
2147 // available. If this occurs, use the register indicated by the reuser.
2149 if (ReusedOperands.hasReuses())
2150 PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
2151 Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
2153 MRI->setPhysRegUsed(PhysReg);
2154 ReusedOperands.markClobbered(PhysReg);
2158 // Back-schedule reloads and remats.
2159 MachineBasicBlock::iterator InsertLoc =
2160 ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, DoReMat,
2164 ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
2166 const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
2167 TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC);
2168 MachineInstr *LoadMI = prior(InsertLoc);
2169 VRM->addSpillSlotUse(SSorRMId, LoadMI);
2171 DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
2173 // This invalidates PhysReg.
2174 Spills.ClobberPhysReg(PhysReg);
2176 // Any stores to this stack slot are not dead anymore.
2178 MaybeDeadStores[SSorRMId] = NULL;
2179 Spills.addAvailable(SSorRMId, PhysReg);
2180 // Assumes this is the last use. IsKill will be unset if reg is reused
2181 // unless it's a two-address operand.
2182 if (!MI.isRegTiedToDefOperand(i) &&
2183 KilledMIRegs.count(VirtReg) == 0) {
2184 MI.getOperand(i).setIsKill();
2185 KilledMIRegs.insert(VirtReg);
2188 UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
2189 DEBUG(dbgs() << '\t' << *prior(InsertLoc));
2191 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2192 MI.getOperand(i).setReg(RReg);
2193 MI.getOperand(i).setSubReg(0);
2196 // Ok - now we can remove stores that have been confirmed dead.
2197 for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
2198 // This was the last use and the spilled value is still available
2199 // for reuse. That means the spill was unnecessary!
2200 int PDSSlot = PotentialDeadStoreSlots[j];
2201 MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
2203 DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
2204 InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2205 VRM->RemoveMachineInstrFromMaps(DeadStore);
2206 MBB->erase(DeadStore);
2207 MaybeDeadStores[PDSSlot] = NULL;
2213 DEBUG(dbgs() << '\t' << MI);
2216 // If we have folded references to memory operands, make sure we clear all
2217 // physical registers that may contain the value of the spilled virtual register.
2219 SmallSet<int, 2> FoldedSS;
2220 for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
2221 unsigned VirtReg = I->second.first;
2222 VirtRegMap::ModRef MR = I->second.second;
2223 DEBUG(dbgs() << "Folded vreg: " << VirtReg << " MR: " << MR);
2225 // MI2VirtMap can be updated, which invalidates the iterator.
2226 // Increment the iterator first.
2228 int SS = VRM->getStackSlot(VirtReg);
2229 if (SS == VirtRegMap::NO_STACK_SLOT)
2231 FoldedSS.insert(SS);
2232 DEBUG(dbgs() << " - StackSlot: " << SS << "\n");
2234 // If this folded instruction is just a use, check to see if it's a
2235 // straight load from the virt reg slot.
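// A hypothetical example (not from the original code): if fi#1 is already
// available in %EAX, then
//   %EBX = MOV32rm fi#1   ==>   %EBX = copy %EAX
// and if the destination is %EAX itself, the load is a no-op and is removed.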
2236 if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
2238 unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
2239 if (DestReg && FrameIdx == SS) {
2240 // If this spill slot is available, turn it into a copy (or nothing)
2241 // instead of leaving it as a load!
2242 if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
2243 DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
2244 if (DestReg != InReg) {
2245 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
2246 TII->copyRegToReg(*MBB, &MI, DestReg, InReg, RC, RC);
2247 MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
2248 unsigned SubIdx = DefMO->getSubReg();
2249 // Revisit the copy so we make sure to notice the effects of the
2250 // operation on the destreg (either needing to RA it if it's
2251 // virtual or needing to clobber any values if it's physical).
2253 --NextMII; // backtrack to the copy.
2254 NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
2255 // Propagate the sub-register index over.
2257 DefMO = NextMII->findRegisterDefOperand(DestReg);
2258 DefMO->setSubReg(SubIdx);
2262 MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
2263 KillOpnd->setIsKill();
2267 DEBUG(dbgs() << "Removing now-noop copy: " << MI);
2268 // Unset last kill since it's being reused.
2269 InvalidateKill(InReg, TRI, RegKills, KillOps);
2270 Spills.disallowClobberPhysReg(InReg);
2273 InvalidateKills(MI, TRI, RegKills, KillOps);
2274 VRM->RemoveMachineInstrFromMaps(&MI);
2277 goto ProcessNextInst;
2280 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2281 SmallVector<MachineInstr*, 4> NewMIs;
2283 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
2284 MBB->insert(MII, NewMIs[0]);
2285 InvalidateKills(MI, TRI, RegKills, KillOps);
2286 VRM->RemoveMachineInstrFromMaps(&MI);
2289 --NextMII; // backtrack to the unfolded instruction.
2291 goto ProcessNextInst;
2296 // If this reference is not a use, any previous store is now dead.
2297 // Otherwise, the store to this stack slot is not dead anymore.
2298 MachineInstr* DeadStore = MaybeDeadStores[SS];
2300 bool isDead = !(MR & VirtRegMap::isRef);
2301 MachineInstr *NewStore = NULL;
2302 if (MR & VirtRegMap::isModRef) {
2303 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2304 SmallVector<MachineInstr*, 4> NewMIs;
2305 // We can reuse this physreg as long as we are allowed to clobber
2306 // the value and there isn't an earlier def that has already clobbered the physreg.
2309 !ReusedOperands.isClobbered(PhysReg) &&
2310 Spills.canClobberPhysReg(PhysReg) &&
2311 !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
2312 MachineOperand *KillOpnd =
2313 DeadStore->findRegisterUseOperand(PhysReg, true);
2314 // Note, if the store is storing a sub-register, it's possible the
2315 // super-register is needed below.
2316 if (KillOpnd && !KillOpnd->getSubReg() &&
2317 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
2318 MBB->insert(MII, NewMIs[0]);
2319 NewStore = NewMIs[1];
2320 MBB->insert(MII, NewStore);
2321 VRM->addSpillSlotUse(SS, NewStore);
2322 InvalidateKills(MI, TRI, RegKills, KillOps);
2323 VRM->RemoveMachineInstrFromMaps(&MI);
2327 --NextMII; // backtrack to the unfolded instruction.
2335 if (isDead) { // Previous store is dead.
2336 // If we get here, the store is dead, nuke it now.
2337 DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
2338 InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2339 VRM->RemoveMachineInstrFromMaps(DeadStore);
2340 MBB->erase(DeadStore);
2345 MaybeDeadStores[SS] = NULL;
2347 // Treat this store as a spill merged into a copy. That makes the
2348 // stack slot value available.
2349 VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
2350 goto ProcessNextInst;
2354 // If the spill slot value is available, and this is a new definition of
2355 // the value, the value is not available anymore.
2356 if (MR & VirtRegMap::isMod) {
2357 // Notice that the value in this stack slot has been modified.
2358 Spills.ModifyStackSlotOrReMat(SS);
2360 // If this is *just* a mod of the value, check to see if this is just a
2361 // store to the spill slot (i.e. the spill got merged into the copy). If
2362 // so, realize that the vreg is available now, and add the store to the
2363 // MaybeDeadStore info.
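// A hypothetical example (not from the original code): a folded instruction
// like 'MOV32mr fi#2, %EAX' is just a store of %EAX into the spill slot, so
// fi#2 is recorded as available in %EAX and the store becomes a candidate
// dead store in MaybeDeadStores.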
2365 if (!(MR & VirtRegMap::isRef)) {
2366 if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
2367 assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
2368 "Src hasn't been allocated yet?");
2370 if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
2371 Spills, RegKills, KillOps, TRI)) {
2372 NextMII = llvm::next(MII);
2374 goto ProcessNextInst;
2377 // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
2378 // this as a potentially dead store in case there is a subsequent
2379 // store into the stack slot without a read from it.
2380 MaybeDeadStores[StackSlot] = &MI;
2382 // If the stack slot value was previously available in some other
2383 // register, change it now. Otherwise, make the register
2384 // available in PhysReg.
2385 Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
2391 // Process all of the spilled defs.
2392 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
2393 MachineOperand &MO = MI.getOperand(i);
2394 if (!(MO.isReg() && MO.getReg() && MO.isDef()))
2397 unsigned VirtReg = MO.getReg();
2398 if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
2399 // Check to see if this is a noop copy. If so, eliminate the
2400 // instruction before considering the dest reg to be changed.
2401 // Also check if it's copying from an "undef"; if so, we can't
2402 // eliminate this or else the undef marker is lost and it will
2403 // confuse the scavenger. This is extremely rare.
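// A hypothetical example (not from the original code):
//   %EAX = copy %EAX            ; safe to delete as a no-op copy
//   %EAX = copy %EAX<undef>     ; must be kept, or the undef marker is lost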
2404 unsigned Src, Dst, SrcSR, DstSR;
2405 if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst &&
2406 !MI.findRegisterUseOperand(Src)->isUndef()) {
2408 DEBUG(dbgs() << "Removing now-noop copy: " << MI);
2409 SmallVector<unsigned, 2> KillRegs;
2410 InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
2411 if (MO.isDead() && !KillRegs.empty()) {
2412 // Source register or an implicit super/sub-register use is killed.
2413 assert(KillRegs[0] == Dst ||
2414 TRI->isSubRegister(KillRegs[0], Dst) ||
2415 TRI->isSuperRegister(KillRegs[0], Dst));
2416 // Last def is now dead.
2417 TransferDeadness(Src, RegKills, KillOps);
2419 VRM->RemoveMachineInstrFromMaps(&MI);
2422 Spills.disallowClobberPhysReg(VirtReg);
2423 goto ProcessNextInst;
2426 // If it's not a no-op copy, it clobbers the value in the destreg.
2427 Spills.ClobberPhysReg(VirtReg);
2428 ReusedOperands.markClobbered(VirtReg);
2430 // Check to see if this instruction is a load from a stack slot into
2431 // a register. If so, this provides the stack slot value in the reg.
2433 if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
2434 assert(DestReg == VirtReg && "Unknown load situation!");
2436 // If it is a folded reference, then it's not safe to clobber.
2437 bool Folded = FoldedSS.count(FrameIdx);
2438 // Otherwise, if it wasn't available, remember that it is now!
2439 Spills.addAvailable(FrameIdx, DestReg, !Folded);
2440 goto ProcessNextInst;
2446 unsigned SubIdx = MO.getSubReg();
2447 bool DoReMat = VRM->isReMaterialized(VirtReg);
2449 ReMatDefs.insert(&MI);
2451 // The only vregs left are stack slot definitions.
2452 int StackSlot = VRM->getStackSlot(VirtReg);
2453 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
2455 // If this def is part of a two-address operand, make sure to execute
2456 // the store from the correct physical register.
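// A hypothetical example (not from the original code): if this def writes a
// sub-register and is tied to a use that was rewritten to %AX, the value
// actually lives in the enclosing register, so findSuperReg() recovers e.g.
// %EAX and the store to the stack slot uses that super-register.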
2459 if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
2460 PhysReg = MI.getOperand(TiedOp).getReg();
2462 unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
2463 assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
2464 "Can't find corresponding super-register!");
2468 PhysReg = VRM->getPhys(VirtReg);
2469 if (ReusedOperands.isClobbered(PhysReg)) {
2470 // Another def has taken the assigned physreg. It must have been a
2471 // use&def which got it due to reuse. Undo the reuse!
2472 PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
2473 Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
2477 assert(PhysReg && "VR not assigned a physical register?");
2478 MRI->setPhysRegUsed(PhysReg);
2479 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2480 ReusedOperands.markClobbered(RReg);
2481 MI.getOperand(i).setReg(RReg);
2482 MI.getOperand(i).setSubReg(0);
2485 MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
2486 SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
2487 LastStore, Spills, ReMatDefs, RegKills, KillOps);
2488 NextMII = llvm::next(MII);
2490 // Check to see if this is a noop copy. If so, eliminate the
2491 // instruction before considering the dest reg to be changed.
2493 unsigned Src, Dst, SrcSR, DstSR;
2494 if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
2496 DEBUG(dbgs() << "Removing now-noop copy: " << MI);
2497 InvalidateKills(MI, TRI, RegKills, KillOps);
2498 VRM->RemoveMachineInstrFromMaps(&MI);
2501 UpdateKills(*LastStore, TRI, RegKills, KillOps);
2502 goto ProcessNextInst;
2508 // Delete dead instructions without side effects.
2509 if (!Erased && !BackTracked && isSafeToDelete(MI)) {
2510 InvalidateKills(MI, TRI, RegKills, KillOps);
2511 VRM->RemoveMachineInstrFromMaps(&MI);
2516 DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
2517 if (!Erased && !BackTracked) {
2518 for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
2519 UpdateKills(*II, TRI, RegKills, KillOps);
2526 llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
2527 switch (RewriterOpt) {
2528 default: llvm_unreachable("Unreachable!");
2530 return new LocalRewriter();
2532 return new TrivialRewriter();