//===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");

enum RewriterName { local, trivial };

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use: (default: local)"),
            cl::values(clEnumVal(local,   "local rewriter"),
                       clEnumVal(trivial, "trivial rewriter"),
                       clEnumValEnd),
            cl::init(local));

static cl::opt<bool>
ScheduleSpills("schedule-spills",
               cl::desc("Schedule spill code"),
               cl::init(false));

VirtRegRewriter::~VirtRegRewriter() {}
/// This class is intended for use with the new spilling framework only. It
/// rewrites vreg def/uses to use the assigned preg, but does not insert any
/// spill code.
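// Illustrative sketch (editor's note, not part of the original source): if
// the VirtRegMap assigns a virtual register, say vreg 1024, to a physical
// register such as X86::EAX, the loop below simply calls
//   mri->replaceRegWith(1024, X86::EAX);
// so every def and use of the vreg is rewritten in place; no spill or reload
// instructions are emitted by this rewriter.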
struct TrivialRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DEBUG(errs() << "********** REWRITE MACHINE CODE **********\n");
    DEBUG(errs() << "********** Function: "
                 << MF.getFunction()->getName() << '\n');
    DEBUG(errs() << "**** Machine Instrs"
                 << "(NOTE! Does not include spills and reloads!) ****\n");
    MachineRegisterInfo *mri = &MF.getRegInfo();

    for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
         liItr != liEnd; ++liItr) {

      if (TargetRegisterInfo::isVirtualRegister(liItr->first)) {
        if (VRM.hasPhys(liItr->first)) {
          unsigned preg = VRM.getPhys(liItr->first);
          mri->replaceRegWith(liItr->first, preg);
          mri->setPhysRegUsed(preg);
        }
      } else {
        if (!liItr->second->empty()) {
          mri->setPhysRegUsed(liItr->first);
        }
      }
    }

    DEBUG(errs() << "**** Post Machine Instrs ****\n");

// ************************************************************************ //
/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat are available in
/// each register.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like. We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
/// this bit and addAvailable sets it.
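// Illustrative example (editor's note, not from the original source): an
// entry in SpillSlotsOrReMatsAvailable packs the holding physreg and the
// CanClobber flag into one unsigned, e.g. a hypothetical slot held in
// physreg 5 that may be clobbered is stored as (5 << 1) | 1 == 11;
// getSpillSlotOrReMatPhysReg() recovers the physreg with ">> 1" and
// canClobberPhysRegForSS() reads the low bit.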
class AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);

public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }
  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }
  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(errs() << "Remembering RM#"
                   << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
    else
      DEBUG(errs() << "Remembering SS#" << SlotOrReMat);
    DEBUG(errs() << " in physreg " << TRI->getName(Reg) << "\n");
  }
  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires. The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
      ++I;
    }
    return true;
  }
  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-in's. If they are reused in the MBB, they will be added to the
  /// live-in set to make the register scavenger and post-allocation scheduler
  /// happy.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};

// ************************************************************************ //

// Given a location where a reload of a spilled register or a remat of
// a constant is to be inserted, attempt to find a safe location to
// insert the load at an earlier point in the basic-block, to hide
// latency of the load and to avoid address-generation interlock
// issues.
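// Illustrative example (editor's note, not from the original source): if a
// reload of %EAX from stack slot fi#3 is to be inserted right before an
// instruction, and the preceding instruction neither reads nor writes %EAX
// (or any alias) and does not reference fi#3, the reload can be emitted
// above it instead, hiding part of the load latency.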
static MachineBasicBlock::iterator
ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
                 MachineBasicBlock::iterator const Begin,
                 unsigned PhysReg,
                 const TargetRegisterInfo *TRI,
                 bool DoReMat,
                 int SSorRMId,
                 const TargetInstrInfo *TII,
                 const MachineFunction &MF)
{
  if (!ScheduleSpills)
    return InsertLoc;

  // Spill backscheduling is of primary interest to addresses, so
  // don't do anything if the register isn't in the register class
  // used for pointers.
  const TargetLowering *TL = MF.getTarget().getTargetLowering();

  if (!TL->isTypeLegal(TL->getPointerTy()))
    // Believe it or not, this is true on PIC16.
    return InsertLoc;

  const TargetRegisterClass *ptrRegClass =
    TL->getRegClassFor(TL->getPointerTy());
  if (!ptrRegClass->contains(PhysReg))
    return InsertLoc;

  // Scan upwards through the preceding instructions. If an instruction doesn't
  // reference the stack slot or the register we're loading, we can
  // backschedule the reload up past it.
  MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
  while (NewInsertLoc != Begin) {
    MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
    for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
      MachineOperand &Op = Prev->getOperand(i);
      if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
        return NewInsertLoc;
    }
    if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
        Prev->findRegisterDefOperand(PhysReg))
      return NewInsertLoc;
    for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
      if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
          Prev->findRegisterDefOperand(*Alias))
        return NewInsertLoc;

    NewInsertLoc = Prev;
  }

  // If we made it to the beginning of the block, turn around and move back
  // down just past any existing reloads. They're likely to be reloads/remats
  // for instructions earlier than what our current reload/remat is for, so
  // they should be scheduled earlier.
  if (NewInsertLoc == Begin) {
    int FrameIdx;
    while (InsertLoc != NewInsertLoc &&
           (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
            TII->isTriviallyReMaterializable(NewInsertLoc)))
      ++NewInsertLoc;
  }

  return NewInsertLoc;
}
// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand. See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};
/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }
  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                           MachineFunction &MF, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// this:
  /// t1 := op t2, t3
  /// t2 <- assigned r0 for use by the reload but ended up reuse r1
  /// t3 <- assigned r1 for use by the reload but ended up reuse r0
  /// t1 <- desires r1
  ///       sees r1 is taken by t2, tries t2's reload register r0
  ///       sees r0 is taken by t3, tries t3's reload register r1
  ///       sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    MachineFunction &MF = *MI->getParent()->getParent();
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
                           Rejected, RegKills, KillOps, VRM);
  }
};
// ****************** //
// Utility Functions  //
// ****************** //

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                    SmallVectorImpl<MachineBasicBlock *> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
/// InvalidateKill - Invalidate register kill information for a specific
/// register. This also unsets the kills marker on the last kill operand.
static void InvalidateKill(unsigned Reg,
                           const TargetRegisterInfo* TRI,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    // KillOps[Reg] might be a def of a super-register.
    unsigned KReg = KillOps[Reg]->getReg();
    KillOps[KReg] = NULL;
    RegKills.reset(KReg);
    for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
      if (RegKills[*SR]) {
        KillOps[*SR]->setIsKill(false);
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef,
                             const TargetRegisterInfo *TRI) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
      continue;
    if (MO.getReg() == Reg)
      DefOp = &MO;
    else if (!MO.isDead())
      HasLiveDef = true;
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() == 0 ||
          (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true;  // Stop after scanning all the operands of this MI.
    }
  }
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
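// Illustrative example (editor's note, not from the original source): if an
// earlier instruction used %EAX and was marked "%EAX<kill>", but the value
// in %EAX is now being reused by this MI, the old kill marker is cleared and
// the kill information is transferred so that %EAX is considered killed at
// its latest use instead.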
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      // KillOps[Reg] might be a def of a super-register.
      unsigned KReg = KillOps[Reg]->getReg();
      KillOps[KReg] = NULL;
      RegKills.reset(KReg);

      // Must be a def of a super-register. Its other sub-registers are no
      // longer killed as well.
      for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }

    // Check for subreg kills as well.
    // = d4  <avoiding reload>
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      unsigned SReg = *SR;
      if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI) {
        KillOps[SReg]->setIsKill(false);
        unsigned KReg = KillOps[SReg]->getReg();
        KillOps[KReg] = NULL;
        RegKills.reset(KReg);

        for (const unsigned *SSR = TRI->getSubRegisters(KReg); *SSR; ++SSR) {
          KillOps[*SSR] = NULL;
          RegKills.reset(*SSR);
        }
      }
    }

    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
    for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);

  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
  assert(TID.getNumDefs() == 1 &&
         "Don't know how to remat instructions that define > 1 values!");

  TII->reMaterialize(MBB, MII, DestReg,
                     ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI);
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys && "Virtual register is not assigned a register?");
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
    MO.setSubReg(0);
  }
  ++NumReMats;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
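// Illustrative example (editor's note, not from the original source): on
// X86, asking for the member of the GR32 register class whose 16-bit
// sub-register (the appropriate SubIdx) is AX walks the class and returns
// EAX, because TRI->getSubReg(X86::EAX, SubIdx) == X86::AX.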
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DEBUG(errs() << "PhysReg " << TRI->getName(PhysReg)
          << " copied, it is available for use but can no longer be modified\n");
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DEBUG(errs() << "PhysReg " << TRI->getName(PhysReg)
          << " clobbered, invalidating ");
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(errs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n");
    else
      DEBUG(errs() << "SS#" << SlotOrReMat << "\n");
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-in's. If they are reused in the MBB, they will be added to the
/// live-in set to make the register scavenger and post-allocation scheduler
/// happy.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                               BitVector &RegKills,
                                               std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                                    unsigned PhysReg,
                                    MachineFunction &MF,
                                    MachineInstr *MI, AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = Spills.getRegInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.
  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0 &&
        RC->contains(Op.AssignedPhysReg)) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
                             Rejected, RegKills, KillOps, VRM);
    }

    // Otherwise, we might also have a problem if a previously reused
    // value aliases the new register. If so, codegen the previous reload
    // and use this one.
    unsigned PRRU = Op.PhysRegReused;
    if (TRI->regsOverlap(PRRU, PhysReg)) {
      // Okay, we found out that an alias of a reused register
      // was used. This isn't good because it means we have
      // to undo a previous reuse.
      MachineBasicBlock *MBB = MI->getParent();
      const TargetRegisterClass *AliasRC =
        MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

      // Copy Op out of the vector and remove it, we're going to insert an
      // explicit load for it.
      ReusedOp NewOp = Op;
      Reuses.erase(Reuses.begin()+ro);

      // MI may be using only a sub-register of PhysRegUsed.
      unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
      unsigned SubIdx = 0;
      assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
             "A reuse cannot be a virtual register");
      if (PRRU != RealPhysRegUsed) {
        // What was the sub-register index?
        unsigned SubReg;
        for (SubIdx = 1; (SubReg = TRI->getSubReg(PRRU, SubIdx)); SubIdx++)
          if (SubReg == RealPhysRegUsed)
            break;
        assert(SubReg == RealPhysRegUsed &&
               "Operand physreg is not a sub-register of PhysRegUsed");
      }
      // Ok, we're going to try to reload the assigned physreg into the
      // slot that we were supposed to in the first place. However, that
      // register could hold a reuse. Check to see if it conflicts or
      // would prefer us to use a different register.
      unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
                                            MF, MI, Spills, MaybeDeadStores,
                                            Rejected, RegKills, KillOps, VRM);

      bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
      int SSorRMId = DoReMat
        ? VRM.getReMatId(NewOp.VirtReg) : NewOp.StackSlotOrReMat;

      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
                         DoReMat, SSorRMId, TII, MF);

      if (DoReMat) {
        ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                      TRI, VRM);
      } else {
        TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                  NewOp.StackSlotOrReMat, AliasRC);
        MachineInstr *LoadMI = prior(InsertLoc);
        VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
        // Any stores to this stack slot are not dead anymore.
        MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
      }

      Spills.ClobberPhysReg(NewPhysReg);
      Spills.ClobberPhysReg(NewOp.PhysRegReused);

      unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
      MI->getOperand(NewOp.Operand).setReg(RReg);
      MI->getOperand(NewOp.Operand).setSubReg(0);

      Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
      UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
      DEBUG(errs() << '\t' << *prior(InsertLoc));

      DEBUG(errs() << "Reuse undone!\n");

      // Finally, PhysReg is now available, go ahead and use it.
      return PhysReg;
    }
  }

  return PhysReg;
}
// ************************************************************************ //

/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}
/// FindFreeRegister - Find a free register of a given register class by
/// looking at (at most) the last two machine instructions.
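// Illustrative example (editor's note, not from the original source): if the
// instruction just before the current one kills, say, %ECX, and neither of
// the last two instructions defines or otherwise uses %ECX, then %ECX can be
// handed out as a scratch register, provided it is allocatable and belongs
// to the requested register class.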
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  for (unsigned Count = 0; Count < 2; ++Count) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          TRI->getPhysicalRegisterRegClass(Kill) == RC)
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }

    MII = PrevMI;
  }

  return 0;
}
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      MO.setReg(PhysReg);
  }
}

struct RefSorter {
  bool operator()(const std::pair<MachineInstr*, int> &A,
                  const std::pair<MachineInstr*, int> &B) {
    return A.second < B.second;
  }
};
// ***************************** //
// Local Spiller Implementation  //
// ***************************** //
class LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *RegInfo;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    RegInfo = &MF.getRegInfo();
    TRI = MF.getTarget().getRegisterInfo();
    TII = MF.getTarget().getInstrInfo();
    AllocatableRegs = TRI->getAllocatableSet(MF);
    DEBUG(errs() << "\n**** Local spiller rewriting function '"
                 << MF.getFunction()->getName() << "':\n");
    DEBUG(errs() << "**** Machine Instrs (NOTE! Does not include spills and"
                    " reloads!) ****\n");
    // Spills - Keep track of which spilled values are available in physregs
    // so that we can choose to reuse the physregs instead of emitting
    // reloads. This is usually refreshed per basic block.
    AvailableSpills Spills(TRI, TII);

    // Keep track of kill information.
    BitVector RegKills(TRI->getNumRegs());
    std::vector<MachineOperand*> KillOps;
    KillOps.resize(TRI->getNumRegs(), NULL);

    // SingleEntrySuccs - Successor blocks which have a single predecessor.
    SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
    SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

    // Traverse the basic blocks depth first.
    MachineBasicBlock *Entry = MF.begin();
    SmallPtrSet<MachineBasicBlock*,16> Visited;
    for (df_ext_iterator<MachineBasicBlock*,
           SmallPtrSet<MachineBasicBlock*,16> >
           DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
         DFI != E; ++DFI) {
      MachineBasicBlock *MBB = *DFI;
      if (!EarlyVisited.count(MBB))
        RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);

      // If this MBB is the only predecessor of a successor, keep the
      // availability information and visit it next.
      do {
        // Keep visiting single predecessor successors as long as possible.
        SinglePredSuccs.clear();
        findSinglePredSuccessor(MBB, SinglePredSuccs);
        if (SinglePredSuccs.empty())
          MBB = 0;
        else {
          // FIXME: More than one successor, each of which has MBB as its
          // only predecessor.
          MBB = SinglePredSuccs[0];
          if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
            Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
            RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
          }
        }
      } while (MBB);

      // Clear the availability info.
      Spills.clear();
    }

    DEBUG(errs() << "**** Post Machine Instrs ****\n");
    // Mark unused spill slots.
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int SS = VRM.getLowSpillSlot();
    if (SS != VirtRegMap::NO_STACK_SLOT)
      for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
        if (!VRM.isSpillSlotUsed(SS)) {
          MFI->RemoveStackObject(SS);
          ++NumDSS;
        }

    return true;
  }
  /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions
  /// if a scratch register is available.
  ///     xorq  %r12<kill>, %r13
  ///     addq  %rax, -184(%rbp)
  ///     addq  %r13, -184(%rbp)
  /// ==>
  ///     xorq  %r12<kill>, %r13
  ///     movq  -184(%rbp), %r12
  ///     addq  %rax, %r12
  ///     addq  %r13, %r12
  ///     movq  %r12, -184(%rbp)
  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {
    MachineBasicBlock::iterator NextMII = next(MII);
    if (NextMII == MBB.end())
      return false;

    if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
      return false;

    // Now let's see if the last couple of instructions happen to have freed up
    // a register.
    const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
    unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
    if (!PhysReg)
      return false;

    MachineFunction &MF = *MBB.getParent();
    TRI = MF.getTarget().getRegisterInfo();
    MachineInstr &MI = *MII;
    if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
      return false;

    // If the next instruction also folds the same SS modref and can be
    // unfolded, then it's worthwhile to issue a load from SS into the free
    // register and then unfold these instructions.
    if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
      return false;
    // Back-schedule reloads and remats.
    ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, false, SS, TII, MF);

    // Load from SS to the spare physical register.
    TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
    // This invalidates Phys.
    Spills.ClobberPhysReg(PhysReg);
    // Remember it's available.
    Spills.addAvailable(SS, PhysReg);
    MaybeDeadStores[SS] = NULL;

    // Unfold current MI.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
      llvm_unreachable("Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
    VRM.transferRestorePts(&MI, NewMIs[0]);
    MII = MBB.insert(MII, NewMIs[0]);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);
    ++NumModRefUnfold;
    // Unfold next instructions that fold the same SS.
    do {
      MachineInstr &NextMI = *NextMII;
      NextMII = next(NextMII);
      NewMIs.clear();
      if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
        llvm_unreachable("Unable to unfold the load / store folding instruction!");
      assert(NewMIs.size() == 1);
      AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
      VRM.transferRestorePts(&NextMI, NewMIs[0]);
      MBB.insert(NextMII, NewMIs[0]);
      InvalidateKills(NextMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&NextMI);
      MBB.erase(&NextMI);
      ++NumModRefUnfold;
      if (NextMII == MBB.end())
        break;
    } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));

    // Store the value back into SS.
    TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
    MachineInstr *StoreMI = prior(NextMII);
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

    return true;
  }
  /// OptimizeByUnfold - Turn a store folding instruction into a load folding
  /// instruction. e.g.
  ///     movl  %eax, -32(%ebp)
  ///     movl  -36(%ebp), %eax
  ///     orl   %eax, -32(%ebp)
  /// ==>
  ///     orl   -36(%ebp), %eax
  ///     mov   %eax, -32(%ebp)
  /// This enables unfolding optimization for a subsequent instruction which
  /// will also eliminate the newly introduced store instruction.
  bool OptimizeByUnfold(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    unsigned UnfoldedOpc = 0;
    unsigned UnfoldPR = 0;
    unsigned UnfoldVR = 0;
    int FoldedSS = VirtRegMap::NO_STACK_SLOT;
    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      // Only transform a MI that folds a single register.
      if (UnfoldedOpc)
        return false;
      UnfoldVR = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      // MI2VirtMap can be updated, which invalidates the iterator.
      // Increment the iterator first.
      ++I;
      if (VRM.isAssignedReg(UnfoldVR))
        continue;
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      FoldedSS = VRM.getStackSlot(UnfoldVR);
      MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
      if (DeadStore && (MR & VirtRegMap::isModRef)) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
        if (!PhysReg || !DeadStore->readsRegister(PhysReg))
          continue;
        UnfoldPR = PhysReg;
        UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                      false, true);
      }
    }

    if (!UnfoldedOpc) {
      if (!UnfoldVR)
        return false;

      // Look for other unfolding opportunities.
      return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
                               MaybeDeadStores, Spills, RegKills, KillOps, VRM);
    }
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
        continue;
      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
        continue;
      if (VRM.isAssignedReg(VirtReg)) {
        unsigned PhysReg = VRM.getPhys(VirtReg);
        if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
      } else if (VRM.isReMaterialized(VirtReg))
        continue;
      int SS = VRM.getStackSlot(VirtReg);
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
      if (PhysReg) {
        if (TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
        continue;
      }
      if (VRM.hasPhys(VirtReg)) {
        PhysReg = VRM.getPhys(VirtReg);
        if (!TRI->regsOverlap(PhysReg, UnfoldPR))
          continue;
      }

      // Ok, we'll need to reload the value into a register which makes
      // it impossible to perform the store unfolding optimization later.
      // Let's see if it is possible to fold the load if the store is
      // unfolded. This allows us to perform the store unfolding
      // optimization.
      SmallVector<MachineInstr*, 4> NewMIs;
      if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
        assert(NewMIs.size() == 1);
        MachineInstr *NewMI = NewMIs.back();
        NewMIs.clear();
        int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
        assert(Idx != -1);
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(Idx);
        MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
        if (FoldedMI) {
          VRM.addSpillSlotUse(SS, FoldedMI);
          if (!VRM.hasPhys(UnfoldVR))
            VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
          VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
          MII = MBB.insert(MII, FoldedMI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          MF.DeleteMachineInstr(NewMI);
          return true;
        }
        MF.DeleteMachineInstr(NewMI);
      }
    }

    return false;
  }
  /// CommuteChangesDestination - We are looking for r0 = op r1, r2, where
  /// SrcReg is r1 and it is tied to r0. Return true if after commuting this
  /// instruction it will be r0 = op r2, r1.
  static bool CommuteChangesDestination(MachineInstr *DefMI,
                                        const TargetInstrDesc &TID,
                                        unsigned SrcReg,
                                        const TargetInstrInfo *TII,
                                        unsigned &DstIdx) {
    if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
      return false;
    if (!DefMI->getOperand(1).isReg() ||
        DefMI->getOperand(1).getReg() != SrcReg)
      return false;
    unsigned DefIdx;
    if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
      return false;
    unsigned SrcIdx1, SrcIdx2;
    if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
      return false;
    if (SrcIdx1 == 1 && SrcIdx2 == 2) {
      DstIdx = 2;
      return true;
    }
    return false;
  }
  /// CommuteToFoldReload -
  /// Look for
  /// r1 = load fi#1
  /// r1 = op r1, r2<kill>
  /// store r1, fi#1
  ///
  /// If op is commutable and r2 is killed, then we can xform these to
  /// r2 = op r2, fi#1
  /// store r2, fi#1
  bool CommuteToFoldReload(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI,
                           VirtRegMap &VRM) {
    if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
      return false;

    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    MachineBasicBlock::iterator DefMII = prior(MII);
    MachineInstr *DefMI = DefMII;
    const TargetInstrDesc &TID = DefMI->getDesc();
    unsigned NewDstIdx;
    if (DefMII != MBB.begin() &&
        TID.isCommutable() &&
        CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
      MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
      unsigned NewReg = NewDstMO.getReg();
      if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
        return false;
      MachineInstr *ReloadMI = prior(DefMII);
      int FrameIdx;
      unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
      if (DestReg != SrcReg || FrameIdx != SS)
        return false;
      int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
      if (UseIdx == -1)
        return false;
      unsigned DefIdx;
      if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
        return false;
      assert(DefMI->getOperand(DefIdx).isReg() &&
             DefMI->getOperand(DefIdx).getReg() == SrcReg);
      // Now commute def instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
      if (!CommutedMI)
        return false;
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(NewDstIdx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
      // Not needed since foldMemoryOperand returns new MI.
      MF.DeleteMachineInstr(CommutedMI);
      if (!FoldedMI)
        return false;

      VRM.addSpillSlotUse(SS, FoldedMI);
      VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
      // Insert new def MI and spill MI.
      const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
      TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
      MII = prior(MII);
      MachineInstr *StoreMI = MII;
      VRM.addSpillSlotUse(SS, StoreMI);
      VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

      // Delete all 3 old instructions.
      InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(ReloadMI);
      MBB.erase(ReloadMI);
      InvalidateKills(*DefMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(DefMI);
      MBB.erase(DefMI);
      InvalidateKills(MI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&MI);
      MBB.erase(&MI);

      // If NewReg was previously holding value of some SS, it's now clobbered.
      // This has to be done now because it's a physical register. When this
      // instruction is re-visited, it's ignored.
      Spills.ClobberPhysReg(NewReg);

      ++NumCommutes;
      return true;
    }

    return false;
  }
  /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
  /// the last store to the same slot is now dead. If so, remove the last store.
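  // Illustrative example (editor's note, not from the original source): if an
  // earlier "store %EAX -> fi#2" is still recorded as the last store to fi#2
  // and the slot has not been reloaded since, inserting another store to fi#2
  // below makes the earlier one dead, so it is erased (this is what the
  // NumDSE statistic counts).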
  void SpillRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {

    MachineBasicBlock::iterator oldNextMII = next(MII);
    TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
    MachineInstr *StoreMI = prior(oldNextMII);
    VRM.addSpillSlotUse(StackSlot, StoreMI);
    DEBUG(errs() << "Store:\t" << *StoreMI);

    // If there is a dead store to this stack slot, nuke it now.
    if (LastStore) {
      DEBUG(errs() << "Removed dead store:\t" << *LastStore);
      ++NumDSE;
      SmallVector<unsigned, 2> KillRegs;
      InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
      MachineBasicBlock::iterator PrevMII = LastStore;
      bool CheckDef = PrevMII != MBB.begin();
      if (CheckDef)
        --PrevMII;
      VRM.RemoveMachineInstrFromMaps(LastStore);
      MBB.erase(LastStore);
      if (CheckDef) {
        // Look at defs of killed registers on the store. Mark the defs
        // as dead since the store has been deleted and they aren't
        // being reused.
        for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
          bool HasOtherDef = false;
          if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
            MachineInstr *DeadDef = PrevMII;
            if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
              // FIXME: This assumes a remat def does not have side effects.
              VRM.RemoveMachineInstrFromMaps(DeadDef);
              MBB.erase(DeadDef);
              ++NumDRM;
            }
          }
        }
      }
    }

    // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
    // the last of multiple instructions is the actual store.
    LastStore = prior(oldNextMII);

    // If the stack slot value was previously available in some other
    // register, change it now. Otherwise, make the register available,
    // in PhysReg.
    Spills.ModifyStackSlotOrReMat(StackSlot);
    Spills.ClobberPhysReg(PhysReg);
    Spills.addAvailable(StackSlot, PhysReg, isAvailable);
    ++NumStores;
  }
  /// isSafeToDelete - Return true if this instruction doesn't produce any side
  /// effect and all of its defs are dead.
  static bool isSafeToDelete(MachineInstr &MI) {
    const TargetInstrDesc &TID = MI.getDesc();
    if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
        TID.isBarrier() || TID.isReturn() ||
        TID.hasUnmodeledSideEffects())
      return false;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && !MO.isDead())
        return false;
      if (MO.isUse() && MO.isKill())
        // FIXME: We can't remove kill markers or else the scavenger will
        // assert. An alternative is to add an ADD pseudo instruction to
        // replace kill markers.
        return false;
    }
    return true;
  }
  /// TransferDeadness - An identity copy definition is dead and it's being
  /// removed. Find the last def or use and mark it as dead / kill.
  void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                        unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    SmallPtrSet<MachineInstr*, 4> Seens;
    SmallVector<std::pair<MachineInstr*, int>,8> Refs;
    for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
           RE = RegInfo->reg_end(); RI != RE; ++RI) {
      MachineInstr *UDMI = &*RI;
      if (UDMI->getParent() != MBB)
        continue;
      DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
      if (DI == DistanceMap.end() || DI->second > CurDist)
        continue;
      if (Seens.insert(UDMI))
        Refs.push_back(std::make_pair(UDMI, DI->second));
    }

    if (Refs.empty()) return;
    std::sort(Refs.begin(), Refs.end(), RefSorter());

    while (!Refs.empty()) {
      MachineInstr *LastUDMI = Refs.back().first;
      Refs.pop_back();

      MachineOperand *LastUD = NULL;
      for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = LastUDMI->getOperand(i);
        if (!MO.isReg() || MO.getReg() != Reg)
          continue;
        if (!LastUD || (LastUD->isUse() && MO.isDef()))
          LastUD = &MO;
        if (LastUDMI->isRegTiedToDefOperand(i))
          break;
      }
      if (LastUD->isDef()) {
        // If the instruction has no side effect, delete it and propagate
        // backward further. Otherwise, mark it dead and we are done.
        if (!isSafeToDelete(*LastUDMI)) {
          LastUD->setIsDead();
          break;
        }
        VRM.RemoveMachineInstrFromMaps(LastUDMI);
        MBB->erase(LastUDMI);
      } else {
        LastUD->setIsKill();
        RegKills.set(Reg);
        KillOps[Reg] = LastUD;
        break;
      }
    }
  }
  /// RewriteMBB - Keep track of which spills are available even after the
  /// register allocator is done with them. If possible, avoid reloading vregs.
  void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                  LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

    DEBUG(errs() << "\n**** Local spiller rewriting MBB '"
                 << MBB.getBasicBlock()->getName() << "':\n");

    MachineFunction &MF = *MBB.getParent();
    // MaybeDeadStores - When we need to write a value back into a stack slot,
    // keep track of the inserted store. If the stack slot value is never read
    // (because the value was used from some available register, for example),
    // and subsequently stored to, the original store is dead. This map keeps
    // track of inserted stores that are not used. If we see a subsequent store
    // to the same stack slot, the original store is deleted.
    std::vector<MachineInstr*> MaybeDeadStores;
    MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
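    // Illustrative example (editor's note, not from the original source):
    // after a "store %EAX -> fi#4" is inserted, MaybeDeadStores[4] points at
    // that store; if fi#4 is stored to again before anything reads it (reloads
    // were satisfied straight from %EAX), the recorded store is dead and can
    // be deleted.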
    // ReMatDefs - These are rematerializable def MIs which are not deleted.
    SmallSet<MachineInstr*, 4> ReMatDefs;

    SmallSet<unsigned, 2> KilledMIRegs;

    KillOps.resize(TRI->getNumRegs(), NULL);

    unsigned Dist = 0;
    DistanceMap.clear();
    for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
         MII != E; ) {
      MachineBasicBlock::iterator NextMII = next(MII);

      VirtRegMap::MI2VirtMapTy::const_iterator I, End;
      bool Erased = false;
      bool BackTracked = false;
      if (OptimizeByUnfold(MBB, MII,
                           MaybeDeadStores, Spills, RegKills, KillOps, VRM))
        NextMII = next(MII);
      MachineInstr &MI = *MII;

      if (VRM.hasEmergencySpills(&MI)) {
        // Spill physical register(s) in the rare case the allocator has run
        // out of registers to allocate.
        SmallSet<int, 4> UsedSS;
        std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
        for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
          unsigned PhysReg = EmSpills[i];
          const TargetRegisterClass *RC =
            TRI->getPhysicalRegisterRegClass(PhysReg);
          assert(RC && "Unable to determine register class!");
          int SS = VRM.getEmergencySpillSlot(RC);
          if (UsedSS.count(SS))
            llvm_unreachable("Need to spill more than one physical register!");
          UsedSS.insert(SS);

          TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
          MachineInstr *StoreMI = prior(MII);
          VRM.addSpillSlotUse(SS, StoreMI);

          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(next(MII), MBB.begin(), PhysReg, TRI, false,
                             SS, TII, MF);

          TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SS, RC);

          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(SS, LoadMI);
          ++NumPSpills;
          DistanceMap.insert(std::make_pair(LoadMI, Dist++));
        }
        NextMII = next(MII);
      }
      // Insert restores here if asked to.
      if (VRM.isRestorePt(&MI)) {
        std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
        for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
          unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
          if (!VRM.getPreSplitReg(VirtReg))
            continue;  // Split interval spilled again.
          unsigned Phys = VRM.getPhys(VirtReg);
          RegInfo->setPhysRegUsed(Phys);

          // Check if the value being restored is available. If so, it must be
          // from a predecessor BB that falls through into this BB. We do not
          // expect:
          // BB1:
          // r1 = load fi#1
          // ...
          //    = r1<kill>
          // ... # r1 not clobbered
          // ...
          //    = load fi#1
          bool DoReMat = VRM.isReMaterialized(VirtReg);
          int SSorRMId = DoReMat
            ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
          if (InReg == Phys) {
            // If the value is already available in the expected register, save
            // a reload / remat.
            if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << SSorRMId);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(InReg) << " for vreg"
                         << VirtReg <<" instead of reloading into physreg "
                         << TRI->getName(Phys) << '\n');
            ++NumOmitted;
            continue;
          } else if (InReg && InReg != Phys) {
            if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << SSorRMId);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(InReg) << " for vreg"
                         << VirtReg <<" by copying it into physreg "
                         << TRI->getName(Phys) << '\n');

            // If the reloaded / remat value is available in another register,
            // copy it to the desired register.

            // Back-schedule reloads and remats.
            MachineBasicBlock::iterator InsertLoc =
              ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
                               SSorRMId, TII, MF);

            TII->copyRegToReg(MBB, InsertLoc, Phys, InReg, RC, RC);

            // This invalidates Phys.
            Spills.ClobberPhysReg(Phys);
            // Remember it's available.
            Spills.addAvailable(SSorRMId, Phys);

            MachineInstr *CopyMI = prior(InsertLoc);
            CopyMI->setAsmPrinterFlag(AsmPrinter::ReloadReuse);
            MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
            KillOpnd->setIsKill();
            UpdateKills(*CopyMI, TRI, RegKills, KillOps);

            DEBUG(errs() << '\t' << *CopyMI);
            ++NumCopified;
            continue;
          }
          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
                             SSorRMId, TII, MF);

          if (VRM.isReMaterialized(VirtReg)) {
            ReMaterialize(MBB, InsertLoc, Phys, VirtReg, TII, TRI, VRM);
          } else {
            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
            TII->loadRegFromStackSlot(MBB, InsertLoc, Phys, SSorRMId, RC);
            MachineInstr *LoadMI = prior(InsertLoc);
            VRM.addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
            DistanceMap.insert(std::make_pair(LoadMI, Dist++));
          }

          // This invalidates Phys.
          Spills.ClobberPhysReg(Phys);
          // Remember it's available.
          Spills.addAvailable(SSorRMId, Phys);

          UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
          DEBUG(errs() << '\t' << *prior(MII));
        }
      }
1768 // Insert spills here if asked to.
1769 if (VRM.isSpillPt(&MI)) {
1770 std::vector<std::pair<unsigned,bool> > &SpillRegs =
1771 VRM.getSpillPtSpills(&MI);
1772 for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
1773 unsigned VirtReg = SpillRegs[i].first;
1774 bool isKill = SpillRegs[i].second;
1775 if (!VRM.getPreSplitReg(VirtReg))
1776 continue; // Split interval spilled again.
1777 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
1778 unsigned Phys = VRM.getPhys(VirtReg);
1779 int StackSlot = VRM.getStackSlot(VirtReg);
1780 MachineBasicBlock::iterator oldNextMII = next(MII);
1781 TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
1782 MachineInstr *StoreMI = prior(oldNextMII);
1783 VRM.addSpillSlotUse(StackSlot, StoreMI);
1784 DEBUG(errs() << "Store:\t" << *StoreMI);
1785 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
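// Illustrative example (hypothetical registers and slots): a spill point
// for %reg1024 assigned to EAX with stack slot fi#2 results in something
// like "MOV32_mr fi#2, EAX" being emitted immediately after MI, and the
// store is recorded against the virtual register as a mod reference.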
1787 NextMII = next(MII);
1790 /// ReusedOperands - Keep track of operand reuse in case we need to undo reuse.
1792 ReuseInfo ReusedOperands(MI, TRI);
1793 SmallVector<unsigned, 4> VirtUseOps;
1794 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1795 MachineOperand &MO = MI.getOperand(i);
1796 if (!MO.isReg() || MO.getReg() == 0)
1797 continue; // Ignore non-register operands.
1799 unsigned VirtReg = MO.getReg();
1800 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
1801 // Ignore physregs for spilling, but remember that it is used by this instruction.
1803 RegInfo->setPhysRegUsed(VirtReg);
1807 // We want to process implicit virtual register uses first.
1808 if (MO.isImplicit())
1809 // If the virtual register is implicitly defined, emit an implicit_def
1810 // before it so the scavenger knows it's "defined".
1811 // FIXME: This is a horrible hack done by the register allocator to
1812 // remat a definition with a virtual register operand.
1813 VirtUseOps.insert(VirtUseOps.begin(), i);
1815 VirtUseOps.push_back(i);
1818 // Process all of the spilled uses and all non-spilled reg references.
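// Each spilled use (and use&def) is handled in one of three ways below:
// the operand is rewritten directly to its assigned physreg if the vreg
// was never spilled, rewritten to reuse a physreg in which the stack
// slot / remat value is already available, or satisfied by emitting a
// fresh reload or rematerialization; pure defs are handled further down.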
1819 SmallVector<int, 2> PotentialDeadStoreSlots;
1820 KilledMIRegs.clear();
1821 for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
1822 unsigned i = VirtUseOps[j];
1823 MachineOperand &MO = MI.getOperand(i);
1824 unsigned VirtReg = MO.getReg();
1825 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
1826 "Not a virtual register?");
1828 unsigned SubIdx = MO.getSubReg();
1829 if (VRM.isAssignedReg(VirtReg)) {
1830 // This virtual register was assigned a physreg!
1831 unsigned Phys = VRM.getPhys(VirtReg);
1832 RegInfo->setPhysRegUsed(Phys);
1834 ReusedOperands.markClobbered(Phys);
1835 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
1836 MI.getOperand(i).setReg(RReg);
1837 MI.getOperand(i).setSubReg(0);
1838 if (VRM.isImplicitlyDefined(VirtReg))
1839 // FIXME: Is this needed?
1840 BuildMI(MBB, &MI, MI.getDebugLoc(),
1841 TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
1845 // This virtual register is now known to be a spilled value.
1847 continue; // Handle defs in the loop below (handle use&def here though)
1849 bool AvoidReload = MO.isUndef();
1850 // Check if it is defined by an implicit def. It should not be spilled.
1851 // Note, this is for correctness reasons, e.g.
1852 // 8 %reg1024<def> = IMPLICIT_DEF
1853 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1854 // The live range [12, 14) is not part of the r1024 live interval since
1855 // it's defined by an implicit def. It will not conflict with the live
1856 // interval of r1025. Now suppose both registers are spilled; you can
1857 // easily see a situation where both registers are reloaded before
1858 // the INSERT_SUBREG into target registers that would overlap.
1859 bool DoReMat = VRM.isReMaterialized(VirtReg);
1860 int SSorRMId = DoReMat
1861 ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
1862 int ReuseSlot = SSorRMId;
1864 // Check to see if this stack slot is available.
1865 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1867 // If this is a sub-register use, make sure the reuse register is in the
1868 // right register class. For example, for x86 not all of the 32-bit
1869 // registers have accessible sub-registers.
1870 // Similarly so for EXTRACT_SUBREG. Consider this:
1872 // MOV32_mr fi#1, EDI
1874 // = EXTRACT_SUBREG fi#1
1875 // fi#1 is available in EDI, but it cannot be reused because it's not in
1876 // the right register class.
1877 if (PhysReg && !AvoidReload &&
1878 (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
1879 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1880 if (!RC->contains(PhysReg))
1884 if (PhysReg && !AvoidReload) {
1885 // This spilled operand might be part of a two-address operand. If this
1886 // is the case, then changing it will necessarily require changing the
1887 // def part of the instruction as well. However, in some cases, we
1888 // aren't allowed to modify the reused register. If none of these cases apply, reuse it.
1890 bool CanReuse = true;
1891 bool isTied = MI.isRegTiedToDefOperand(i);
1893 // Okay, we have a two address operand. We can reuse this physreg as
1894 // long as we are allowed to clobber the value and there isn't an
1895 // earlier def that has already clobbered the physreg.
1896 CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
1897 Spills.canClobberPhysReg(PhysReg);
1901 // If this stack slot value is already available, reuse it!
1902 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
1903 DEBUG(errs() << "Reusing RM#"
1904 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
1906 DEBUG(errs() << "Reusing SS#" << ReuseSlot);
1907 DEBUG(errs() << " from physreg "
1908 << TRI->getName(PhysReg) << " for vreg"
1909 << VirtReg <<" instead of reloading into physreg "
1910 << TRI->getName(VRM.getPhys(VirtReg)) << '\n');
1911 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1912 MI.getOperand(i).setReg(RReg);
1913 MI.getOperand(i).setSubReg(0);
1915 // The only technical detail we have is that we don't know that
1916 // PhysReg won't be clobbered by a reloaded stack slot that occurs
1917 // later in the instruction. In particular, consider 'op V1, V2'.
1918 // If V1 is available in physreg R0, we would choose to reuse it
1919 // here, instead of reloading it into the register the allocator
1920 // indicated (say R1). However, V2 might have to be reloaded
1921 // later, and it might indicate that it needs to live in R0. When
1922 // this occurs, we need to have information available that
1923 // indicates it is safe to use R1 for the reload instead of R0.
1925 // To further complicate matters, we might conflict with an alias,
1926 // or R0 and R1 might not be compatible with each other. In this
1927 // case, we actually insert a reload for V1 in R1, ensuring that
1928 // we can get at R0 or its alias.
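// Illustrative sequence (hypothetical vregs/physregs):
//   op V1, V2          ; V1 is available in R0, so R0 is reused for V1
//   ...                ; V2's reload is later found to require R0
// In that case GetRegForReload undoes the reuse and reloads V1 into R1,
// leaving R0 (or its alias) free for V2, exactly as described above.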
1929 ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
1930 VRM.getPhys(VirtReg), VirtReg);
1932 // Only mark it clobbered if this is a use&def operand.
1933 ReusedOperands.markClobbered(PhysReg);
1936 if (MI.getOperand(i).isKill() &&
1937 ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
1939 // The store of this spilled value is potentially dead, but we
1940 // won't know for certain until we've confirmed that the re-use
1941 // above is valid, which means waiting until the other operands
1942 // are processed. For now we just track the spill slot, we'll
1943 // remove it after the other operands are processed if valid.
1945 PotentialDeadStoreSlots.push_back(ReuseSlot);
1948 // Mark it isKill if there are no other uses of the same virtual
1949 // register and it's not a two-address operand. IsKill will be
1950 // unset if the reg is reused.
1951 if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
1952 MI.getOperand(i).setIsKill();
1953 KilledMIRegs.insert(VirtReg);
1959 // Otherwise we have a situation where we have a two-address instruction
1960 // whose mod/ref operand needs to be reloaded. This reload is already
1961 // available in some register "PhysReg", but if we used PhysReg as the
1962 // operand to our 2-addr instruction, the instruction would modify
1963 // PhysReg. This isn't cool if something later uses PhysReg and expects
1964 // to get its initial value.
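// Illustrative example (hypothetical registers): if the mod/ref operand's
// value is available in R0 but the instruction is two-address, using R0
// as the operand would let the instruction overwrite R0's value; when the
// designated register differs from R0, the code below instead copies R0
// into the designated register and rewrites the operand to that copy.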
1966 // To avoid this problem, and to avoid doing a load right after a store,
1967 // we emit a copy from PhysReg into the designated register for this operand.
1969 unsigned DesignatedReg = VRM.getPhys(VirtReg);
1970 assert(DesignatedReg && "Must map virtreg to physreg!");
1972 // Note that, if we reused a register for a previous operand, the
1973 // register we want to reload into might not actually be
1974 // available. If this occurs, use the register indicated by the reuser.
1976 if (ReusedOperands.hasReuses())
1977 DesignatedReg = ReusedOperands.GetRegForReload(VirtReg,
1979 Spills, MaybeDeadStores, RegKills, KillOps, VRM);
1981 // If the mapped designated register is actually the physreg we have
1982 // incoming, we don't need to insert a dead copy.
1983 if (DesignatedReg == PhysReg) {
1984 // If this stack slot value is already available, reuse it!
1985 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
1986 DEBUG(errs() << "Reusing RM#"
1987 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
1989 DEBUG(errs() << "Reusing SS#" << ReuseSlot);
1990 DEBUG(errs() << " from physreg " << TRI->getName(PhysReg)
1991 << " for vreg" << VirtReg
1992 << " instead of reloading into same physreg.\n");
1993 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1994 MI.getOperand(i).setReg(RReg);
1995 MI.getOperand(i).setSubReg(0);
1996 ReusedOperands.markClobbered(RReg);
2001 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
2002 RegInfo->setPhysRegUsed(DesignatedReg);
2003 ReusedOperands.markClobbered(DesignatedReg);
2005 // Back-schedule reloads and remats.
2006 MachineBasicBlock::iterator InsertLoc =
2007 ComputeReloadLoc(&MI, MBB.begin(), PhysReg, TRI, DoReMat,
2010 TII->copyRegToReg(MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);
2012 MachineInstr *CopyMI = prior(InsertLoc);
2013 CopyMI->setAsmPrinterFlag(AsmPrinter::ReloadReuse);
2014 UpdateKills(*CopyMI, TRI, RegKills, KillOps);
2016 // This invalidates DesignatedReg.
2017 Spills.ClobberPhysReg(DesignatedReg);
2019 Spills.addAvailable(ReuseSlot, DesignatedReg);
2021 SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
2022 MI.getOperand(i).setReg(RReg);
2023 MI.getOperand(i).setSubReg(0);
2024 DEBUG(errs() << '\t' << *prior(MII));
2029 // Otherwise, reload it and remember that we have it.
2030 PhysReg = VRM.getPhys(VirtReg);
2031 assert(PhysReg && "Must map virtreg to physreg!");
2033 // Note that, if we reused a register for a previous operand, the
2034 // register we want to reload into might not actually be
2035 // available. If this occurs, use the register indicated by the reuser.
2037 if (ReusedOperands.hasReuses())
2038 PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
2039 Spills, MaybeDeadStores, RegKills, KillOps, VRM);
2041 RegInfo->setPhysRegUsed(PhysReg);
2042 ReusedOperands.markClobbered(PhysReg);
2046 // Back-schedule reloads and remats.
2047 MachineBasicBlock::iterator InsertLoc =
2048 ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, DoReMat,
2052 ReMaterialize(MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, VRM);
2054 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
2055 TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SSorRMId, RC);
2056 MachineInstr *LoadMI = prior(InsertLoc);
2057 VRM.addSpillSlotUse(SSorRMId, LoadMI);
2059 DistanceMap.insert(std::make_pair(LoadMI, Dist++));
2061 // This invalidates PhysReg.
2062 Spills.ClobberPhysReg(PhysReg);
2064 // Any stores to this stack slot are not dead anymore.
2066 MaybeDeadStores[SSorRMId] = NULL;
2067 Spills.addAvailable(SSorRMId, PhysReg);
2068 // Assumes this is the last use. IsKill will be unset if reg is reused
2069 // unless it's a two-address operand.
2070 if (!MI.isRegTiedToDefOperand(i) &&
2071 KilledMIRegs.count(VirtReg) == 0) {
2072 MI.getOperand(i).setIsKill();
2073 KilledMIRegs.insert(VirtReg);
2076 UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
2077 DEBUG(errs() << '\t' << *prior(InsertLoc));
2079 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2080 MI.getOperand(i).setReg(RReg);
2081 MI.getOperand(i).setSubReg(0);
2084 // Ok - now we can remove stores that have been confirmed dead.
2085 for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
2086 // This was the last use and the spilled value is still available
2087 // for reuse. That means the spill was unnecessary!
2088 int PDSSlot = PotentialDeadStoreSlots[j];
2089 MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
2091 DEBUG(errs() << "Removed dead store:\t" << *DeadStore);
2092 InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2093 VRM.RemoveMachineInstrFromMaps(DeadStore);
2094 MBB.erase(DeadStore);
2095 MaybeDeadStores[PDSSlot] = NULL;
2101 DEBUG(errs() << '\t' << MI);
2104 // If we have folded references to memory operands, make sure we clear all
2105 // physical registers that may contain the value of the spilled virtual register.
2107 SmallSet<int, 2> FoldedSS;
2108 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
2109 unsigned VirtReg = I->second.first;
2110 VirtRegMap::ModRef MR = I->second.second;
2111 DEBUG(errs() << "Folded vreg: " << VirtReg << " MR: " << MR);
2113 // MI2VirtMap can be updated, which invalidates the iterator.
2114 // Increment the iterator first.
2116 int SS = VRM.getStackSlot(VirtReg);
2117 if (SS == VirtRegMap::NO_STACK_SLOT)
2119 FoldedSS.insert(SS);
2120 DEBUG(errs() << " - StackSlot: " << SS << "\n");
2122 // If this folded instruction is just a use, check to see if it's a
2123 // straight load from the virt reg slot.
2124 if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
2126 unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
2127 if (DestReg && FrameIdx == SS) {
2128 // If this spill slot is available, turn it into a copy (or nothing)
2129 // instead of leaving it as a load!
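// Illustrative example (hypothetical registers): if fi#1 is known to be
// live in EDX, a folded reload of fi#1 into EAX is rewritten below as a
// copy from EDX to EAX (marked as reload-reuse), and is removed entirely
// when the destination register is already EDX.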
2130 if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
2131 DEBUG(errs() << "Promoted Load To Copy: " << MI);
2132 if (DestReg != InReg) {
2133 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
2134 TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
2135 MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
2136 unsigned SubIdx = DefMO->getSubReg();
2137 // Revisit the copy so we make sure to notice the effects of the
2138 // operation on the destreg (either needing to RA it if it's
2139 // virtual or needing to clobber any values if it's physical).
2141 --NextMII; // backtrack to the copy.
2142 NextMII->setAsmPrinterFlag(AsmPrinter::ReloadReuse);
2143 // Propagate the sub-register index over.
2145 DefMO = NextMII->findRegisterDefOperand(DestReg);
2146 DefMO->setSubReg(SubIdx);
2150 MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
2151 KillOpnd->setIsKill();
2155 DEBUG(errs() << "Removing now-noop copy: " << MI);
2156 // Unset last kill since it's being reused.
2157 InvalidateKill(InReg, TRI, RegKills, KillOps);
2158 Spills.disallowClobberPhysReg(InReg);
2161 InvalidateKills(MI, TRI, RegKills, KillOps);
2162 VRM.RemoveMachineInstrFromMaps(&MI);
2165 goto ProcessNextInst;
2168 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2169 SmallVector<MachineInstr*, 4> NewMIs;
2171 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
2172 MBB.insert(MII, NewMIs[0]);
2173 InvalidateKills(MI, TRI, RegKills, KillOps);
2174 VRM.RemoveMachineInstrFromMaps(&MI);
2177 --NextMII; // backtrack to the unfolded instruction.
2179 goto ProcessNextInst;
2184 // If this reference is not a use, any previous store is now dead.
2185 // Otherwise, the store to this stack slot is not dead anymore.
2186 MachineInstr* DeadStore = MaybeDeadStores[SS];
2188 bool isDead = !(MR & VirtRegMap::isRef);
2189 MachineInstr *NewStore = NULL;
2190 if (MR & VirtRegMap::isModRef) {
2191 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2192 SmallVector<MachineInstr*, 4> NewMIs;
2193 // We can reuse this physreg as long as we are allowed to clobber
2194 // the value and there isn't an earlier def that has already clobbered the physreg.
2197 !ReusedOperands.isClobbered(PhysReg) &&
2198 Spills.canClobberPhysReg(PhysReg) &&
2199 !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
2200 MachineOperand *KillOpnd =
2201 DeadStore->findRegisterUseOperand(PhysReg, true);
2202 // Note, if the store is storing a sub-register, it's possible the
2203 // super-register is needed below.
2204 if (KillOpnd && !KillOpnd->getSubReg() &&
2205 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
2206 MBB.insert(MII, NewMIs[0]);
2207 NewStore = NewMIs[1];
2208 MBB.insert(MII, NewStore);
2209 VRM.addSpillSlotUse(SS, NewStore);
2210 InvalidateKills(MI, TRI, RegKills, KillOps);
2211 VRM.RemoveMachineInstrFromMaps(&MI);
2215 --NextMII; // backtrack to the unfolded instruction.
2223 if (isDead) { // Previous store is dead.
2224 // If we get here, the store is dead, nuke it now.
2225 DEBUG(errs() << "Removed dead store:\t" << *DeadStore);
2226 InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2227 VRM.RemoveMachineInstrFromMaps(DeadStore);
2228 MBB.erase(DeadStore);
2233 MaybeDeadStores[SS] = NULL;
2235 // Treat this store as a spill merged into a copy. That makes the
2236 // stack slot value available.
2237 VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
2238 goto ProcessNextInst;
2242 // If the spill slot value is available, and this is a new definition of
2243 // the value, the value is not available anymore.
2244 if (MR & VirtRegMap::isMod) {
2245 // Notice that the value in this stack slot has been modified.
2246 Spills.ModifyStackSlotOrReMat(SS);
2248 // If this is *just* a mod of the value, check to see if this is just a
2249 // store to the spill slot (i.e. the spill got merged into the copy). If
2250 // so, realize that the vreg is available now, and add the store to the
2251 // MaybeDeadStore info.
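// Illustrative example (hypothetical slot/register): after
//   MOV32_mr fi#3, EAX        ; spill that got merged into a copy
// fi#3 is recorded as available in EAX, and the store is remembered in
// MaybeDeadStores; if fi#3 is overwritten again with no intervening
// read, the earlier store can later be deleted as dead.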
2253 if (!(MR & VirtRegMap::isRef)) {
2254 if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
2255 assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
2256 "Src hasn't been allocated yet?");
2258 if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
2259 Spills, RegKills, KillOps, TRI, VRM)) {
2260 NextMII = next(MII);
2262 goto ProcessNextInst;
2265 // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
2266 // this as a potentially dead store in case there is a subsequent
2267 // store into the stack slot without a read from it.
2268 MaybeDeadStores[StackSlot] = &MI;
2270 // If the stack slot value was previously available in some other
2271 // register, change it now. Otherwise, record that the stack slot's
2272 // value is now available in SrcReg.
2273 Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
2279 // Process all of the spilled defs.
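// For physreg defs this mostly means noticing noop copies and loads from
// spill slots; for virtual-register defs the operand is rewritten to its
// assigned (or reuse-adjusted) physreg and the value is stored back to
// its stack slot via SpillRegToStackSlot.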
2280 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
2281 MachineOperand &MO = MI.getOperand(i);
2282 if (!(MO.isReg() && MO.getReg() && MO.isDef()))
2285 unsigned VirtReg = MO.getReg();
2286 if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
2287 // Check to see if this is a noop copy. If so, eliminate the
2288 // instruction before considering the dest reg to be changed.
2289 // Also check if it's copying from an "undef"; if so, we can't
2290 // eliminate this or else the undef marker is lost and it will
2291 // confuse the scavenger. This is extremely rare.
2292 unsigned Src, Dst, SrcSR, DstSR;
2293 if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst &&
2294 !MI.findRegisterUseOperand(Src)->isUndef()) {
2296 DEBUG(errs() << "Removing now-noop copy: " << MI);
2297 SmallVector<unsigned, 2> KillRegs;
2298 InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
2299 if (MO.isDead() && !KillRegs.empty()) {
2300 // Source register or an implicit super/sub-register use is killed.
2301 assert(KillRegs[0] == Dst ||
2302 TRI->isSubRegister(KillRegs[0], Dst) ||
2303 TRI->isSuperRegister(KillRegs[0], Dst));
2304 // Last def is now dead.
2305 TransferDeadness(&MBB, Dist, Src, RegKills, KillOps, VRM);
2307 VRM.RemoveMachineInstrFromMaps(&MI);
2310 Spills.disallowClobberPhysReg(VirtReg);
2311 goto ProcessNextInst;
2314 // If it's not a no-op copy, it clobbers the value in the destreg.
2315 Spills.ClobberPhysReg(VirtReg);
2316 ReusedOperands.markClobbered(VirtReg);
2318 // Check to see if this instruction is a load from a stack slot into
2319 // a register. If so, this provides the stack slot value in the reg.
2321 if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
2322 assert(DestReg == VirtReg && "Unknown load situation!");
2324 // If it is a folded reference, then it's not safe to clobber.
2325 bool Folded = FoldedSS.count(FrameIdx);
2326 // Otherwise, if it wasn't available, remember that it is now!
2327 Spills.addAvailable(FrameIdx, DestReg, !Folded);
2328 goto ProcessNextInst;
2334 unsigned SubIdx = MO.getSubReg();
2335 bool DoReMat = VRM.isReMaterialized(VirtReg);
2337 ReMatDefs.insert(&MI);
2339 // The only vregs left are stack slot definitions.
2340 int StackSlot = VRM.getStackSlot(VirtReg);
2341 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
2343 // If this def is part of a two-address operand, make sure to execute
2344 // the store from the correct physical register.
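// Illustrative example (hypothetical registers): if this def writes a
// sub-register and the tied use operand was rewritten to, say, AX, the
// spill must be performed from the containing super-register (EAX here),
// which findSuperReg recovers from the register class and sub-index.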
2347 if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
2348 PhysReg = MI.getOperand(TiedOp).getReg();
2350 unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
2351 assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
2352 "Can't find corresponding super-register!");
2356 PhysReg = VRM.getPhys(VirtReg);
2357 if (ReusedOperands.isClobbered(PhysReg)) {
2358 // Another def has taken the assigned physreg. It must have been a
2359 // use&def which got it due to reuse. Undo the reuse!
2360 PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
2361 Spills, MaybeDeadStores, RegKills, KillOps, VRM);
2365 assert(PhysReg && "VR not assigned a physical register?");
2366 RegInfo->setPhysRegUsed(PhysReg);
2367 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2368 ReusedOperands.markClobbered(RReg);
2369 MI.getOperand(i).setReg(RReg);
2370 MI.getOperand(i).setSubReg(0);
2373 MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
2374 SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
2375 LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
2376 NextMII = next(MII);
2378 // Check to see if this is a noop copy. If so, eliminate the
2379 // instruction before considering the dest reg to be changed.
2381 unsigned Src, Dst, SrcSR, DstSR;
2382 if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
2384 DEBUG(errs() << "Removing now-noop copy: " << MI);
2385 InvalidateKills(MI, TRI, RegKills, KillOps);
2386 VRM.RemoveMachineInstrFromMaps(&MI);
2389 UpdateKills(*LastStore, TRI, RegKills, KillOps);
2390 goto ProcessNextInst;
2396 // Delete dead instructions without side effects.
2397 if (!Erased && !BackTracked && isSafeToDelete(MI)) {
2398 InvalidateKills(MI, TRI, RegKills, KillOps);
2399 VRM.RemoveMachineInstrFromMaps(&MI);
2404 DistanceMap.insert(std::make_pair(&MI, Dist++));
2405 if (!Erased && !BackTracked) {
2406 for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
2407 UpdateKills(*II, TRI, RegKills, KillOps);
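// A typical invocation of the factory below (sketch; hypothetical
// caller-side names, mirroring how the register allocators of this
// vintage drive the rewriter):
//   VirtRegRewriter *Rewriter = createVirtRegRewriter();
//   Rewriter->runOnMachineFunction(MF, VRM, LIs);
//   delete Rewriter;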
2418 llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
2419 switch (RewriterOpt) {
2420 default: llvm_unreachable("Unreachable!");
2422 return new LocalRewriter();
2424 return new TrivialRewriter();