1 //===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass performs loop invariant code motion on machine instructions. We
11 // attempt to remove as much code from the body of a loop as possible.
13 // This pass does not attempt to throttle itself to limit register pressure.
14 // The register allocation phases are expected to perform rematerialization
15 // to recover when register pressure is high.
17 // This pass is not intended to be a replacement or a complete alternative
18 // for the LLVM-IR-level LICM pass. It is only designed to hoist simple
19 // constructs that are not exposed before lowering and instruction selection.
21 //===----------------------------------------------------------------------===//
23 #define DEBUG_TYPE "machine-licm"
24 #include "llvm/CodeGen/Passes.h"
25 #include "llvm/CodeGen/MachineDominators.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/MC/MCInstrItineraries.h"
32 #include "llvm/Target/TargetLowering.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetInstrInfo.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Analysis/AliasAnalysis.h"
37 #include "llvm/ADT/DenseMap.h"
38 #include "llvm/ADT/SmallSet.h"
39 #include "llvm/ADT/Statistic.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/raw_ostream.h"
// Command-line knob (the "static cl::opt<bool>" declaration line is missing
// from this listing): when enabled, MachineLICM avoids speculatively hoisting
// loads out of blocks that are not guaranteed to execute.
46 AvoidSpeculation("avoid-speculation",
47 cl::desc("MachineLICM should avoid speculation"),
48 cl::init(true), cl::Hidden);
// Pass statistics, printed with -stats. Several STATISTIC(...) opening lines
// are absent from this listing (original line numbering is non-contiguous).
51 "Number of machine instructions hoisted out of loops");
53 "Number of instructions hoisted in low reg pressure situation");
54 STATISTIC(NumHighLatency,
55 "Number of high latency instructions hoisted");
57 "Number of hoisted machine instructions CSEed");
58 STATISTIC(NumPostRAHoisted,
59 "Number of machine instructions hoisted out of loops post regalloc");
// MachineLICM - The pass object. Runs either pre-regalloc (on SSA virtual
// registers) or post-regalloc (on physical registers); the mode is detected
// in runOnMachineFunction via MRI->isSSA().
62 class MachineLICM : public MachineFunctionPass {
// Cached target hooks, set up once per function in runOnMachineFunction.
63 const TargetMachine *TM;
64 const TargetInstrInfo *TII;
65 const TargetLowering *TLI;
66 const TargetRegisterInfo *TRI;
67 const MachineFrameInfo *MFI;
68 MachineRegisterInfo *MRI;
69 const InstrItineraryData *InstrItins;
72 // Various analyses that we use...
73 AliasAnalysis *AA; // Alias analysis info.
74 MachineLoopInfo *MLI; // Current MachineLoopInfo
75 MachineDominatorTree *DT; // Machine dominator tree for the cur loop
77 // State that is updated as we process loops
78 bool Changed; // True if a loop is changed.
79 bool FirstInLoop; // True if it's the first LICM in the loop.
80 MachineLoop *CurLoop; // The current loop we are working on.
81 MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
82 // Track 'estimated' register pressure.
84 SmallSet<unsigned, 32> RegSeen;
85 SmallVector<unsigned, 8> RegPressure;
87 // Register pressure "limit" per register class. If the pressure
88 // is higher than the limit, then it's considered high.
89 SmallVector<unsigned, 8> RegLimit;
91 // Register pressure on path leading from loop preheader to current BB.
92 SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
94 // For each opcode, keep a list of potential CSE instructions.
95 DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
103 // If a MBB does not dominate loop exiting blocks then it may not be safe
104 // to hoist loads from this block.
105 // Tri-state: 0 - false, 1 - true, 2 - unknown
106 unsigned SpeculationState;
109 static char ID; // Pass identification, replacement for typeid
// Default constructor header is missing from this listing; this initializer
// list defaults to the pre-regalloc mode.
111 MachineFunctionPass(ID), PreRegAlloc(true) {
112 initializeMachineLICMPass(*PassRegistry::getPassRegistry());
115 explicit MachineLICM(bool PreRA) :
116 MachineFunctionPass(ID), PreRegAlloc(PreRA) {
117 initializeMachineLICMPass(*PassRegistry::getPassRegistry());
120 virtual bool runOnMachineFunction(MachineFunction &MF);
122 const char *getPassName() const { return "Machine Instruction LICM"; }
// Declare analysis dependencies: loop info, dominators, and alias analysis
// are required; the first two are preserved by this pass.
124 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
125 AU.addRequired<MachineLoopInfo>();
126 AU.addRequired<MachineDominatorTree>();
127 AU.addRequired<AliasAnalysis>();
128 AU.addPreserved<MachineLoopInfo>();
129 AU.addPreserved<MachineDominatorTree>();
130 MachineFunctionPass::getAnalysisUsage(AU);
// Drop per-function state; the loop body that clears each CSEMap entry is
// missing from this listing.
133 virtual void releaseMemory() {
138 for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
139 CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
145 /// CandidateInfo - Keep track of information about hoisting candidates.
146 struct CandidateInfo {
150 CandidateInfo(MachineInstr *mi, unsigned def, int fi)
151 : MI(mi), Def(def), FI(fi) {}
154 /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
155 /// invariants out to the preheader.
156 void HoistRegionPostRA();
158 /// HoistPostRA - When an instruction is found to only use loop invariant
159 /// operands that is safe to hoist, this instruction is called to do the
161 void HoistPostRA(MachineInstr *MI, unsigned Def);
163 /// ProcessMI - Examine the instruction for potential LICM candidate. Also
164 /// gather register def and frame object update information.
165 void ProcessMI(MachineInstr *MI,
166 BitVector &PhysRegDefs,
167 BitVector &PhysRegClobbers,
168 SmallSet<int, 32> &StoredFIs,
169 SmallVector<CandidateInfo, 32> &Candidates);
171 /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
173 void AddToLiveIns(unsigned Reg);
175 /// IsLICMCandidate - Returns true if the instruction may be a suitable
176 /// candidate for LICM. e.g. If the instruction is a call, then it's
177 /// obviously not safe to hoist it.
178 bool IsLICMCandidate(MachineInstr &I);
180 /// IsLoopInvariantInst - Returns true if the instruction is loop
181 /// invariant. I.e., all virtual register operands are defined outside of
182 /// the loop, physical registers aren't accessed (explicitly or implicitly),
183 /// and the instruction is hoistable.
185 bool IsLoopInvariantInst(MachineInstr &I);
187 /// HasAnyPHIUse - Return true if the specified register is used by any
189 bool HasAnyPHIUse(unsigned Reg) const;
191 /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
192 /// and a use in the current loop, return true if the target considered
194 bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
197 bool IsCheapInstruction(MachineInstr &MI) const;
199 /// CanCauseHighRegPressure - Visit BBs from header to current BB,
200 /// check if hoisting an instruction of the given cost matrix can cause high
201 /// register pressure.
202 bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
204 /// UpdateBackTraceRegPressure - Traverse the back trace from header to
205 /// the current block and update their register pressures to reflect the
206 /// effect of hoisting MI from the current block to the preheader.
207 void UpdateBackTraceRegPressure(const MachineInstr *MI);
209 /// IsProfitableToHoist - Return true if it is potentially profitable to
210 /// hoist the given loop invariant.
211 bool IsProfitableToHoist(MachineInstr &MI);
213 /// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
214 /// If not then a load from this mbb may not be safe to hoist.
215 bool IsGuaranteedToExecute(MachineBasicBlock *BB);
217 void EnterScope(MachineBasicBlock *MBB);
219 void ExitScope(MachineBasicBlock *MBB);
221 /// ExitScopeIfDone - Destroy scope for the MBB that corresponds to given
222 /// dominator tree node if it's a leaf or all of its children are done. Walk
223 /// up the dominator tree to destroy ancestors which are now done.
224 void ExitScopeIfDone(MachineDomTreeNode *Node,
225 DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
226 DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
228 /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
229 /// blocks dominated by the specified header block, and that are in the
230 /// current loop) in depth first order w.r.t the DominatorTree. This allows
231 /// us to visit definitions before uses, allowing us to hoist a loop body in
232 /// one pass without iteration.
234 void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);
235 void HoistRegion(MachineDomTreeNode *N, bool IsHeader);
237 /// getRegisterClassIDAndCost - For a given MI, register, and the operand
238 /// index, return the ID and cost of its representative register class by
240 void getRegisterClassIDAndCost(const MachineInstr *MI,
241 unsigned Reg, unsigned OpIdx,
242 unsigned &RCId, unsigned &RCCost) const;
244 /// InitRegPressure - Find all virtual register references that are liveout
245 /// of the preheader to initialize the starting "register pressure". Note
246 /// this does not count live through (livein but not used) registers.
247 void InitRegPressure(MachineBasicBlock *BB);
249 /// UpdateRegPressure - Update estimate of register pressure after the
250 /// specified instruction.
251 void UpdateRegPressure(const MachineInstr *MI);
253 /// ExtractHoistableLoad - Unfold a load from the given machineinstr if
254 /// the load itself could be hoisted. Return the unfolded and hoistable
255 /// load, or null if the load couldn't be unfolded or if it wouldn't
257 MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
259 /// LookForDuplicate - Find an instruction among PrevMIs that is a
260 /// duplicate of MI. Return this instruction if it's found.
261 const MachineInstr *LookForDuplicate(const MachineInstr *MI,
262 std::vector<const MachineInstr*> &PrevMIs);
264 /// EliminateCSE - Given a LICM'ed instruction, look for an instruction on
265 /// the preheader that computes the same value. If it's found, do a RAU on
266 /// with the definition of the existing instruction rather than hoisting
267 /// the instruction to the preheader.
268 bool EliminateCSE(MachineInstr *MI,
269 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
271 /// MayCSE - Return true if the given instruction will be CSE'd if it's
272 /// hoisted out of the loop.
273 bool MayCSE(MachineInstr *MI);
275 /// Hoist - When an instruction is found to only use loop invariant operands
276 /// that is safe to hoist, this instruction is called to do the dirty work.
277 /// It returns true if the instruction is hoisted.
278 bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
280 /// InitCSEMap - Initialize the CSE map with instructions that are in the
281 /// current loop preheader that may become duplicates of instructions that
282 /// are hoisted out of the loop.
283 void InitCSEMap(MachineBasicBlock *BB);
285 /// getCurPreheader - Get the preheader for the current loop, splitting
286 /// a critical edge if needed.
287 MachineBasicBlock *getCurPreheader();
289 } // end anonymous namespace
// Pass registration: binds the pass to the "machinelicm" command-line name
// and records its analysis dependencies for the pass manager.
291 char MachineLICM::ID = 0;
292 INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
293 "Machine Loop Invariant Code Motion", false, false)
294 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
295 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
296 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
297 INITIALIZE_PASS_END(MachineLICM, "machinelicm",
298 "Machine Loop Invariant Code Motion", false, false)
// Factory used by the target pass pipeline; default-constructs the
// pre-regalloc variant. (The closing brace is absent from this listing.)
300 FunctionPass *llvm::createMachineLICMPass() {
301 return new MachineLICM();
304 /// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
305 /// loop that has a unique predecessor.
// NOTE(review): the original line numbering is non-contiguous here — the
// `return` statements after each condition are missing from this listing;
// verify against the upstream file.
306 static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
307 // Check whether this loop even has a unique predecessor.
308 if (!CurLoop->getLoopPredecessor())
310 // Ok, now check to see if any of its outer loops do.
311 for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
312 if (L->getLoopPredecessor())
314 // None of them did, so this is the outermost with a unique predecessor.
// runOnMachineFunction - Entry point. Caches target hooks and analyses,
// sizes the per-register-class pressure arrays, then walks the loop worklist
// outermost-first, hoisting out of each loop that has a unique predecessor.
// NOTE(review): several lines (mode branch, loop-body statements, closing
// braces) are absent from this listing; verify against the upstream file.
318 bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
320 DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
322 DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
323 DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
325 Changed = FirstInLoop = false;
326 TM = &MF.getTarget();
327 TII = TM->getInstrInfo();
328 TLI = TM->getTargetLowering();
329 TRI = TM->getRegisterInfo();
330 MFI = MF.getFrameInfo();
331 MRI = &MF.getRegInfo();
332 InstrItins = TM->getInstrItineraryData();
// Pre-regalloc mode is equivalent to operating on SSA form.
334 PreRegAlloc = MRI->isSSA();
337 // Estimate register pressure during pre-regalloc pass.
338 unsigned NumRC = TRI->getNumRegClasses();
339 RegPressure.resize(NumRC);
340 std::fill(RegPressure.begin(), RegPressure.end(), 0);
341 RegLimit.resize(NumRC);
342 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
343 E = TRI->regclass_end(); I != E; ++I)
344 RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, MF);
347 // Get our Loop information...
348 MLI = &getAnalysis<MachineLoopInfo>();
349 DT = &getAnalysis<MachineDominatorTree>();
350 AA = &getAnalysis<AliasAnalysis>();
352 SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
353 while (!Worklist.empty()) {
354 CurLoop = Worklist.pop_back_val();
357 // If this is done before regalloc, only visit outer-most preheader-sporting
// Loops without a unique predecessor are skipped here; their subloops are
// queued so they still get a chance to be processed individually.
359 if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
360 Worklist.append(CurLoop->begin(), CurLoop->end());
367 // CSEMap is initialized for loop header when the first instruction is
369 MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
379 /// InstructionStoresToFI - Return true if instruction stores to the
// Scans MI's memory operands for a store whose value is the fixed stack
// slot FI. (Closing braces/return are absent from this listing.)
381 static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
382 for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
383 oe = MI->memoperands_end(); o != oe; ++o) {
// Only stores with a known underlying value are interesting.
384 if (!(*o)->isStore() || !(*o)->getValue())
386 if (const FixedStackPseudoSourceValue *Value =
387 dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
388 if (Value->getFrameIndex() == FI)
395 /// ProcessMI - Examine the instruction for potential LICM candidate. Also
396 /// gather register def and frame object update information.
// Post-regalloc helper: records frame indices stored to in the loop,
// accumulates physreg def/clobber bits, and appends viable candidates.
// NOTE(review): lines are missing throughout this listing (non-contiguous
// original numbering); verify control flow against the upstream file.
397 void MachineLICM::ProcessMI(MachineInstr *MI,
398 BitVector &PhysRegDefs,
399 BitVector &PhysRegClobbers,
400 SmallSet<int, 32> &StoredFIs,
401 SmallVector<CandidateInfo, 32> &Candidates) {
402 bool RuledOut = false;
403 bool HasNonInvariantUse = false;
405 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
406 const MachineOperand &MO = MI->getOperand(i);
408 // Remember if the instruction stores to the frame index.
409 int FI = MO.getIndex();
410 if (!StoredFIs.count(FI) &&
411 MFI->isSpillSlotObjectIndex(FI) &&
412 InstructionStoresToFI(MI, FI))
413 StoredFIs.insert(FI);
414 HasNonInvariantUse = true;
418 // We can't hoist an instruction defining a physreg that is clobbered in
420 if (MO.isRegMask()) {
421 PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
427 unsigned Reg = MO.getReg();
// Post-regalloc: only physical registers are expected here.
430 assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
431 "Not expecting virtual register!");
434 if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
435 // If it's using a non-loop-invariant register, then it's obviously not
437 HasNonInvariantUse = true;
441 if (MO.isImplicit()) {
442 for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS)
443 PhysRegClobbers.set(*AS);
445 // Non-dead implicit def? This cannot be hoisted.
447 // No need to check if a dead implicit def is also defined by
448 // another instruction.
452 // FIXME: For now, avoid instructions with multiple defs, unless
453 // it's a dead implicit def.
459 // If we have already seen another instruction that defines the same
460 // register, then this is not safe. Two defs is indicated by setting a
461 // PhysRegClobbers bit.
462 for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS) {
463 if (PhysRegDefs.test(*AS))
464 PhysRegClobbers.set(*AS)
465 if (PhysRegClobbers.test(*AS))
466 // MI defined register is seen defined by another instruction in
467 // the loop, it cannot be a LICM candidate.
469 PhysRegDefs.set(*AS);
473 // Only consider reloads for now and remats which do not have register
474 // operands. FIXME: Consider unfold load folding instructions.
475 if (Def && !RuledOut) {
477 if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
478 (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
479 Candidates.push_back(CandidateInfo(MI, Def, FI));
483 /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
484 /// invariants out to the preheader.
// Two phases: (1) scan every block to collect physreg def counts, stored
// frame indices, and candidate instructions via ProcessMI; (2) hoist each
// candidate whose def is not multiply-defined and whose uses are invariant.
485 void MachineLICM::HoistRegionPostRA() {
486 unsigned NumRegs = TRI->getNumRegs();
487 BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
488 BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.
490 SmallVector<CandidateInfo, 32> Candidates;
491 SmallSet<int, 32> StoredFIs;
493 // Walk the entire region, count number of defs for each register, and
494 // collect potential LICM candidates.
495 const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
496 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
497 MachineBasicBlock *BB = Blocks[i];
499 // If the header of the loop containing this basic block is a landing pad,
500 // then don't try to hoist instructions out of this loop.
501 const MachineLoop *ML = MLI->getLoopFor(BB);
502 if (ML && ML->getHeader()->isLandingPad()) continue;
504 // Conservatively treat live-in's as an external def.
505 // FIXME: That means a reload that is reused in successor block(s) will not
507 for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
508 E = BB->livein_end(); I != E; ++I) {
510 for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS)
511 PhysRegDefs.set(*AS);
// Reset the guaranteed-to-execute cache for each block.
514 SpeculationState = SpeculateUnknown;
515 for (MachineBasicBlock::iterator
516 MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
517 MachineInstr *MI = &*MII;
518 ProcessMI(MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
522 // Now evaluate whether the potential candidates qualify.
523 // 1. Check if the candidate defined register is defined by another
524 // instruction in the loop.
525 // 2. If the candidate is a load from stack slot (always true for now),
526 // check if the slot is stored anywhere in the loop.
527 for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
528 if (Candidates[i].FI != INT_MIN &&
529 StoredFIs.count(Candidates[i].FI))
532 if (!PhysRegClobbers.test(Candidates[i].Def)) {
534 MachineInstr *MI = Candidates[i].MI;
535 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
536 const MachineOperand &MO = MI->getOperand(j);
537 if (!MO.isReg() || MO.isDef() || !MO.getReg())
539 if (PhysRegDefs.test(MO.getReg()) ||
540 PhysRegClobbers.test(MO.getReg())) {
541 // If it's using a non-loop-invariant register, then it's obviously
542 // not safe to hoist.
548 HoistPostRA(MI, Candidates[i].Def);
553 /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
554 /// loop, and make sure it is not killed by any instructions in the loop.
// After hoisting, the def register must stay live through the whole loop so
// later passes (e.g. the scavenger) don't reuse it; kill flags on in-loop
// uses of Reg (or its super-registers) must therefore be cleared.
555 void MachineLICM::AddToLiveIns(unsigned Reg) {
556 const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
557 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
558 MachineBasicBlock *BB = Blocks[i];
559 if (!BB->isLiveIn(Reg))
561 for (MachineBasicBlock::iterator
562 MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
563 MachineInstr *MI = &*MII;
564 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
565 MachineOperand &MO = MI->getOperand(i);
566 if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
567 if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
574 /// HoistPostRA - When an instruction is found to only use loop invariant
575 /// operands that is safe to hoist, this instruction is called to do the
// Splices MI into the loop preheader (before its terminators) and keeps the
// hoisted def live throughout the loop body.
577 void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
578 MachineBasicBlock *Preheader = getCurPreheader();
// getCurPreheader may fail (e.g. unsplittable critical edge); bail out.
579 if (!Preheader) return;
581 // Now move the instructions to the predecessor, inserting it before any
582 // terminator instructions.
583 DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
584 << MI->getParent()->getNumber() << ": " << *MI);
586 // Splice the instruction to the preheader.
587 MachineBasicBlock *MBB = MI->getParent();
588 Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
590 // Add register to livein list to all the BBs in the current loop since a
591 // loop invariant must be kept live throughout the whole loop. This is
592 // important to ensure later passes do not scavenge the def register.
599 // IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
600 // If not then a load from this mbb may not be safe to hoist.
// Result is memoized in SpeculationState for the duration of one block's
// scan: a block is "guaranteed" if it is the header or dominates every
// exiting block of the current loop.
601 bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
602 if (SpeculationState != SpeculateUnknown)
603 return SpeculationState == SpeculateFalse;
605 if (BB != CurLoop->getHeader()) {
606 // Check loop exiting blocks.
607 SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
608 CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
609 for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
610 if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
611 SpeculationState = SpeculateTrue;
616 SpeculationState = SpeculateFalse;
// EnterScope - Push the current register-pressure snapshot when descending
// into MBB during the dominator-tree walk; ExitScope pops it.
620 void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
621 DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
623 // Remember livein register pressure.
624 BackTrace.push_back(RegPressure);
// ExitScope - Pop the pressure snapshot pushed by EnterScope for MBB.
627 void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
628 DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
629 BackTrace.pop_back();
632 /// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
633 /// dominator tree node if it's a leaf or all of its children are done. Walk
634 /// up the dominator tree to destroy ancestors which are now done.
635 void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
636 DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
637 DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
// Still-open children mean this scope is not yet finished.
638 if (OpenChildren[Node])
642 ExitScope(Node->getBlock());
644 // Now traverse upwards to pop ancestors whose offspring are all done.
645 while (MachineDomTreeNode *Parent = ParentMap[Node]) {
646 unsigned Left = --OpenChildren[Parent];
649 ExitScope(Parent->getBlock());
654 /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
655 /// blocks dominated by the specified header block, and that are in the
656 /// current loop) in depth first order w.r.t the DominatorTree. This allows
657 /// us to visit definitions before uses, allowing us to hoist a loop body in
658 /// one pass without iteration.
// NOTE(review): several lines (the do/while opener, some continues/braces)
// are absent from this listing; verify against the upstream file.
660 void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
661 SmallVector<MachineDomTreeNode*, 32> Scopes;
662 SmallVector<MachineDomTreeNode*, 8> WorkList;
663 DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
664 DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
666 // Perform a DFS walk to determine the order of visit.
667 WorkList.push_back(HeaderN);
669 MachineDomTreeNode *Node = WorkList.pop_back_val();
670 assert(Node != 0 && "Null dominator tree node?");
671 MachineBasicBlock *BB = Node->getBlock();
673 // If the header of the loop containing this basic block is a landing pad,
674 // then don't try to hoist instructions out of this loop.
675 const MachineLoop *ML = MLI->getLoopFor(BB);
676 if (ML && ML->getHeader()->isLandingPad())
679 // If this subregion is not in the top level loop at all, exit.
680 if (!CurLoop->contains(BB))
683 Scopes.push_back(Node);
684 const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
685 unsigned NumChildren = Children.size();
687 // Don't hoist things out of a large switch statement. This often causes
688 // code to be hoisted that wasn't going to be executed, and increases
689 // register pressure in a situation where it's likely to matter.
690 if (BB->succ_size() >= 25)
693 OpenChildren[Node] = NumChildren;
694 // Add children in reverse order as then the next popped worklist node is
695 // the first child of this node. This means we ultimately traverse the
696 // DOM tree in exactly the same order as if we'd recursed.
697 for (int i = (int)NumChildren-1; i >= 0; --i) {
698 MachineDomTreeNode *Child = Children[i];
699 ParentMap[Child] = Node;
700 WorkList.push_back(Child);
702 } while (!WorkList.empty());
704 if (Scopes.size() != 0) {
705 MachineBasicBlock *Preheader = getCurPreheader();
709 // Compute registers which are livein into the loop headers.
712 InitRegPressure(Preheader);
// Second pass: visit the collected scopes in DFS order and try to hoist
// each instruction; register pressure is tracked per scope.
716 for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
717 MachineDomTreeNode *Node = Scopes[i];
718 MachineBasicBlock *MBB = Node->getBlock();
720 MachineBasicBlock *Preheader = getCurPreheader();
727 SpeculationState = SpeculateUnknown;
728 for (MachineBasicBlock::iterator
729 MII = MBB->begin(), E = MBB->end(); MII != E; ) {
// NextMII is captured first because Hoist may remove/splice MI.
730 MachineBasicBlock::iterator NextMII = MII; ++NextMII;
731 MachineInstr *MI = &*MII;
732 if (!Hoist(MI, Preheader))
733 UpdateRegPressure(MI);
737 // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
738 ExitScopeIfDone(Node, OpenChildren, ParentMap);
// isOperandKill - Treat an operand as a kill if it is flagged as such, or if
// it is the sole non-debug use of its virtual register.
742 static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
743 return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
746 /// getRegisterClassIDAndCost - For a given MI, register, and the operand
747 /// index, return the ID and cost of its representative register class.
// Maps Reg's register class through its first value type to the target's
// representative class/cost. (The MVT::Untyped branch body is absent from
// this listing.)
749 MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
750 unsigned Reg, unsigned OpIdx,
751 unsigned &RCId, unsigned &RCCost) const {
752 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
753 EVT VT = *RC->vt_begin();
754 if (VT == MVT::Untyped) {
758 RCId = TLI->getRepRegClassFor(VT)->getID();
759 RCCost = TLI->getRepRegClassCostFor(VT);
763 /// InitRegPressure - Find all virtual register references that are liveout of
764 /// the preheader to initialize the starting "register pressure". Note this
765 /// does not count live through (livein but not used) registers.
766 void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
767 std::fill(RegPressure.begin(), RegPressure.end(), 0);
769 // If the preheader has only a single predecessor and it ends with a
770 // fallthrough or an unconditional branch, then scan its predecessor for live
771 // defs as well. This happens whenever the preheader is created by splitting
772 // the critical edge from the loop predecessor to the loop header.
773 if (BB->pred_size() == 1) {
774 MachineBasicBlock *TBB = 0, *FBB = 0;
775 SmallVector<MachineOperand, 4> Cond;
776 if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
777 InitRegPressure(*BB->pred_begin());
780 for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
782 MachineInstr *MI = &*MII;
783 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
784 const MachineOperand &MO = MI->getOperand(i);
785 if (!MO.isReg() || MO.isImplicit())
787 unsigned Reg = MO.getReg();
788 if (!TargetRegisterInfo::isVirtualRegister(Reg))
// insert() returns true only the first time Reg is seen.
791 bool isNew = RegSeen.insert(Reg);
792 unsigned RCId, RCCost;
793 getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
795 RegPressure[RCId] += RCCost;
797 bool isKill = isOperandKill(MO, MRI);
798 if (isNew && !isKill)
799 // Haven't seen this, it must be a livein.
800 RegPressure[RCId] += RCCost;
801 else if (!isNew && isKill)
802 RegPressure[RCId] -= RCCost;
808 /// UpdateRegPressure - Update estimate of register pressure after the
809 /// specified instruction.
// Kills of already-seen vregs decrease pressure (clamped at zero); defs,
// collected first, increase it afterwards.
810 void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
// Implicit defs define no real value; they don't affect pressure.
811 if (MI->isImplicitDef())
814 SmallVector<unsigned, 4> Defs;
815 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
816 const MachineOperand &MO = MI->getOperand(i);
817 if (!MO.isReg() || MO.isImplicit())
819 unsigned Reg = MO.getReg();
820 if (!TargetRegisterInfo::isVirtualRegister(Reg))
823 bool isNew = RegSeen.insert(Reg);
826 else if (!isNew && isOperandKill(MO, MRI)) {
827 unsigned RCId, RCCost;
828 getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
// Clamp at zero: the estimate must never go negative.
829 if (RCCost > RegPressure[RCId])
830 RegPressure[RCId] = 0;
832 RegPressure[RCId] -= RCCost;
837 while (!Defs.empty()) {
838 unsigned Reg = Defs.pop_back_val();
839 unsigned RCId, RCCost;
840 getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
841 RegPressure[RCId] += RCCost;
846 /// isLoadFromGOTOrConstantPool - Return true if this machine instruction
847 /// loads from global offset table or constant pool.
// Checked via the pseudo source values on MI's memory operands; such loads
// are always safe to speculate.
848 static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
849 assert (MI.mayLoad() && "Expected MI that loads!");
850 for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
851 E = MI.memoperands_end(); I != E; ++I) {
852 if (const Value *V = (*I)->getValue()) {
853 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
854 if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
861 /// IsLICMCandidate - Returns true if the instruction may be a suitable
862 /// candidate for LICM. e.g. If the instruction is a call, then it's obviously
863 /// not safe to hoist it.
864 bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
865 // Check if it's safe to move the instruction.
866 bool DontMoveAcrossStore = true;
867 if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
870 // If it is load then check if it is guaranteed to execute by making sure that
871 // it dominates all exiting blocks. If it doesn't, then there is a path out of
872 // the loop which does not execute this load, so we can't hoist it. Loads
873 // from constant memory are not safe to speculate all the time, for example
874 // indexed load from a jump table.
875 // Stores and side effects are already checked by isSafeToMove.
876 if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
877 !IsGuaranteedToExecute(I.getParent()))
883 /// IsLoopInvariantInst - Returns true if the instruction is loop
884 /// invariant. I.e., all virtual register operands are defined outside of the
885 /// loop, physical registers aren't accessed explicitly, and there are no side
886 /// effects that aren't captured by the operands or other flags.
888 bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
889 if (!IsLICMCandidate(I))
892 // The instruction is loop invariant if all of its operands are.
893 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
894 const MachineOperand &MO = I.getOperand(i);
899 unsigned Reg = MO.getReg();
900 if (Reg == 0) continue;
902 // Don't hoist an instruction that uses or defines a physical register.
903 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
905 // If the physreg has no defs anywhere, it's just an ambient register
906 // and we can freely move its uses. Alternatively, if it's allocatable,
907 // it could get allocated to something with a def during allocation.
908 if (!MRI->isConstantPhysReg(Reg, *I.getParent()->getParent()))
910 // Otherwise it's safe to move.
912 } else if (!MO.isDead()) {
913 // A def that isn't dead. We can't move it.
915 } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
916 // If the reg is live into the loop, we can't hoist an instruction
917 // which would clobber it.
// Pre-regalloc: every virtual register use must have a reaching def.
925 assert(MRI->getVRegDef(Reg) &&
926 "Machine instr not mapped for this vreg?!");
928 // If the loop contains the definition of an operand, then the instruction
929 // isn't loop invariant.
930 if (CurLoop->contains(MRI->getVRegDef(Reg)))
934 // If we got this far, the instruction is loop invariant!
939 /// HasAnyPHIUse - Return true if the specified register is used by any
// PHI uses make hoisting unattractive: the value would be live across the
// back-edge. Copies are traversed so a PHI use through a copy also counts.
941 bool MachineLICM::HasAnyPHIUse(unsigned Reg) const {
942 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
943 UE = MRI->use_end(); UI != UE; ++UI) {
944 MachineInstr *UseMI = &*UI;
947 // Look past copies as well.
948 if (UseMI->isCopy()) {
949 unsigned Def = UseMI->getOperand(0).getReg();
950 if (TargetRegisterInfo::isVirtualRegister(Def) &&
958 /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
959 /// and a use in the current loop, return true if the target considered
/// that latency high (delegates the judgment to
/// TargetInstrInfo::hasHighOperandLatency).
961 bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
962 unsigned DefIdx, unsigned Reg) const {
// Without itinerary data, or with no non-debug uses, latency can't be judged.
963 if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
966 for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
967 E = MRI->use_nodbg_end(); I != E; ++I) {
968 MachineInstr *UseMI = &*I;
// Copy-like uses don't represent real computation; skip them.
969 if (UseMI->isCopyLike())
// Only uses inside the current loop are interesting.
971 if (!CurLoop->contains(UseMI->getParent()))
// Find the operand(s) of UseMI that actually read Reg and ask the
// target about the def->use latency.
973 for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
974 const MachineOperand &MO = UseMI->getOperand(i);
975 if (!MO.isReg() || !MO.isUse())
977 unsigned MOReg = MO.getReg();
981 if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
985 // Only look at the first in-loop use.
992 /// IsCheapInstruction - Return true if the instruction is marked "cheap" or
993 /// the operand latency between its def and a use is one or less.
994 bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
// Trivially cheap: as-cheap-as-a-move instructions and copy-like instrs.
995 if (MI.isAsCheapAsAMove() || MI.isCopyLike())
// Without itinerary data we can't reason about def latency.
997 if (!InstrItins || InstrItins->isEmpty())
1000 bool isCheap = false;
1001 unsigned NumDefs = MI.getDesc().getNumDefs();
// Scan operands, but only while defs remain to be examined (the 'NumDefs &&'
// term lets the loop stop early once every def has been seen).
1002 for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
1003 MachineOperand &DefMO = MI.getOperand(i);
1004 if (!DefMO.isReg() || !DefMO.isDef())
1007 unsigned Reg = DefMO.getReg();
// Physical-register defs are handled specially here; only virtual defs
// are checked for low latency below.
1008 if (TargetRegisterInfo::isPhysicalRegister(Reg))
// A def without low latency means the instruction is not cheap.
1011 if (!TII->hasLowDefLatency(InstrItins, &MI, i))
1019 /// CanCauseHighRegPressure - Visit BBs from header to current BB, check
1020 /// if hoisting an instruction of the given cost matrix can cause high
1021 /// register pressure.
1022 bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
// Cost maps register-class id -> net pressure contribution of the
// instruction being considered for hoisting.
1023 for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
// Non-positive contributions cannot increase pressure; skip them.
1025 if (CI->second <= 0)
1028 unsigned RCId = CI->first;
1029 unsigned Limit = RegLimit[RCId];
// NOTE(review): this local 'Cost' shadows the 'Cost' map parameter above;
// consider renaming it (e.g. RCCost) for clarity.
1030 int Cost = CI->second;
// Walk the back trace (header to current block) from the innermost end.
1031 for (unsigned i = BackTrace.size(); i != 0; --i) {
1032 SmallVector<unsigned, 8> &RP = BackTrace[i-1];
// Hoisting would push this block's pressure for RCId to or past its limit.
1033 if (RP[RCId] + Cost >= Limit)
1041 /// UpdateBackTraceRegPressure - Traverse the back trace from header to the
1042 /// current block and update their register pressures to reflect the effect
1043 /// of hoisting MI from the current block to the preheader.
1044 void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
// IMPLICIT_DEFs contribute nothing to register pressure.
1045 if (MI->isImplicitDef())
1048 // First compute the 'cost' of the instruction, i.e. its contribution
1049 // to register pressure.
1050 DenseMap<unsigned, int> Cost;
1051 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
1052 const MachineOperand &MO = MI->getOperand(i);
1053 if (!MO.isReg() || MO.isImplicit())
1055 unsigned Reg = MO.getReg();
// Only virtual registers participate in the pressure estimate.
1056 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1059 unsigned RCId, RCCost;
1060 getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
// Defs add RCCost to the class's pressure ...
1062 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1063 if (CI != Cost.end())
1064 CI->second += RCCost;
1066 Cost.insert(std::make_pair(RCId, RCCost));
1067 } else if (isOperandKill(MO, MRI)) {
// ... while killed uses subtract it: hoisting the last use of a register
// out of the loop ends its live range inside the loop.
1068 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1069 if (CI != Cost.end())
1070 CI->second -= RCCost;
1072 Cost.insert(std::make_pair(RCId, -RCCost));
1076 // Update register pressure of blocks from loop header to current block.
1077 for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
1078 SmallVector<unsigned, 8> &RP = BackTrace[i];
1079 for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
1081 unsigned RCId = CI->first;
1082 RP[RCId] += CI->second;
1087 /// IsProfitableToHoist - Return true if it is potentially profitable to hoist
1088 /// the given loop invariant.
1089 bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
// IMPLICIT_DEFs are free to hoist.
1090 if (MI.isImplicitDef())
1093 // If the instruction is cheap, only hoist if it is re-materializable. LICM
1094 // will increase register pressure. It's probably not worth it if the
1095 // instruction is cheap.
1096 // Also hoist loads from constant memory, e.g. load from stubs, GOT. Hoisting
1097 // these tend to help performance in low register pressure situation. The
1098 // trade off is it may cause spill in high pressure situation. It will end up
1099 // adding a store in the loop preheader. But the reload is no more expensive.
1100 // The side benefit is these loads are frequently CSE'ed.
1101 if (IsCheapInstruction(MI)) {
1102 if (!TII->isTriviallyReMaterializable(&MI, AA))
1105 // Estimate register pressure to determine whether to LICM the instruction.
1106 // In low register pressure situation, we can be more aggressive about
1107 // hoisting. Also, favors hoisting long latency instructions even in
1108 // moderately high pressure situation.
1109 // FIXME: If there are long latency loop-invariant instructions inside the
1110 // loop at this point, why didn't the optimizer's LICM hoist them?
1111 DenseMap<unsigned, int> Cost;
1112 for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
1113 const MachineOperand &MO = MI.getOperand(i);
1114 if (!MO.isReg() || MO.isImplicit())
1116 unsigned Reg = MO.getReg();
// Only virtual registers participate in the pressure estimate.
1117 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1120 unsigned RCId, RCCost;
1121 getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
// A def feeding a high-latency in-loop use makes hoisting attractive.
1123 if (HasHighOperandLatency(MI, i, Reg)) {
// Defs add RCCost to the class's estimated pressure contribution.
1128 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1129 if (CI != Cost.end())
1130 CI->second += RCCost;
1132 Cost.insert(std::make_pair(RCId, RCCost));
1133 } else if (isOperandKill(MO, MRI)) {
1134 // If a virtual register use is a kill, hoisting it out of the loop
1135 // may actually reduce register pressure or be register pressure
1137 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1138 if (CI != Cost.end())
1139 CI->second -= RCCost;
1141 Cost.insert(std::make_pair(RCId, -RCCost));
1145 // Visit BBs from header to current BB, if hoisting this doesn't cause
1146 // high register pressure, then it's safe to proceed.
1147 if (!CanCauseHighRegPressure(Cost)) {
1152 // Do not "speculate" in high register pressure situation. If an
1153 // instruction is not guaranteed to be executed in the loop, it's best to be
// conservative unless hoisting it would enable a CSE with an existing
// preheader instruction (MayCSE).
1155 if (AvoidSpeculation &&
1156 (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI)))
1159 // High register pressure situation, only hoist if the instruction is going to
// be cheap to rematerialize or reload (trivially rematerializable, or an
// invariant load).
1161 if (!TII->isTriviallyReMaterializable(&MI, AA) &&
1162 !MI.isInvariantLoad(AA))
1166 // If result(s) of this instruction is used by PHIs outside of the loop, then
1167 // don't hoist it because it will introduce an extra copy.
1168 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1169 const MachineOperand &MO = MI.getOperand(i);
1170 if (!MO.isReg() || !MO.isDef())
1172 if (HasAnyPHIUse(MO.getReg()))
/// ExtractHoistableLoad - Try to unfold an invariant load from MI so the
/// load itself can be hoisted; returns the hoistable load on success,
/// null otherwise.
1179 MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
1180 // Don't unfold simple loads.
1181 if (MI->canFoldAsLoad())
1184 // If not, we may be able to unfold a load and hoist that.
1185 // First test whether the instruction is loading from an amenable
// (invariant) memory location.
1187 if (!MI->isInvariantLoad(AA))
1190 // Next determine the register class for a temporary register.
1191 unsigned LoadRegIndex;
1193 TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
1194 /*UnfoldLoad=*/true,
1195 /*UnfoldStore=*/false,
// An opcode of 0 means the target cannot unfold this instruction.
1197 if (NewOpc == 0) return 0;
1198 const MCInstrDesc &MID = TII->get(NewOpc);
// Only single-def results are handled.
1199 if (MID.getNumDefs() != 1) return 0;
1200 const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
1201 // Ok, we're unfolding. Create a temporary register and do the unfold.
1202 unsigned Reg = MRI->createVirtualRegister(RC);
1204 MachineFunction &MF = *MI->getParent()->getParent();
1205 SmallVector<MachineInstr *, 2> NewMIs;
1207 TII->unfoldMemoryOperand(MF, MI, Reg,
1208 /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
1212 "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
// Expect exactly two instructions: the load and the remainder op.
1214 assert(NewMIs.size() == 2 &&
1215 "Unfolded a load into multiple instructions!");
// Insert both new instructions in place of MI.
1216 MachineBasicBlock *MBB = MI->getParent();
1217 MachineBasicBlock::iterator Pos = MI;
1218 MBB->insert(Pos, NewMIs[0]);
1219 MBB->insert(Pos, NewMIs[1]);
1220 // If unfolding produced a load that wasn't loop-invariant or profitable to
1221 // hoist, discard the new instructions and bail.
1222 if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
1223 NewMIs[0]->eraseFromParent();
1224 NewMIs[1]->eraseFromParent();
1228 // Update register pressure for the unfolded instruction.
1229 UpdateRegPressure(NewMIs[1]);
1231 // Otherwise we successfully unfolded a load that we can hoist.
1232 MI->eraseFromParent();
/// InitCSEMap - Seed the CSE map with the instructions already in BB
/// (the preheader), keyed by opcode, so hoisted instructions can be
/// CSE'd against them.
1236 void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
1237 for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
1238 const MachineInstr *MI = &*I;
1239 unsigned Opcode = MI->getOpcode();
1240 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1241 CI = CSEMap.find(Opcode);
// Append to the existing bucket for this opcode, or start a new one.
1242 if (CI != CSEMap.end())
1243 CI->second.push_back(MI);
1245 std::vector<const MachineInstr*> CSEMIs;
1246 CSEMIs.push_back(MI);
1247 CSEMap.insert(std::make_pair(Opcode, CSEMIs));
/// LookForDuplicate - Scan PrevMIs for an instruction that produces the
/// same value as MI (per TargetInstrInfo::produceSameValue).
1253 MachineLICM::LookForDuplicate(const MachineInstr *MI,
1254 std::vector<const MachineInstr*> &PrevMIs) {
1255 for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
1256 const MachineInstr *PrevMI = PrevMIs[i];
// MRI is only passed pre-regalloc; post-regalloc the comparison must not
// rely on virtual-register info.
1257 if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
/// EliminateCSE - Given a hoisted instruction MI and the CSE-map bucket for
/// its opcode, try to replace MI with an equivalent instruction already in
/// the preheader. Returns true if MI was CSE'd (and erased).
1263 bool MachineLICM::EliminateCSE(MachineInstr *MI,
1264 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
1265 // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1266 // the undef property onto uses.
1267 if (CI == CSEMap.end() || MI->isImplicitDef())
1270 if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
1271 DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
1273 // Replace virtual registers defined by MI by their counterparts defined
// by Dup; collect the operand indices of those defs first.
1275 SmallVector<unsigned, 2> Defs;
1276 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1277 const MachineOperand &MO = MI->getOperand(i);
1279 // Physical registers may not differ here.
1280 assert((!MO.isReg() || MO.getReg() == 0 ||
1281 !TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
1282 MO.getReg() == Dup->getOperand(i).getReg()) &&
1283 "Instructions with different phys regs are not identical!");
1285 if (MO.isReg() && MO.isDef() &&
1286 !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
// Constrain Dup's def register classes to be compatible with MI's; if any
// constraint fails, roll back the classes already changed and give up.
1290 SmallVector<const TargetRegisterClass*, 2> OrigRCs;
1291 for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1292 unsigned Idx = Defs[i];
1293 unsigned Reg = MI->getOperand(Idx).getReg();
1294 unsigned DupReg = Dup->getOperand(Idx).getReg();
1295 OrigRCs.push_back(MRI->getRegClass(DupReg));
1297 if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
1298 // Restore old RCs if more than one defs.
1299 for (unsigned j = 0; j != i; ++j)
1300 MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
// All constraints succeeded: rewrite every use of MI's defs to Dup's defs.
1305 for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1306 unsigned Idx = Defs[i];
1307 unsigned Reg = MI->getOperand(Idx).getReg();
1308 unsigned DupReg = Dup->getOperand(Idx).getReg();
1309 MRI->replaceRegWith(Reg, DupReg);
// DupReg may now live longer than before; stale kill flags must go.
1310 MRI->clearKillFlags(DupReg);
1313 MI->eraseFromParent();
1320 /// MayCSE - Return true if the given instruction will be CSE'd if it's
1321 /// hoisted out of the loop.
1322 bool MachineLICM::MayCSE(MachineInstr *MI) {
1323 unsigned Opcode = MI->getOpcode();
1324 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1325 CI = CSEMap.find(Opcode);
1326 // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1327 // the undef property onto uses.
1328 if (CI == CSEMap.end() || MI->isImplicitDef())
// Any existing preheader instruction producing the same value means
// hoisting MI would be CSE'd away.
1331 return LookForDuplicate(MI, CI->second) != 0;
1334 /// Hoist - When an instruction is found to use only loop invariant operands
1335 /// that are safe to hoist, this instruction is called to do the dirty work.
/// Returns true if MI (or an unfolded load extracted from it) was hoisted.
1337 bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
1338 // First check whether we should hoist this instruction.
1339 if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
1340 // If not, try unfolding a hoistable load.
1341 MI = ExtractHoistableLoad(MI);
1342 if (!MI) return false;
1345 // Now move the instructions to the predecessor, inserting it before any
1346 // terminator instructions.
1348 dbgs() << "Hoisting " << *MI;
1349 if (Preheader->getBasicBlock())
1350 dbgs() << " to MachineBasicBlock "
1351 << Preheader->getName();
1352 if (MI->getParent()->getBasicBlock())
1353 dbgs() << " from MachineBasicBlock "
1354 << MI->getParent()->getName();
1358 // If this is the first instruction being hoisted to the preheader,
1359 // initialize the CSE map with potential common expressions.
1361 InitCSEMap(Preheader);
1362 FirstInLoop = false;
1365 // Look for opportunity to CSE the hoisted instruction.
1366 unsigned Opcode = MI->getOpcode();
1367 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1368 CI = CSEMap.find(Opcode);
1369 if (!EliminateCSE(MI, CI)) {
1370 // Otherwise, splice the instruction to the preheader.
1371 Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
1373 // Update register pressure for BBs from header to this block.
1374 UpdateBackTraceRegPressure(MI);
1376 // Clear the kill flags of any register this instruction defines,
1377 // since they may need to be live throughout the entire loop
1378 // rather than just live for part of it.
1379 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1380 MachineOperand &MO = MI->getOperand(i);
1381 if (MO.isReg() && MO.isDef() && !MO.isDead())
1382 MRI->clearKillFlags(MO.getReg());
1385 // Add to the CSE map.
1386 if (CI != CSEMap.end())
1387 CI->second.push_back(MI);
1389 std::vector<const MachineInstr*> CSEMIs;
1390 CSEMIs.push_back(MI);
1391 CSEMap.insert(std::make_pair(Opcode, CSEMIs));
/// getCurPreheader - Get (or lazily create, by splitting a critical edge)
/// the preheader for the current loop; returns null if none can be made.
1401 MachineBasicBlock *MachineLICM::getCurPreheader() {
1402 // Determine the block to which to hoist instructions. If we can't find a
1403 // suitable loop predecessor, we can't do any hoisting.
1405 // If we've tried to get a preheader and failed, don't try again.
// (The sentinel value -1 marks "tried and failed".)
1406 if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
1409 if (!CurPreheader) {
1410 CurPreheader = CurLoop->getLoopPreheader();
1411 if (!CurPreheader) {
// No dedicated preheader; fall back to the unique loop predecessor.
1412 MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
1414 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
// Split the predecessor->header edge to manufacture a preheader.
1418 CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
1419 if (!CurPreheader) {
// Edge could not be split; remember the failure via the sentinel.
1420 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1425 return CurPreheader;