//===-- PeepholeOptimizer.cpp - Peephole Optimizations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended
//     to handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This
//     optimization will replace some uses of the pre-extension value with
//     uses of the sub-register of the result.
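//
//     For example (an illustrative sketch; the opcode and sub-register index
//     are X86-flavored assumptions, and the virtual register numbers are
//     hypothetical):
//
//       %reg1025 = MOVSX32rr16 %reg1024   // %reg1024 lives on in the low
//            ... = SomeOp %reg1024        // 16 bits of %reg1025
//   =>
//       %reg1025 = MOVSX32rr16 %reg1024
//            ... = SomeOp %reg1025:sub_16bit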
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we
//     can eliminate the "cmp" instruction.
//
//     Another instance, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use the flag from "sub", then we can
//     replace "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Bitcast pairs:
//
//       v1 = bitcast v0
//       v2 = bitcast v1
//          = v2
//   =>
//       v1 = bitcast v0
//          = v0
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));

STATISTIC(NumReuse,    "Number of extension results reused");
STATISTIC(NumBitcasts, "Number of bitcasts eliminated");
STATISTIC(NumCmps,     "Number of compares eliminated");
STATISTIC(NumImmFold,  "Number of move immediates folded");
STATISTIC(NumLoadFold, "Number of loads folded");

namespace {
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineRegisterInfo   *MRI;
    MachineDominatorTree  *DT;  // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    bool optimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
  };
}

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                      "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                    "Peephole Optimizations", false, false)

/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of
/// the result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code. Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
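///
/// For example (an illustrative sketch; the opcode and sub-register index are
/// X86-flavored assumptions, and the virtual register numbers are
/// hypothetical):
///
///   %reg1025 = MOVSX32rr16 %reg1024
///        ... = SomeOp %reg1024
/// =>
///   %reg1025 = MOVSX32rr16 %reg1024
///   %reg1026 = COPY %reg1025:sub_16bit
///        ... = SomeOp %reg1026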
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
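  // Illustrative PPC-flavored instance (hypothetical vregs): given
  //   %vreg1 = EXTSW %vreg0
  // only %vreg0:sub_32 holds the value the extension preserves, so only uses
  // of %vreg0:sub_32 are candidates for replacement.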
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with use
  // of the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as a use of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend the live
      // range of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }

      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}

/// optimizeBitcastInstr - If the instruction is a bitcast instruction A that
/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcasts a
/// value across register classes), and the source is defined by another
/// bitcast instruction B, and the register class of the source of B matches
/// the register class of the def of A, then it is legal to replace all uses
/// of the def of A with the source of B. e.g.
///
///   %vreg0<def> = VMOVSR %vreg1
///   %vreg3<def> = VMOVRS %vreg0
///
/// Replace all uses of vreg3 with vreg1.
bool PeepholeOptimizer::optimizeBitcastInstr(MachineInstr *MI,
                                             MachineBasicBlock *MBB) {
  unsigned NumDefs = MI->getDesc().getNumDefs();
  unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;

  unsigned Def = 0;
  unsigned Src = 0;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef())
      Def = Reg;
    else if (Src)
      // Multiple sources?
      return false;
    else
      Src = Reg;
  }

  assert(Def && Src && "Malformed bitcast instruction!");

  MachineInstr *DefMI = MRI->getVRegDef(Src);
  if (!DefMI || !DefMI->isBitcast())
    return false;

  unsigned SrcSrc = 0;
  NumDefs = DefMI->getDesc().getNumDefs();
  NumSrcs = DefMI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;

  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (SrcSrc)
      // Multiple sources?
      return false;
    else
      SrcSrc = Reg;
  }

  if (MRI->getRegClass(SrcSrc) != MRI->getRegClass(Def))
    return false;

  MRI->replaceRegWith(Def, SrcSrc);
  MRI->clearKillFlags(SrcSrc);
  MI->eraseFromParent();
  ++NumBitcasts;
  return true;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and
/// use the flag from the previous instruction.
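///
/// For instance (an illustrative, ARM-flavored sketch; the opcodes and
/// registers are hypothetical, and the actual rewrite is up to the target's
/// optimizeCompareInstr hook):
///
///   %r1 = SUBri %r1, 1     ; could be converted to SUBSri, setting CPSR
///   CMPri %r1, 0           ; would set the same flags -> can be removed
///   Bcc   eq, <bb>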
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg, SrcReg2;
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
      (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}

/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads whose destination is a virtual register
/// with a single use.
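///
/// A sketch of the kind of fold this enables (X86-flavored; the virtual
/// registers and memory operand are hypothetical):
///
///   %vreg1 = MOV32rm <mem>            ; single-use load
///   %vreg2 = ADD32rr %vreg0, %vreg1
/// =>
///   %vreg2 = ADD32rm %vreg0, <mem>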
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                       unsigned &FoldAsLoadDefReg) {
  if (MI->canFoldAsLoad()) {
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.getNumDefs() == 1) {
      unsigned Reg = MI->getOperand(0).getReg();
      // To reduce compilation time, we check MRI->hasOneUse when inserting
      // loads. It should be checked again when processing the uses of the
      // load, since uses can be removed during peephole.
      if (!MI->getOperand(0).getSubReg() &&
          TargetRegisterInfo::isVirtualRegister(Reg) &&
          MRI->hasOneUse(Reg)) {
        FoldAsLoadDefReg = Reg;
        return true;
      }
    }
  }
  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}

/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
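///
/// For instance (an illustrative, X86-flavored sketch with hypothetical
/// virtual registers; whether the fold actually happens is up to the
/// target's FoldImmediate hook):
///
///   %vreg1 = MOV32ri 7
///   %vreg2 = ADD32rr %vreg0, %vreg1
/// =>
///   %vreg2 = ADD32ri %vreg0, 7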
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  unsigned FoldAsLoadDefReg;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();
    FoldAsLoadDefReg = 0;

    bool First = true;
    MachineBasicBlock::iterator PMII;
    for (MachineBasicBlock::iterator
         MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      LocalMIs.insert(MI);

      // If MI belongs to one of the following categories, discard the load
      // candidate.
      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefReg = 0;
        ++MII;
        continue;
      }
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefReg = 0;

      if (MI->isBitcast()) {
        if (optimizeBitcastInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      } else if (MI->isCompare()) {
        if (optimizeCmpInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
        // We need to fold the load after optimizeCmpInstr, since
        // optimizeCmpInstr can enable folding by converting SUB to CMP.
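        // Illustrative X86-flavored sequence (hypothetical vregs): once a
        //   SUB32rr %vreg0, %vreg2      ; register result dead, flags used
        // has been rewritten to CMP32rr by optimizeCmpInstr, a single-use
        //   %vreg2 = MOV32rm <mem>
        // load can then be folded to form CMP32rm.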
        MachineInstr *DefMI = 0;
        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                      FoldAsLoadDefReg, DefMI);
        if (FoldMI) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
          LocalMIs.erase(MI);
          LocalMIs.erase(DefMI);
          LocalMIs.insert(FoldMI);
          MI->eraseFromParent();
          DefMI->eraseFromParent();
          ++NumLoadFold;

          // MI is replaced with FoldMI.
          Changed = true;
          PMII = FoldMI;
          MII = llvm::next(PMII);
          continue;
        }
      }

      First = false;
      PMII = MII;
      ++MII;
    }
  }

  return Changed;
}