//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

#define DEBUG_TYPE "t2-reduce-size"

STATISTIC(NumNarrows,  "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,   "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,    "Number of 32-bit load / store reduced to 16-bit ones");

static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);

namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow ones.
  struct ReduceEntry {
    uint16_t WideOpc;      // Wide opcode
    uint16_t NarrowOpc1;   // Narrow opcode to transform to
    uint16_t NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
    unsigned PredCC2  : 2;
    unsigned PartFlag : 1; // 16-bit instruction does partial flag update
    unsigned Special  : 1; // Needs to be dealt with specially
    unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
  };
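
  // Reading a row, e.g. the t2ADDri entry below: a 32-bit "add.w rd, rn, #imm"
  // can narrow either to "adds rd, rn, #imm3" (tADDi3, 3-bit immediate) or,
  // when rd == rn, to the two-address "adds rdn, #imm8" (tADDi8, 8-bit
  // immediate). Both forms require low registers, and the entry is marked
  // Special so the SP-relative case gets its own handling in ReduceSpecial().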

  static const ReduceEntry ReduceTable[] = {
  // Wide,        Narrow1,      Narrow2,     imm1,imm2, lo1, lo2, P/C,PF,S,AM
  { ARM::t2ADCrr, 0,            ARM::tADC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2ADDri, ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  0,0, 0,1,0 },
  { ARM::t2ADDrr, ARM::tADDrr,  ARM::tADDhirr, 0,   0,   1,   0,  0,1, 0,0,0 },
  { ARM::t2ADDSri,ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  2,2, 0,1,0 },
  { ARM::t2ADDSrr,ARM::tADDrr,  0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2ANDrr, 0,            ARM::tAND,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2ASRri, ARM::tASRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2ASRrr, 0,            ARM::tASRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2BICrr, 0,            ARM::tBIC,     0,   0,   0,   1,  0,0, 1,0,0 },
  //FIXME: Disable CMN, as CCodes are backwards from compare expectations
  //{ ARM::t2CMNrr, ARM::tCMN,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMNzrr, ARM::tCMNz,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPri, ARM::tCMPi8,  0,             8,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPrr, ARM::tCMPhir, 0,             0,   0,   0,   0,  2,0, 0,1,0 },
  { ARM::t2EORrr, 0,            ARM::tEOR,     0,   0,   0,   1,  0,0, 1,0,0 },
  // FIXME: adr.n immediate offset must be multiple of 4.
  //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0,   0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2LSLri, ARM::tLSLri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSLrr, 0,            ARM::tLSLrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2LSRri, ARM::tLSRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSRrr, 0,            ARM::tLSRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2MOVi,  ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,0,0 },
  { ARM::t2MOVi16,ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,1,0 },
  // FIXME: Do we need the 16-bit 'S' variant?
  { ARM::t2MOVr,  ARM::tMOVr,   0,             0,   0,   0,   0,  1,0, 0,0,0 },
  { ARM::t2MUL,   0,            ARM::tMUL,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2MVNr,  ARM::tMVN,    0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2ORRrr, 0,            ARM::tORR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2REV,   ARM::tREV,    0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REV16, ARM::tREV16,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REVSH, ARM::tREVSH,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2RORrr, 0,            ARM::tROR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2RSBri, ARM::tRSB,    0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2RSBSri,ARM::tRSB,    0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2SBCrr, 0,            ARM::tSBC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2SUBri, ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  0,0, 0,0,0 },
  { ARM::t2SUBrr, ARM::tSUBrr,  0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2SUBSri,ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  2,2, 0,0,0 },
  { ARM::t2SUBSrr,ARM::tSUBrr,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2SXTB,  ARM::tSXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2SXTH,  ARM::tSXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2TSTrr, ARM::tTST,    0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2UXTB,  ARM::tUXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2UXTH,  ARM::tUXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },

  // FIXME: Clean this up after splitting each Thumb load / store opcode
  // into multiple ones.
  { ARM::t2LDRi12,ARM::tLDRi,   ARM::tLDRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRs,  ARM::tLDRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBi12,ARM::tLDRBi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBs, ARM::tLDRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHi12,ARM::tLDRHi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHs, ARM::tLDRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSBs,ARM::tLDRSB,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSHs,ARM::tLDRSH,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRi12,ARM::tSTRi,   ARM::tSTRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRs,  ARM::tSTRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBi12,ARM::tSTRBi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBs, ARM::tSTRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHi12,ARM::tSTRHi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHs, ARM::tSTRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },

  { ARM::t2LDMIA, ARM::tLDMIA,  0,             0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2LDMIA_RET,0,         ARM::tPOP_RET, 0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0,   0,   1,   1,  1,1, 0,1,0 },
  // ARM::t2STM (with no basereg writeback) has no Thumb1 equivalent
  { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0,       0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2STMDB_UPD, 0,        ARM::tPUSH,    0,   0,   1,   1,  1,1, 0,1,0 }
  };

  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;
    Thumb2SizeReduce();

    const Thumb2InstrInfo *TII;
    const ARMSubtarget *STI;

    bool runOnMachineFunction(MachineFunction &MF) override;

    const char *getPassName() const override {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);

    /// ReduceMI - Attempt to reduce MI, return true on success.
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);

    bool OptimizeSize;
    bool MinimizeSize;

    // Last instruction to define CPSR in the current block.
    MachineInstr *CPSRDef;
    // Was CPSR last defined by a high latency instruction?
    // When CPSRDef is null, this refers to CPSR defs in predecessors.
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR;
      // Has this block been visited yet?
      bool Visited;

      MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
    };

    SmallVector<MBBInfo, 8> BlockInfo;
  };
  char Thumb2SizeReduce::ID = 0;
}

Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      assert(false && "Duplicated entries?");
  }
}

static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const uint16_t *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch(Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}

/// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
/// the 's' 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the out-of-order execution engine's ability
/// to do register renaming magic.
/// This function checks if there is a read-after-write dependency between the
/// last instruction that defines the CPSR and the current instruction. If
/// there is, then there is no harm done since the instruction cannot be
/// retired before the CPSR setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of
/// compile time. We're not looking for cases like:
/// r0 = muls ...
/// r1 = add.w r0, ...
/// ...
///    = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (const MachineOperand &MO : CPSRDef->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (const MachineOperand &MO : Use->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}

bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    // Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}
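
// Example of the contract above (illustrative registers/immediates): an
// "add.w r0, r1, #7" that is not inside an IT block can only narrow to the
// flag-setting "adds r0, r1, #7" (the PredCC == 0 case), which is allowed
// only if CPSR is not live afterwards; the new CPSR def is then marked dead.
// Inside an IT block the narrow form must *not* set flags, so the wide form
// must not have been setting them either.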

static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
  bool isLROk = (Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
      return false;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}

bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t  ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDMIA: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    assert(isARMLowRegister(BaseReg));

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }

    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  bool OffsetInternal = false;
  if (HasShift) {
    OffsetReg  = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    OffsetInternal = MI->getOperand(2).isInternalRead();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }
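
  // For instance (illustrative operands): t2LDRi12 "ldr.w r0, [r1, #48]" fits
  // tLDRi, whose 5-bit immediate is scaled by 4, so byte offsets 0..124 in
  // steps of 4 are representable; "ldr.w r0, [r1, #50]" or "[r1, #128]" must
  // stay 32-bit.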

  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));
  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0));
    MIB.addOperand(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill) |
                            getInternalReadRegState(OffsetInternal));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}

bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // reg, the predicate must be "always" and the condition flags must not
    // be set.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;

    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::tADDrSPi))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(1))
      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
    AddDefaultPred(MIB);

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }
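
  // The implied scale above means, e.g. (illustrative operands), that
  // "add.w r0, sp, #508" becomes tADDrSPi with imm8 = 508 / 4 = 127; offsets
  // that are not multiples of 4 or that exceed 1020 were rejected earlier.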

  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoadOrStore())
    return ReduceLoadStore(MBB, MI, Entry);

  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        // fallthrough
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}

bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    unsigned Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1, CommOpIdx2;
    if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
        CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI = TII->commuteInstruction(MI);
    if (!CommutedMI)
      return false;
  }
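
  // Commute example (illustrative registers): "add.w r2, r1, r2" is not
  // two-address as written (Reg0 != Reg1), but Reg0 matches the second
  // source, so commuting to "add.w r2, r2, r1" enables the 16-bit
  // two-address "adds r2, r1".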
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}

bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.addOperand(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    AddDefaultPred(MIB);

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}
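
// An illustrative end-to-end narrowing (hypothetical operands): "uxtb.w
// r0, r1" (t2UXTB with a rotate immediate of 0) becomes "uxtb r0, r1"; the
// zero rotate operand is dropped above because the 16-bit encoding has no
// rotate field.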

static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}

static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}

bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}

bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = nullptr;

  CPSRDef = nullptr;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (auto *Pred : MBB.predecessors()) {
    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }

  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugValue())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = std::prev(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }

    if (!NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
      MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
      if (MO && !MO->isKill())
        LiveCPSR = true;
    }

    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = nullptr;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}

bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
  if (STI->isThumb1Only() || STI->prefers32BitThumb())
    return false;

  TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());

  // Optimizing / minimizing size?
  OptimizeSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
  MinimizeSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so LastCPSRDef is known for all
  // predecessors.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}

/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass() {
  return new Thumb2SizeReduce();
}
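
// Usage sketch (assumption: the actual call site lives outside this file, in
// the ARM target's pass configuration, where the pass is scheduled late,
// after Thumb2 instructions have been formed):
//   addPass(createThumb2SizeReductionPass());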