//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
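//
// For example (illustrative only), a run of loads off the same base register
// with consecutive, increasing offsets such as
//
//   ldr r4, [r0]
//   ldr r5, [r0, #4]
//   ldr r6, [r0, #8]
//
// can be combined into a single load-multiple instruction:
//
//   ldmia r0, {r4, r5, r6}
//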
#define DEBUG_TYPE "arm-ldst-opt"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMRegisterInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
STATISTIC(NumFSTMGened, "Number of fstm instructions generated");
namespace {
  struct VISIBILITY_HIDDEN ARMLoadStoreOpt : public MachineFunctionPass {
    const TargetInstrInfo *TII;

    virtual bool runOnMachineFunction(MachineFunction &Fn);

    virtual const char *getPassName() const {
      return "ARM load / store optimization pass";
    }

  private:
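    /// MemOpQueueEntry - One candidate load / store: its immediate offset from
    /// the base register, its position within the basic block, an iterator to
    /// the instruction itself, and whether it has already been merged into an
    /// LDM / STM.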
    struct MemOpQueueEntry {
      int Offset;
      unsigned Position;
      MachineBasicBlock::iterator MBBI;
      bool Merged;
      MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
        : Offset(o), Position(p), MBBI(i), Merged(false) {}
    };
    typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
    typedef MemOpQueue::iterator MemOpQueueIter;
    SmallVector<MachineBasicBlock::iterator, 4>
    MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                 int Opcode, unsigned Size, MemOpQueue &MemOps);

    bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
    bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
  };
}
/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
/// optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass() {
  return new ARMLoadStoreOpt();
}
/// getLoadStoreMultipleOpcode - Map a load / store opcode to the corresponding
/// load / store multiple opcode and update the instruction count statistics.
static int getLoadStoreMultipleOpcode(int Opcode) {
  switch (Opcode) {
  case ARM::LDR:  ++NumLDMGened;  return ARM::LDM;
  case ARM::STR:  ++NumSTMGened;  return ARM::STM;
  case ARM::FLDS: ++NumFLDMGened; return ARM::FLDMS;
  case ARM::FSTS: ++NumFSTMGened; return ARM::FSTMS;
  case ARM::FLDD: ++NumFLDMGened; return ARM::FLDMD;
  case ARM::FSTD: ++NumFSTMGened; return ARM::FSTMD;
  default: abort();
  }
  return 0;
}
/// mergeOps - Create and insert an LDM or STM with Base as the base register
/// and registers in Regs as the register operands that will be loaded / stored.
/// It returns true if the transformation is done.
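///
/// For example (illustrative), with Base = r1, Offset = 0, Opcode = ARM::LDR
/// and Regs = { r2, r3 }, the instruction created is roughly:
///   ldmia r1, {r2, r3}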
static bool mergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     int Offset, unsigned Base, int Opcode,
                     SmallVector<unsigned, 8> &Regs,
                     const TargetInstrInfo *TII) {
  // Only a single register to load / store. Don't bother.
  unsigned NumRegs = Regs.size();
  if (NumRegs <= 1)
    return false;
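  // AM4 sub-modes: ia = increment after, ib = increment before,
  // da = decrement after, db = decrement before. The starting offset of the
  // run determines which sub-mode can reuse the existing base register as-is.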
  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if (isAM4 && Offset == 4)
    Mode = ARM_AM::ib;
  else if (isAM4 && Offset == -4 * (int)NumRegs + 4)
    Mode = ARM_AM::da;
  else if (isAM4 && Offset == -4 * (int)NumRegs)
    Mode = ARM_AM::db;
  else if (Offset != 0) {
    // If the starting offset isn't zero, insert an instruction to materialize
    // a new base. But only do so if it is cost effective, i.e. merging more
    // than two loads / stores.
    if (NumRegs <= 2)
      return false;

    unsigned NewBase;
    if (Opcode == ARM::LDR)
      // If it is a load, then just use one of the destination registers
      // as the new base.
      NewBase = Regs[NumRegs-1];
    else
      // FIXME: Try scavenging a register to use as a new base.
      return false;

    int BaseOpc = ARM::ADDri;
    if (Offset < 0) {
      BaseOpc = ARM::SUBri;
      Offset = -Offset;
    }
    int ImmedOffset = ARM_AM::getSOImmVal(Offset);
    if (ImmedOffset == -1)
      return false; // Probably not worth it then.

    BuildMI(MBB, MBBI, TII->get(BaseOpc), NewBase).addReg(Base).addImm(ImmedOffset);
    Base = NewBase;
  }
  bool isDPR = Opcode == ARM::FLDD || Opcode == ARM::FSTD;
  bool isDef = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  Opcode = getLoadStoreMultipleOpcode(Opcode);
  MachineInstrBuilder MIB = (isAM4)
    ? BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base)
        .addImm(ARM_AM::getAM4ModeImm(Mode))
    : BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base)
        .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs));
  for (unsigned i = 0; i != NumRegs; ++i)
    // Loads define the transferred registers; stores merely use them.
    MIB = MIB.addReg(Regs[i], isDef);

  return true;
}
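/// MergeLDR_STR - Merge a run of loads / stores recorded in MemOps (starting at
/// index SIndex) that share the same base register and opcode into one or more
/// load / store multiple instructions. Returns iterators to the newly created
/// multiple instructions.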
SmallVector<MachineBasicBlock::iterator, 4>
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB,
                              unsigned SIndex, unsigned Base, int Opcode,
                              unsigned Size, MemOpQueue &MemOps) {
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  SmallVector<MachineBasicBlock::iterator, 4> Merges;
  int Offset = MemOps[SIndex].Offset;
  int SOffset = Offset;
  unsigned Pos = MemOps[SIndex].Position;
  MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
  SmallVector<unsigned, 8> Regs;
  unsigned PReg = MemOps[SIndex].MBBI->getOperand(0).getReg();
  unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
  Regs.push_back(PReg);
  for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
    int NewOffset = MemOps[i].Offset;
    unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg();
    unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
    // AM4 - register numbers in ascending order.
    // AM5 - consecutive register numbers in ascending order.
    if (NewOffset == Offset + (int)Size &&
        ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
      Offset += Size;
      Regs.push_back(Reg);
      PRegNum = RegNum;
    } else {
      // Can't merge this in. Try to merge the earlier ones first.
      if (mergeOps(MBB, ++Loc, SOffset, Base, Opcode, Regs, TII)) {
        Merges.push_back(prior(Loc));
        for (unsigned j = SIndex; j < i; ++j) {
          MBB.erase(MemOps[j].MBBI);
          MemOps[j].Merged = true;
        }
      }
      // Merge the rest of the queue, starting from this entry.
      SmallVector<MachineBasicBlock::iterator, 4> Merges2 =
        MergeLDR_STR(MBB, i, Base, Opcode, Size, MemOps);
      Merges.append(Merges2.begin(), Merges2.end());
      return Merges;
    }

    if (MemOps[i].Position > Pos) {
      Pos = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  if (mergeOps(MBB, ++Loc, SOffset, Base, Opcode, Regs, TII)) {
    Merges.push_back(prior(Loc));
    for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
      MBB.erase(MemOps[i].MBBI);
      MemOps[i].Merged = true;
    }
  }

  return Merges;
}
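/// isMatchingDecrement - Return true if MI is a SUBri that decrements Base in
/// place by exactly Bytes, i.e. sub Base, Base, #Bytes.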
static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes) {
  return (MI && MI->getOpcode() == ARM::SUBri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
}
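/// isMatchingIncrement - Return true if MI is an ADDri that increments Base in
/// place by exactly Bytes, i.e. add Base, Base, #Bytes.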
static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes) {
  return (MI && MI->getOpcode() == ARM::ADDri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
}
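/// getLSMultipleTransferSize - Return the number of bytes transferred by the
/// load / store (multiple) instruction MI.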
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return 0;
  case ARM::LDR:
  case ARM::STR:
  case ARM::FLDS:
  case ARM::FSTS:
    return 4;
  case ARM::FLDD:
  case ARM::FSTD:
    return 8;
  case ARM::LDM:
  case ARM::STM:
    return (MI->getNumOperands() - 2) * 4;
  case ARM::FLDMS:
  case ARM::FSTMS:
  case ARM::FLDMD:
  case ARM::FSTMD:
    return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;
  }
}
/// mergeBaseUpdateLSMultiple - Fold preceding / trailing inc / dec of the base
/// register into the LDM / STM / FLDM{D|S} / FSTM{D|S} op when possible:
///
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// =>
/// stmia rn!, <ra, rb, rc>
///
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// =>
/// ldmdb rn!, <ra, rb, rc>
static bool mergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(0).getReg();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::STM;

  if (isAM4) {
    // The base register is already being updated; nothing to fold.
    if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
      return false;

    // Can't use the updating AM4 sub-mode if the base register is also a dest
    // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
    for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i) {
      if (MI->getOperand(i).getReg() == Base)
        return false;
    }

    ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
        MBB.erase(PrevMBBI);
        return true;
      } else if (Mode == ARM_AM::ib &&
                 isMatchingDecrement(PrevMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
        MBB.erase(PrevMBBI);
        return true;
      }
    }

    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
          isMatchingIncrement(NextMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
        MBB.erase(NextMBBI);
        return true;
      } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
                 isMatchingDecrement(NextMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
        MBB.erase(NextMBBI);
        return true;
      }
    }
  } else {
    // FLDM{D|S}, FSTM{D|S} addressing mode 5 ops.
    if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
      return false;

    ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
    unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
        MBB.erase(PrevMBBI);
        return true;
      }
    }

    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingIncrement(NextMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
        MBB.erase(NextMBBI);
        return true;
      }
    }
  }

  return false;
}
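/// getPreIndexedLoadStoreOpcode - Return the pre-indexed version of the given
/// load / store opcode.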
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_PRE;
  case ARM::STR: return ARM::STR_PRE;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
  default: abort();
  }
  return 0;
}
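/// getPostIndexedLoadStoreOpcode - Return the post-indexed version of the given
/// load / store opcode. The FP loads / stores have no pre- / post-indexed form,
/// so a one-register FLDM / FSTM with base writeback is used for them instead.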
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_POST;
  case ARM::STR: return ARM::STR_POST;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
  default: abort();
  }
  return 0;
}
/// mergeBaseUpdateLoadStore - Fold preceding / trailing inc / dec of the base
/// register into the LDR / STR / FLD{D|S} / FST{D|S} op when possible:
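///
/// ldr rd, [rn]
/// rn := rn + 4;
/// =>
/// ldr rd, [rn], #4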
static bool mergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const TargetInstrInfo *TII) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(1).getReg();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  // Can't fold an update into an op that already has a non-zero offset.
  if ((isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0) ||
      (!isAM2 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0))
    return false;

  bool isLd = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (isLd && MI->getOperand(0).getReg() == Base)
    return false;

  bool DoMerge = false;
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
  unsigned NewOpc = 0;
  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (isMatchingDecrement(PrevMBBI, Base, Bytes)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes)) {
      DoMerge = true;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    }
    if (DoMerge)
      MBB.erase(PrevMBBI);
  }

  if (!DoMerge && MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
    if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    } else if (isMatchingIncrement(NextMBBI, Base, Bytes)) {
      DoMerge = true;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    }
    if (DoMerge)
      MBB.erase(NextMBBI);
  }

  if (!DoMerge)
    return false;

  bool isDPR = NewOpc == ARM::FLDMD || NewOpc == ARM::FSTMD;
  unsigned Offset = isAM2 ? ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift)
    : ARM_AM::getAM5Opc((AddSub == ARM_AM::sub) ? ARM_AM::db : ARM_AM::ia,
                        true, isDPR ? 2 : 1);
  if (isLd) {
    if (isAM2)
      // LDR_PRE, LDR_POST.
      BuildMI(MBB, MBBI, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, true).addReg(Base).addReg(0).addImm(Offset);
    else
      // FLDMS, FLDMD.
      BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base)
        .addImm(Offset).addReg(MI->getOperand(0).getReg(), true);
  } else {
    if (isAM2)
      // STR_PRE, STR_POST.
      BuildMI(MBB, MBBI, TII->get(NewOpc), Base).addReg(MI->getOperand(0).getReg())
        .addReg(Base).addReg(0).addImm(Offset);
    else
      // FSTMS, FSTMD.
      BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base)
        .addImm(Offset).addReg(MI->getOperand(0).getReg(), false);
  }
  MBB.erase(MBBI);

  return true;
}
/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
/// ops of the same base register and incrementing offsets into LDM / STM ops.
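///
/// The block is scanned top-down: candidate loads / stores that share a base
/// register and opcode are collected into a MemOpQueue (kept sorted by offset).
/// When the chain is broken, MergeLDR_STR is run on the queue, and any
/// leftover base increments / decrements are folded into the individual ops.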
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  unsigned NumMerges = 0;
  unsigned NumMemOps = 0;
  MemOpQueue MemOps;
  unsigned CurrBase = 0;
  int CurrOpc = -1;
  unsigned CurrSize = 0;
  unsigned Position = 0;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    bool Advance = false;
    bool TryMerge = false;
    bool Clobber = false;

    int Opcode = MBBI->getOpcode();
    bool isMemOp = false;
    switch (Opcode) {
    case ARM::LDR:
    case ARM::STR:
      isMemOp =
        (MBBI->getOperand(1).isRegister() && MBBI->getOperand(2).getReg() == 0);
      break;
    case ARM::FLDS:
    case ARM::FSTS:
      isMemOp = MBBI->getOperand(1).isRegister();
      break;
    case ARM::FLDD:
    case ARM::FSTD:
      isMemOp = MBBI->getOperand(1).isRegister();
      break;
    }

    if (isMemOp) {
      bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
      unsigned Size = getLSMultipleTransferSize(MBBI);
      unsigned Base = MBBI->getOperand(1).getReg();
      unsigned OffIdx = MBBI->getNumOperands()-1;
      unsigned OffField = MBBI->getOperand(OffIdx).getImm();
      int Offset = isAM2
        ? ARM_AM::getAM2Offset(OffField) : ARM_AM::getAM5Offset(OffField) * 4;
      if (isAM2) {
        if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
          Offset = -Offset;
      } else {
        if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
          Offset = -Offset;
      }

      // Watch out for:
      // r4 := ldr [r5]
      // r5 := ldr [r5, #4]
      // r6 := ldr [r5, #8]
      //
      // The second ldr has effectively broken the chain even though it
      // looks like the later ldr(s) use the same base register. Try to
      // merge the ldr's so far, including this one. But don't try to
      // combine the following ldr(s).
      Clobber = (Opcode == ARM::LDR && Base == MBBI->getOperand(0).getReg());
      if (CurrBase == 0 && !Clobber) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc  = Opcode;
        CurrSize = Size;
        MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
        NumMemOps++;
        Advance = true;
      } else {
        if (Clobber) {
          TryMerge = true;
          Advance = true;
        }

        if (CurrOpc == Opcode && CurrBase == Base) {
          // Continue adding to the queue.
          if (Offset > MemOps.back().Offset) {
            MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
            NumMemOps++;
            Advance = true;
          } else {
            for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
                 I != E; ++I) {
              if (Offset < I->Offset) {
                MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
                NumMemOps++;
                Advance = true;
                break;
              } else if (Offset == I->Offset) {
                // Collision! This can't be merged!
                break;
              }
            }
          }
        }
      }
    }

    if (Advance) {
      ++Position;
      ++MBBI;
    } else
      TryMerge = true;

    if (TryMerge) {
      if (NumMemOps > 1) {
        SmallVector<MachineBasicBlock::iterator,4> MBBII =
          MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize, MemOps);
        // Try folding preceding / trailing base inc / dec into the generated
        // LDM / STM ops.
        for (unsigned i = 0, e = MBBII.size(); i < e; ++i)
          if (mergeBaseUpdateLSMultiple(MBB, MBBII[i]))
            NumMerges++;
        NumMerges += MBBII.size();
      }

      // Try folding preceding / trailing base inc / dec into those load / store
      // instructions that were not merged to form LDM / STM ops.
      for (unsigned i = 0; i != NumMemOps; ++i)
        if (!MemOps[i].Merged)
          if (mergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII))
            NumMerges++;

      // Reset for the next chain.
      CurrBase = 0;
      CurrOpc = -1;
      if (NumMemOps) {
        MemOps.clear();
        NumMemOps = 0;
      }

      // If iterator hasn't been advanced and this is not a memory op, skip it.
      // It can't start a new chain anyway.
      if (!Advance && !isMemOp && MBBI != E) {
        ++Position;
        ++MBBI;
      }
    }
  }

  return NumMerges > 0;
}
/// MergeReturnIntoLDM - If this is an exit BB, try merging the return op
/// (bx lr) into the preceding stack restore so it restores the return address
/// directly into pc:
///
/// ldmfd sp!, {r7, lr}
/// bx lr
/// =>
/// ldmfd sp!, {r7, pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  if (MBB.empty()) return false;

  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  if (MBBI->getOpcode() == ARM::BX_RET && MBBI != MBB.begin()) {
    MachineInstr *PrevMI = prior(MBBI);
    if (PrevMI->getOpcode() == ARM::LDM) {
      MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
      if (MO.getReg() == ARM::LR) {
        // Turn the LDM into an LDM_RET that pops directly into pc and delete
        // the now-redundant bx lr.
        PrevMI->setInstrDescriptor(TII->get(ARM::LDM_RET));
        MO.setReg(ARM::PC);
        MBB.erase(MBBI);
        return true;
      }
    }
  }
  return false;
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock &MBB = *MFI;
    Modified |= LoadStoreMultipleOpti(MBB);
    Modified |= MergeReturnIntoLDM(MBB);
  }
  return Modified;
}