//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-ldst-opt"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
STATISTIC(NumFSTMGened, "Number of fstm instructions generated");
STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
/// ARMLoadStoreOpt - Post-register allocation pass that combines
/// load / store instructions to form ldm / stm instructions.
struct VISIBILITY_HIDDEN ARMLoadStoreOpt : public MachineFunctionPass {
  static char ID;
  ARMLoadStoreOpt() : MachineFunctionPass(&ID) {}

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  ARMFunctionInfo *AFI;
  RegScavenger *RS;

  virtual bool runOnMachineFunction(MachineFunction &Fn);

  virtual const char *getPassName() const {
    return "ARM load / store optimization pass";
  }
  struct MemOpQueueEntry {
    int Offset;
    unsigned Position;
    MachineBasicBlock::iterator MBBI;
    bool Merged;
    MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
      : Offset(o), Position(p), MBBI(i), Merged(false) {}
  };
  typedef SmallVector<MemOpQueueEntry, 8> MemOpQueue;
  typedef MemOpQueue::iterator MemOpQueueIter;
  bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                int Offset, unsigned Base, bool BaseKill, int Opcode,
                ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
                DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs);
  void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                    int Opcode, unsigned Size,
                    ARMCC::CondCodes Pred, unsigned PredReg,
                    unsigned Scratch, MemOpQueue &MemOps,
                    SmallVector<MachineBasicBlock::iterator, 4> &Merges);

  void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
  bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
  bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
};
char ARMLoadStoreOpt::ID = 0;
static int getLoadStoreMultipleOpcode(int Opcode) {
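  // A minimal sketch of the mapping this helper is expected to perform,
  // based on the multiple-transfer opcodes used elsewhere in this file
  // (illustrative; the default case is an assumption):
  switch (Opcode) {
  case ARM::LDR:  return ARM::LDM;
  case ARM::STR:  return ARM::STM;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTD: return ARM::FSTMD;
  default:        return 0;
  }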
/// MergeOps - Create and insert a LDM or STM with Base as base register and
/// registers in Regs as the register operands that would be loaded / stored.
/// It returns true if the transformation is done.
bool
ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          int Offset, unsigned Base, bool BaseKill,
                          int Opcode, ARMCC::CondCodes Pred,
                          unsigned PredReg, unsigned Scratch, DebugLoc dl,
                          SmallVector<std::pair<unsigned, bool>, 8> &Regs) {
  // Only a single register to load / store. Don't bother.
  unsigned NumRegs = Regs.size();
  if (NumRegs <= 1)
    return false;

  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if (isAM4 && Offset == 4)
    Mode = ARM_AM::ib;
  else if (isAM4 && Offset == -4 * (int)NumRegs + 4)
    Mode = ARM_AM::da;
  else if (isAM4 && Offset == -4 * (int)NumRegs)
    Mode = ARM_AM::db;
  else if (Offset != 0) {
    // If the starting offset isn't zero, insert a MI to materialize a new
    // base. But only do so if it is cost effective, i.e. we are merging more
    // than two loads / stores.
    if (NumRegs <= 2)
      return false;

    unsigned NewBase;
    if (Opcode == ARM::LDR)
      // If it is a load, then just use one of the destination registers
      // as the new base.
      NewBase = Regs[NumRegs-1].first;
    else {
      // Use the scratch register as the new base.
      NewBase = Scratch;
      if (NewBase == 0)
        return false;
    }
    int BaseOpc = ARM::ADDri;
    if (Offset < 0) {
      BaseOpc = ARM::SUBri;
      Offset = -Offset;
    }
    int ImmedOffset = ARM_AM::getSOImmVal(Offset);
    if (ImmedOffset == -1)
      return false; // Probably not worth it then.

    BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
      .addReg(Base, getKillRegState(BaseKill)).addImm(ImmedOffset)
      .addImm(Pred).addReg(PredReg).addReg(0);
    Base = NewBase;
    BaseKill = true;  // New base is always killed right after its use.
  }

  bool isDPR = Opcode == ARM::FLDD || Opcode == ARM::FSTD;
  bool isDef = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  Opcode = getLoadStoreMultipleOpcode(Opcode);
  MachineInstrBuilder MIB = (isAM4)
    ? BuildMI(MBB, MBBI, dl, TII->get(Opcode))
        .addReg(Base, getKillRegState(BaseKill))
        .addImm(ARM_AM::getAM4ModeImm(Mode)).addImm(Pred).addReg(PredReg)
    : BuildMI(MBB, MBBI, dl, TII->get(Opcode))
        .addReg(Base, getKillRegState(BaseKill))
        .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs))
        .addImm(Pred).addReg(PredReg);
  for (unsigned i = 0; i != NumRegs; ++i)
    MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
                     | getKillRegState(Regs[i].second));
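  // For example (illustrative only): merging four word loads at [r0], [r0, #4],
  // [r0, #8] and [r0, #12] into r4-r7 with Mode == ARM_AM::ia produces a single
  // "ldmia r0, {r4, r5, r6, r7}".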
/// MergeLDR_STR - Merge a number of load / store instructions into one or more
/// load / store multiple instructions.
void
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
                              unsigned Base, int Opcode, unsigned Size,
                              ARMCC::CondCodes Pred, unsigned PredReg,
                              unsigned Scratch, MemOpQueue &MemOps,
                              SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  int Offset = MemOps[SIndex].Offset;
  int SOffset = Offset;
  unsigned Pos = MemOps[SIndex].Position;
  MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
  DebugLoc dl = Loc->getDebugLoc();
  unsigned PReg = Loc->getOperand(0).getReg();
  unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
  bool isKill = Loc->getOperand(0).isKill();

  SmallVector<std::pair<unsigned,bool>, 8> Regs;
  Regs.push_back(std::make_pair(PReg, isKill));
  for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
    int NewOffset = MemOps[i].Offset;
    unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg();
    unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
    isKill = MemOps[i].MBBI->getOperand(0).isKill();
    // AM4 - register numbers in ascending order.
    // AM5 - consecutive register numbers in ascending order.
    if (NewOffset == Offset + (int)Size &&
        ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
      Offset += Size;
      Regs.push_back(std::make_pair(Reg, isKill));
      PRegNum = RegNum;
    } else {
      // Can't merge this in. Try to merge the earlier ones first.
      if (MergeOps(MBB, ++Loc, SOffset, Base, false, Opcode, Pred, PredReg,
                   Scratch, dl, Regs)) {
        Merges.push_back(prior(Loc));
        for (unsigned j = SIndex; j < i; ++j) {
          MBB.erase(MemOps[j].MBBI);
          MemOps[j].Merged = true;
        }
      }
      MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, PredReg, Scratch,
                   MemOps, Merges);
      return;
    }

    if (MemOps[i].Position > Pos) {
      Pos = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
  if (MergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode, Pred, PredReg,
               Scratch, dl, Regs)) {
    Merges.push_back(prior(Loc));
    for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
      MBB.erase(MemOps[i].MBBI);
      MemOps[i].Merged = true;
    }
  }
}
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
static ARMCC::CondCodes getInstrPredicate(MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }
  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}
static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, ARMCC::CondCodes Pred,
                                       unsigned PredReg) {
  unsigned MyPredReg = 0;
  return (MI && MI->getOpcode() == ARM::SUBri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
          getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
}
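// For example (illustrative): with Base == r0, Bytes == 8 and an AL predicate,
// isMatchingDecrement above matches "sub r0, r0, #8", while isMatchingIncrement
// below matches the corresponding "add r0, r0, #8".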
static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, ARMCC::CondCodes Pred,
                                       unsigned PredReg) {
  unsigned MyPredReg = 0;
  return (MI && MI->getOpcode() == ARM::ADDri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
          getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
}
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
  switch (MI->getOpcode()) {
    return (MI->getNumOperands() - 4) * 4;
    return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;
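  // Presumably the first return above covers LDM / STM, where the four
  // non-register operands are the base, the mode immediate and the predicate
  // pair, so e.g. "ldmia r0, {r1, r2, r3}" has 7 operands and transfers
  // (7 - 4) * 4 = 12 bytes; the second covers FLDM / FSTM, whose AM5 offset
  // field encodes the transfer size in words.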
/// mergeBaseUpdateLSMultiple - Fold preceding/trailing inc/dec of base
/// register into the LDM/STM/FLDM{D|S}/FSTM{D|S} op when possible:
///
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// =>
/// stmia rn!, <ra, rb, rc>
///
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// =>
/// ldmdb rn!, <ra, rb, rc>
static bool mergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      bool &Advance,
                                      MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(0).getReg();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  int Opcode = MI->getOpcode();
  bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::STM;

  if (isAM4) {
    if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
      return false;

    // Can't use the updating AM4 sub-mode if the base register is also a dest
    // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
    for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
      if (MI->getOperand(i).getReg() == Base)
        return false;
    }

    ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
      } else if (Mode == ARM_AM::ib &&
                 isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));

    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
          isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
      } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
                 isMatchingDecrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
  } else {
    // FLDM{D|S}, FSTM{D|S} addressing mode 5 ops.
    if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
      return false;

    ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
    unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));

    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_PRE;
  case ARM::STR: return ARM::STR_PRE;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_POST;
  case ARM::STR: return ARM::STR_POST;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
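  // VFP single / double loads and stores have no pre/post-indexed addressing
  // forms, so both this function and getPreIndexedLoadStoreOpcode above fall
  // back to the writeback form of the corresponding multiple-transfer opcode
  // with a single register in the list.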
/// mergeBaseUpdateLoadStore - Fold preceding/trailing inc/dec of base
/// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
static bool mergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const TargetInstrInfo *TII,
                                     bool &Advance,
                                     MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(1).getReg();
  bool BaseKill = MI->getOperand(1).isKill();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  DebugLoc dl = MI->getDebugLoc();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if ((isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0) ||
      (!isAM2 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0))
    return false;

  bool isLd = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (isLd && MI->getOperand(0).getReg() == Base)
    return false;

  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool DoMerge = false;
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
  unsigned NewOpc = 0;

  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes,
                                            Pred, PredReg)) {
      DoMerge = true;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);

  if (!DoMerge && MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
    if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    } else if (isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
      DoMerge = true;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
  bool isDPR = NewOpc == ARM::FLDMD || NewOpc == ARM::FSTMD;
  unsigned Offset = isAM2 ? ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift)
    : ARM_AM::getAM5Opc((AddSub == ARM_AM::sub) ? ARM_AM::db : ARM_AM::ia,
                        true, isDPR ? 2 : 1);
  if (isLd) {
    if (isAM2)
      // LDR_PRE, LDR_POST;
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, RegState::Define)
        .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
    else
      // FLDMS, FLDMD
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
        .addReg(Base, getKillRegState(BaseKill))
        .addImm(Offset).addImm(Pred).addReg(PredReg)
        .addReg(MI->getOperand(0).getReg(), RegState::Define);
  } else {
    MachineOperand &MO = MI->getOperand(0);
    if (isAM2)
      // STR_PRE, STR_POST;
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()))
        .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
    else
      // FSTMS, FSTMD
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc)).addReg(Base).addImm(Offset)
        .addImm(Pred).addReg(PredReg)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()));
  }
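  // For example (illustrative): "ldr r1, [r0]" followed by "add r0, r0, #4"
  // becomes the post-indexed "ldr r1, [r0], #4", with the add folded away.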
/// isMemoryOp - Returns true if instruction is a memory operation (that this
/// pass is capable of operating on).
static bool isMemoryOp(MachineInstr *MI) {
  int Opcode = MI->getOpcode();
  switch (Opcode) {
  case ARM::LDR:
  case ARM::STR:
    return MI->getOperand(1).isReg() && MI->getOperand(2).getReg() == 0;
  case ARM::FLDS:
  case ARM::FSTS:
    return MI->getOperand(1).isReg();
  case ARM::FLDD:
  case ARM::FSTD:
    return MI->getOperand(1).isReg();
/// AdvanceRS - Advance register scavenger to just before the earliest memory
/// op that is being merged.
void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
  MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
  unsigned Position = MemOps[0].Position;
  for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
    if (MemOps[i].Position < Position) {
      Position = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  if (Loc != MBB.begin())
    RS->forward(prior(Loc));
}
static int getMemoryOpOffset(const MachineInstr *MI) {
  int Opcode = MI->getOpcode();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  unsigned NumOperands = MI->getDesc().getNumOperands();
  unsigned OffField = MI->getOperand(NumOperands-3).getImm();
  int Offset = isAM2
    ? ARM_AM::getAM2Offset(OffField) : ARM_AM::getAM5Offset(OffField) * 4;
  if (isAM2) {
    if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
      Offset = -Offset;
  } else {
    if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
      Offset = -Offset;
  }
  return Offset;
}
/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
/// ops of the same base and incrementing offset into LDM / STM ops.
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  unsigned NumMerges = 0;
  unsigned NumMemOps = 0;
  MemOpQueue MemOps;
  unsigned CurrBase = 0;
  int CurrOpc = -1;
  unsigned CurrSize = 0;
  ARMCC::CondCodes CurrPred = ARMCC::AL;
  unsigned CurrPredReg = 0;
  unsigned Position = 0;
  SmallVector<MachineBasicBlock::iterator,4> Merges;

  RS->enterBasicBlock(&MBB);
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    bool Advance = false;
    bool TryMerge = false;
    bool Clobber = false;
    bool isMemOp = isMemoryOp(MBBI);
    if (isMemOp) {
      int Opcode = MBBI->getOpcode();
      unsigned Size = getLSMultipleTransferSize(MBBI);
      unsigned Base = MBBI->getOperand(1).getReg();
      unsigned PredReg = 0;
      ARMCC::CondCodes Pred = getInstrPredicate(MBBI, PredReg);
      int Offset = getMemoryOpOffset(MBBI);
      // Watch out for:
      // r4 := ldr [r5]
      // r5 := ldr [r5, #4]
      // r6 := ldr [r5, #8]
      //
      // The second ldr has effectively broken the chain even though it
      // looks like the later ldr(s) use the same base register. Try to
      // merge the ldr's so far, including this one. But don't try to
      // combine the following ldr(s).
      Clobber = (Opcode == ARM::LDR && Base == MBBI->getOperand(0).getReg());
      if (CurrBase == 0 && !Clobber) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc = Opcode;
        CurrSize = Size;
        CurrPred = Pred;
        CurrPredReg = PredReg;
        MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
      if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
        // No need to match PredReg.
        // Continue adding to the queue.
        if (Offset > MemOps.back().Offset) {
          MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
        } else {
          for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
               I != E; ++I) {
            if (Offset < I->Offset) {
              MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
              break;
            } else if (Offset == I->Offset) {
              // Collision! This can't be merged!
              break;
            }
    if (TryMerge) {
      if (NumMemOps > 1) {
        // Try to find a free register to use as a new base in case it's
        // needed. First advance to the instruction just before the start of
        // the chain.
        AdvanceRS(MBB, MemOps);
        // Find a scratch register. Make sure it's a call clobbered register or
        // a spilled callee-saved register.
        unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass, true);
        if (Scratch == 0)
          Scratch = RS->FindUnusedReg(&ARM::GPRRegClass,
                                      AFI->getSpilledCSRegisters());
        // Process the load / store instructions.
        RS->forward(prior(MBBI));

        MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize,
                     CurrPred, CurrPredReg, Scratch, MemOps, Merges);
        // Try folding preceding/trailing base inc/dec into the generated
        // LDM / STM ops.
        for (unsigned i = 0, e = Merges.size(); i < e; ++i)
          if (mergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI))
            ++NumMerges;
        NumMerges += Merges.size();

        // Try folding preceding/trailing base inc/dec into those loads /
        // stores that were not merged to form LDM / STM ops.
        for (unsigned i = 0; i != NumMemOps; ++i)
          if (!MemOps[i].Merged)
            if (mergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII, Advance, MBBI))
              ++NumMerges;

        // RS may be pointing to an instruction that's deleted.
        RS->skipTo(prior(MBBI));
      } else if (NumMemOps == 1) {
        // Try folding preceding/trailing base inc/dec into the single
        // load / store.
        if (mergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
          ++NumMerges;
          RS->forward(prior(MBBI));
        }
      }
      CurrBase = 0;
      CurrPred = ARMCC::AL;
      // If iterator hasn't been advanced and this is not a memory op, skip it.
      // It can't start a new chain anyway.
      if (!Advance && !isMemOp && MBBI != E) {
        ++Position;
        ++MBBI;
      }
  return NumMerges > 0;
struct OffsetCompare {
  bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
    int LOffset = getMemoryOpOffset(LHS);
    int ROffset = getMemoryOpOffset(RHS);
    assert(LHS == RHS || LOffset != ROffset);
    return LOffset > ROffset;
  }
};
/// MergeReturnIntoLDM - If this is an exit BB, try merging the return op
/// (bx lr) into the preceding stack restore so it directly restores the value
/// of LR into the PC:
/// ldmfd sp!, {r7, lr}
/// bx lr
/// =>
/// ldmfd sp!, {r7, pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  if (MBB.empty()) return false;

  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  if (MBBI->getOpcode() == ARM::BX_RET && MBBI != MBB.begin()) {
    MachineInstr *PrevMI = prior(MBBI);
    if (PrevMI->getOpcode() == ARM::LDM) {
      MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
      if (MO.getReg() == ARM::LR) {
        PrevMI->setDesc(TII->get(ARM::LDM_RET));
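        // Presumably the merge completes by retargeting LR to PC and dropping
        // the now-redundant return; a minimal sketch of those final steps:
        MO.setReg(ARM::PC);
        MBB.erase(MBBI);
        return true;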
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  const TargetMachine &TM = Fn.getTarget();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = TM.getInstrInfo();
  TRI = TM.getRegisterInfo();
  RS = new RegScavenger();

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock &MBB = *MFI;
    Modified |= LoadStoreMultipleOpti(MBB);
    Modified |= MergeReturnIntoLDM(MBB);
  }

  delete RS;
  return Modified;
}
/// ARMPreAllocLoadStoreOpt - Pre-register allocation pass that moves
/// loads / stores from consecutive locations closer together to make it more
/// likely they will be combined later.
struct VISIBILITY_HIDDEN ARMPreAllocLoadStoreOpt : public MachineFunctionPass {
  static char ID;
  ARMPreAllocLoadStoreOpt() : MachineFunctionPass(&ID) {}

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MachineRegisterInfo *MRI;

  virtual bool runOnMachineFunction(MachineFunction &Fn);

  virtual const char *getPassName() const {
    return "ARM pre- register allocation load / store optimization pass";
  }
  bool RescheduleOps(MachineBasicBlock *MBB,
                     SmallVector<MachineInstr*, 4> &Ops,
                     unsigned Base, bool isLd,
                     DenseMap<MachineInstr*, unsigned> &MI2LocMap);
  bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
};

char ARMPreAllocLoadStoreOpt::ID = 0;
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  TRI = Fn.getTarget().getRegisterInfo();
  MRI = &Fn.getRegInfo();

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI)
    Modified |= RescheduleLoadStoreInstrs(MFI);

  return Modified;
}
static bool IsSafeToMove(bool isLd, unsigned Base,
                         MachineBasicBlock::iterator I,
                         MachineBasicBlock::iterator E,
                         SmallPtrSet<MachineInstr*, 4> MoveOps,
                         const TargetRegisterInfo *TRI) {
  // Are there stores / loads / calls between them?
  // FIXME: This is overly conservative. We should make use of alias
  // information when possible.
  const TargetInstrDesc &TID = I->getDesc();
  if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
    return false;
  if (isLd && TID.mayStore())
    return false;

  // It's not safe to move the first 'str' down.
  if (TID.mayStore() && !MoveOps.count(&*I))
    return false;

  for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
    MachineOperand &MO = I->getOperand(j);
    if (MO.isReg() && MO.isDef() && TRI->regsOverlap(MO.getReg(), Base))
      return false;
  }
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
                                            SmallVector<MachineInstr*, 4> &Ops,
                                            unsigned Base, bool isLd,
                                            DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
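  // For example (illustrative): in
  //   ldr r1, [r0]
  //   add r3, r3, #1
  //   ldr r2, [r0, #4]
  // the two loads share a base and have consecutive offsets, so they can be
  // moved next to each other, making them candidates for the post-RA pass to
  // turn into a single ldmia.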
  // Sort by offset (in reverse order).
  std::sort(Ops.begin(), Ops.end(), OffsetCompare());

  // The loads / stores of the same base are in order. Scan them from first to
  // last and check for the following:
  // 1. Any def of base.
  while (Ops.size() > 1) {
    unsigned FirstLoc = ~0U;
    unsigned LastLoc = 0;
    MachineInstr *FirstOp = 0;
    MachineInstr *LastOp = 0;
    int LastOffset = 0;
    unsigned LastBytes = 0;
    unsigned NumMove = 0;
    for (int i = Ops.size() - 1; i >= 0; --i) {
      MachineInstr *Op = Ops[i];
      unsigned Loc = MI2LocMap[Op];
      if (Loc <= FirstLoc) {
        FirstLoc = Loc;
        FirstOp = Op;
      }
      if (Loc >= LastLoc) {
        LastLoc = Loc;
        LastOp = Op;
      }

      int Offset = getMemoryOpOffset(Op);
      unsigned Bytes = getLSMultipleTransferSize(Op);
      if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
        break;
    }

    SmallPtrSet<MachineInstr*, 4> MoveOps;
    for (int i = NumMove-1; i >= 0; --i)
      MoveOps.insert(Ops[i]);

    // Be conservative, if the instructions are too far apart, don't
    // move them. We want to limit the increase of register pressure.
    bool DoMove = (LastLoc - FirstLoc) < NumMove*4;
    if (DoMove)
      DoMove = IsSafeToMove(isLd, Base, FirstOp, LastOp, MoveOps, TRI);
    if (!DoMove) {
      for (unsigned i = 0; i != NumMove; ++i)
        Ops.pop_back();
    } else {
      // This is the new location for the loads / stores.
      MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
      while (InsertPos != MBB->end() && MoveOps.count(InsertPos))
        ++InsertPos;
      for (unsigned i = 0; i != NumMove; ++i) {
        MachineInstr *Op = Ops.back();
        Ops.pop_back();
        MBB->splice(InsertPos, MBB, Op);
      }

      NumLdStMoved += NumMove;
bool
ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
  bool RetVal = false;

  DenseMap<MachineInstr*, unsigned> MI2LocMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
  SmallVector<unsigned, 4> LdBases;
  SmallVector<unsigned, 4> StBases;

  unsigned Loc = 0;
  MachineBasicBlock::iterator MBBI = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  while (MBBI != E) {
    for (; MBBI != E; ++MBBI) {
      MachineInstr *MI = MBBI;
      const TargetInstrDesc &TID = MI->getDesc();
      if (TID.isCall() || TID.isTerminator()) {
        // Stop at barriers.
        ++MBBI;
        break;
      }

      MI2LocMap[MI] = Loc++;
      if (!isMemoryOp(MI))
        continue;
      unsigned PredReg = 0;
      if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
        continue;

      int Opcode = MI->getOpcode();
      bool isLd = Opcode == ARM::LDR ||
                  Opcode == ARM::FLDS || Opcode == ARM::FLDD;
      unsigned Base = MI->getOperand(1).getReg();
      int Offset = getMemoryOpOffset(MI);

      bool StopHere = false;
      if (isLd) {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2LdsMap.find(Base);
        if (BI != Base2LdsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(MI);
        } else {
          SmallVector<MachineInstr*, 4> MIs;
          MIs.push_back(MI);
          Base2LdsMap[Base] = MIs;
          LdBases.push_back(Base);
        }
      } else {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2StsMap.find(Base);
        if (BI != Base2StsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(MI);
        } else {
          SmallVector<MachineInstr*, 4> MIs;
          MIs.push_back(MI);
          Base2StsMap[Base] = MIs;
          StBases.push_back(Base);
        }
      // Found a duplicate (a base+offset combination that's seen earlier).
      // Backtrack.
    // Re-schedule loads.
    for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
      unsigned Base = LdBases[i];
      SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base];
      if (Lds.size() > 1)
        RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
    }

    // Re-schedule stores.
    for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
      unsigned Base = StBases[i];
      SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base];
      if (Sts.size() > 1)
        RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
    }

    Base2LdsMap.clear();
    Base2StsMap.clear();
    LdBases.clear();
    StBases.clear();
/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
/// optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
  if (PreAlloc)
    return new ARMPreAllocLoadStoreOpt();
  return new ARMLoadStoreOpt();
}