1 //===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains a pass that performs load / store related peephole
11 // optimizations. This pass should be run after register allocation.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "arm-ldst-opt"
17 #include "ARMAddressingModes.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMRegisterInfo.h"
20 #include "llvm/DerivedTypes.h"
21 #include "llvm/CodeGen/MachineBasicBlock.h"
22 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/RegisterScavenging.h"
27 #include "llvm/Target/TargetData.h"
28 #include "llvm/Target/TargetInstrInfo.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallSet.h"
36 #include "llvm/ADT/SmallVector.h"
37 #include "llvm/ADT/Statistic.h"
// Pass-wide statistics counters (reported with -stats). They count the
// multi-register transfers this file's passes create, the single ops moved
// before allocation, and the ldrd/strd pairs formed or split back apart.
40 STATISTIC(NumLDMGened , "Number of ldm instructions generated");
41 STATISTIC(NumSTMGened , "Number of stm instructions generated");
42 STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
43 STATISTIC(NumFSTMGened, "Number of fstm instructions generated");
44 STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
45 STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
46 STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
47 STATISTIC(NumLDRD2LDM, "Number of ldrd instructions turned back into ldm");
48 STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
49 STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
50 STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
52 /// ARMLoadStoreOpt - Post-register-allocation pass that combines
53 /// load / store instructions to form ldm / stm instructions.
// NOTE(review): this listing elides several lines of the class body
// (e.g. the RegScavenger/ARMFunctionInfo members used below and the
// closing brace) — confirm against the full source.
56 struct VISIBILITY_HIDDEN ARMLoadStoreOpt : public MachineFunctionPass {
58 ARMLoadStoreOpt() : MachineFunctionPass(&ID) {}
60 const TargetInstrInfo *TII;
61 const TargetRegisterInfo *TRI;
65 virtual bool runOnMachineFunction(MachineFunction &Fn);
67 virtual const char *getPassName() const {
68 return "ARM load / store optimization pass";
// One candidate memory op queued for merging: its immediate offset from the
// common base register, its ordinal position within the block, and an
// iterator to the instruction itself.
72 struct MemOpQueueEntry {
75 MachineBasicBlock::iterator MBBI;
77 MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
78 : Offset(o), Position(p), MBBI(i), Merged(false) {};
80 typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
81 typedef MemOpQueue::iterator MemOpQueueIter;
83 bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
84 int Offset, unsigned Base, bool BaseKill, int Opcode,
85 ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
86 DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs);
87 void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
88 int Opcode, unsigned Size,
89 ARMCC::CondCodes Pred, unsigned PredReg,
90 unsigned Scratch, MemOpQueue &MemOps,
91 SmallVector<MachineBasicBlock::iterator, 4> &Merges);
93 void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
94 bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
95 MachineBasicBlock::iterator &MBBI);
96 bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
97 bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
// Pass identification token; its address is the unique pass ID.
99 char ARMLoadStoreOpt::ID = 0;
102 static int getLoadStoreMultipleOpcode(int Opcode) {
127 /// MergeOps - Create and insert a LDM or STM with Base as base register and
128 /// registers in Regs as the register operands that would be loaded / stored.
129 /// It returns true if the transformation is done.
// NOTE(review): several interior lines (the NumRegs<=1 early return, Mode
// assignments for the ib/da/db cases, and closing braces) are elided from
// this listing.
131 ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
132 MachineBasicBlock::iterator MBBI,
133 int Offset, unsigned Base, bool BaseKill,
134 int Opcode, ARMCC::CondCodes Pred,
135 unsigned PredReg, unsigned Scratch, DebugLoc dl,
136 SmallVector<std::pair<unsigned, bool>, 8> &Regs) {
137 // Only a single register to load / store. Don't bother.
138 unsigned NumRegs = Regs.size();
// AM4 (LDM/STM) can express ia/ib/da/db sub-modes directly, so a starting
// offset of +4, -4*(n-1) or -4*n needs no extra base-adjusting instruction.
142 ARM_AM::AMSubMode Mode = ARM_AM::ia;
143 bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
144 if (isAM4 && Offset == 4)
146 else if (isAM4 && Offset == -4 * (int)NumRegs + 4)
148 else if (isAM4 && Offset == -4 * (int)NumRegs)
150 else if (Offset != 0) {
151 // If starting offset isn't zero, insert a MI to materialize a new base.
152 // But only do so if it is cost effective, i.e. merging more than two
158 if (Opcode == ARM::LDR)
159 // If it is a load, then just use one of the destination register to
160 // use as the new base.
161 NewBase = Regs[NumRegs-1].first;
163 // Use the scratch register to use as a new base.
168 int BaseOpc = ARM::ADDri;
170 BaseOpc = ARM::SUBri;
// ADDri/SUBri take an ARM shifter-operand immediate; if Offset can't be
// encoded as one, give up on the merge.
173 int ImmedOffset = ARM_AM::getSOImmVal(Offset);
174 if (ImmedOffset == -1)
175 return false; // Probably not worth it then.
177 BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
178 .addReg(Base, getKillRegState(BaseKill)).addImm(ImmedOffset)
179 .addImm(Pred).addReg(PredReg).addReg(0);
181 BaseKill = true; // New base is always killed right its use.
184 bool isDPR = Opcode == ARM::FLDD || Opcode == ARM::FSTD;
185 bool isDef = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
186 Opcode = getLoadStoreMultipleOpcode(Opcode);
// AM4 (LDM/STM) encodes only the sub-mode; AM5 (FLDM/FSTM) also encodes the
// transfer count — doubled for D registers, which occupy two words each.
187 MachineInstrBuilder MIB = (isAM4)
188 ? BuildMI(MBB, MBBI, dl, TII->get(Opcode))
189 .addReg(Base, getKillRegState(BaseKill))
190 .addImm(ARM_AM::getAM4ModeImm(Mode)).addImm(Pred).addReg(PredReg)
191 : BuildMI(MBB, MBBI, dl, TII->get(Opcode))
192 .addReg(Base, getKillRegState(BaseKill))
193 .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs))
194 .addImm(Pred).addReg(PredReg);
195 for (unsigned i = 0; i != NumRegs; ++i)
196 MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
197 | getKillRegState(Regs[i].second));
202 /// MergeLDR_STR - Merge a number of load / store instructions into one or more
203 /// load / store multiple instructions.
// Walks MemOps from SIndex, growing a register list while offsets stay
// consecutive and register numbers ascend; on a break it merges what it has
// and recurses on the remainder. Merged instructions are erased and flagged.
// NOTE(review): interior lines (PRegNum updates, the recursion's trailing
// arguments, closing braces) are elided from this listing.
205 ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
206 unsigned Base, int Opcode, unsigned Size,
207 ARMCC::CondCodes Pred, unsigned PredReg,
208 unsigned Scratch, MemOpQueue &MemOps,
209 SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
210 bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
211 int Offset = MemOps[SIndex].Offset;
212 int SOffset = Offset;
213 unsigned Pos = MemOps[SIndex].Position;
214 MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
215 DebugLoc dl = Loc->getDebugLoc();
216 unsigned PReg = Loc->getOperand(0).getReg();
217 unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
218 bool isKill = Loc->getOperand(0).isKill();
220 SmallVector<std::pair<unsigned,bool>, 8> Regs;
221 Regs.push_back(std::make_pair(PReg, isKill));
222 for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
223 int NewOffset = MemOps[i].Offset;
224 unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg();
225 unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
226 isKill = MemOps[i].MBBI->getOperand(0).isKill();
227 // AM4 - register numbers in ascending order.
228 // AM5 - consecutive register numbers in ascending order.
229 if (NewOffset == Offset + (int)Size &&
230 ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
232 Regs.push_back(std::make_pair(Reg, isKill));
235 // Can't merge this in. Try merge the earlier ones first.
// Base is passed with BaseKill=false here: later ops in the chain still use
// it, so the partial merge must not kill it.
236 if (MergeOps(MBB, ++Loc, SOffset, Base, false, Opcode, Pred, PredReg,
237 Scratch, dl, Regs)) {
238 Merges.push_back(prior(Loc));
239 for (unsigned j = SIndex; j < i; ++j) {
240 MBB.erase(MemOps[j].MBBI);
241 MemOps[j].Merged = true;
244 MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, PredReg, Scratch,
249 if (MemOps[i].Position > Pos) {
250 Pos = MemOps[i].Position;
251 Loc = MemOps[i].MBBI;
// End of chain: the merged instruction may kill Base if the last single op
// did (true => look only for a kill use of Base).
255 bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
256 if (MergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode, Pred, PredReg,
257 Scratch, dl, Regs)) {
258 Merges.push_back(prior(Loc));
259 for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
260 MBB.erase(MemOps[i].MBBI);
261 MemOps[i].Merged = true;
268 /// getInstrPredicate - If instruction is predicated, returns its predicate
269 /// condition, otherwise returns AL. It also returns the condition code
270 /// register by reference.
// NOTE(review): the unpredicated early-out (original lines 273-277, which per
// the /// comment must set PredReg and return ARMCC::AL when no predicate
// operand exists) is elided from this listing.
271 static ARMCC::CondCodes getInstrPredicate(MachineInstr *MI, unsigned &PredReg) {
272 int PIdx = MI->findFirstPredOperandIdx();
// The CC register operand immediately follows the condition-code immediate.
278 PredReg = MI->getOperand(PIdx+1).getReg();
279 return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
// Returns true if MI is "SUBri Base, Base, #Bytes" executed under exactly the
// given predicate (condition and CC register) — i.e. a base decrement that a
// neighboring load/store-multiple could absorb as writeback.
// NOTE(review): the final parameter line (original 284, the PredReg
// declaration) is elided from this listing.
282 static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
283 unsigned Bytes, ARMCC::CondCodes Pred,
285 unsigned MyPredReg = 0;
286 return (MI && MI->getOpcode() == ARM::SUBri &&
287 MI->getOperand(0).getReg() == Base &&
288 MI->getOperand(1).getReg() == Base &&
289 ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
290 getInstrPredicate(MI, MyPredReg) == Pred &&
291 MyPredReg == PredReg);
// Mirror of isMatchingDecrement: true if MI is "ADDri Base, Base, #Bytes"
// under exactly the given predicate — a base increment absorbable as
// writeback. NOTE(review): the final parameter line (original 296) is elided
// from this listing.
294 static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
295 unsigned Bytes, ARMCC::CondCodes Pred,
297 unsigned MyPredReg = 0;
298 return (MI && MI->getOpcode() == ARM::ADDri &&
299 MI->getOperand(0).getReg() == Base &&
300 MI->getOperand(1).getReg() == Base &&
301 ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
302 getInstrPredicate(MI, MyPredReg) == Pred &&
303 MyPredReg == PredReg);
// Returns the number of bytes an instruction transfers. For LDM/STM-style ops
// it is 4 bytes per register operand (operand count minus the 4 non-register
// operands); for AM5 FP ops the count is read from the AM5 offset field.
// NOTE(review): the switch's case labels (original lines 308-318 and 320-323)
// are elided from this listing, so which opcodes hit each return — and any
// default — must be confirmed against the full source.
306 static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
307 switch (MI->getOpcode()) {
319 return (MI->getNumOperands() - 4) * 4;
324 return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;
328 /// mergeBaseUpdateLSMultiple - Fold proceeding/trailing inc/dec of base
329 /// register into the LDM/STM/FLDM{D|S}/FSTM{D|S} op when possible:
331 /// stmia rn, <ra, rb, rc>
332 /// rn := rn + 4 * 3;
334 /// stmia rn!, <ra, rb, rc>
336 /// rn := rn - 4 * 3;
337 /// ldmia rn, <ra, rb, rc>
339 /// ldmdb rn!, <ra, rb, rc>
// NOTE(review): the early returns after the writeback-flag checks, the
// erasures of the absorbed ADD/SUB, statistics updates and closing braces are
// elided from this listing.
340 static bool mergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
341 MachineBasicBlock::iterator MBBI,
343 MachineBasicBlock::iterator &I) {
344 MachineInstr *MI = MBBI;
345 unsigned Base = MI->getOperand(0).getReg();
346 unsigned Bytes = getLSMultipleTransferSize(MI);
347 unsigned PredReg = 0;
348 ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
349 int Opcode = MI->getOpcode();
350 bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::STM;
// AM4 path: bail out if writeback is already set.
353 if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
356 // Can't use the updating AM4 sub-mode if the base register is also a dest
357 // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
358 for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
359 if (MI->getOperand(i).getReg() == Base)
363 ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
// A decrement *before* the transfer flips ia->db / ib->da with writeback.
364 if (MBBI != MBB.begin()) {
365 MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
366 if (Mode == ARM_AM::ia &&
367 isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
368 MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
371 } else if (Mode == ARM_AM::ib &&
372 isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
373 MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
// A matching inc/dec *after* the transfer keeps the sub-mode and just sets
// the writeback flag.
379 if (MBBI != MBB.end()) {
380 MachineBasicBlock::iterator NextMBBI = next(MBBI);
381 if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
382 isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
383 MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
390 } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
391 isMatchingDecrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
392 MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
402 // FLDM{D|S}, FSTM{D|S} addressing mode 5 ops.
403 if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
406 ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
407 unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
408 if (MBBI != MBB.begin()) {
409 MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
410 if (Mode == ARM_AM::ia &&
411 isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
412 MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
418 if (MBBI != MBB.end()) {
419 MachineBasicBlock::iterator NextMBBI = next(MBBI);
420 if (Mode == ARM_AM::ia &&
421 isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
422 MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
// Maps a single load/store opcode to its pre-indexed (base-writeback-before)
// form. FP loads/stores have no pre-indexed variant, so they map to the
// one-register FLDM/FSTM multiple instead. NOTE(review): the opening
// "switch (Opc)" line and the default/closing lines are elided from this
// listing.
436 static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
438 case ARM::LDR: return ARM::LDR_PRE;
439 case ARM::STR: return ARM::STR_PRE;
440 case ARM::FLDS: return ARM::FLDMS;
441 case ARM::FLDD: return ARM::FLDMD;
442 case ARM::FSTS: return ARM::FSTMS;
443 case ARM::FSTD: return ARM::FSTMD;
// Maps a single load/store opcode to its post-indexed (base-writeback-after)
// form; FP ops again fall back to the FLDM/FSTM multiples. NOTE(review): the
// opening "switch (Opc)" line and the default/closing lines are elided from
// this listing.
449 static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
451 case ARM::LDR: return ARM::LDR_POST;
452 case ARM::STR: return ARM::STR_POST;
453 case ARM::FLDS: return ARM::FLDMS;
454 case ARM::FLDD: return ARM::FLDMD;
455 case ARM::FSTS: return ARM::FSTMS;
456 case ARM::FSTD: return ARM::FSTMD;
462 /// mergeBaseUpdateLoadStore - Fold proceeding/trailing inc/dec of base
463 /// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
// NOTE(review): several interior lines (the DoMerge/NewOpc assignments before
// some branches, the "if (!DoMerge) return false" guard, erasure of the
// absorbed ADD/SUB and of MI, and closing braces) are elided from this
// listing.
464 static bool mergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
465 MachineBasicBlock::iterator MBBI,
466 const TargetInstrInfo *TII,
468 MachineBasicBlock::iterator &I) {
469 MachineInstr *MI = MBBI;
470 unsigned Base = MI->getOperand(1).getReg();
471 bool BaseKill = MI->getOperand(1).isKill();
472 unsigned Bytes = getLSMultipleTransferSize(MI);
473 int Opcode = MI->getOpcode();
474 DebugLoc dl = MI->getDebugLoc();
475 bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
// Only zero-offset forms can absorb a separate base update.
476 if ((isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0) ||
477 (!isAM2 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0))
480 bool isLd = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
481 // Can't do the merge if the destination register is the same as the would-be
482 // writeback register.
483 if (isLd && MI->getOperand(0).getReg() == Base)
486 unsigned PredReg = 0;
487 ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
488 bool DoMerge = false;
489 ARM_AM::AddrOpc AddSub = ARM_AM::add;
// An inc/dec just before the op becomes a pre-indexed form...
491 if (MBBI != MBB.begin()) {
492 MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
493 if (isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
495 AddSub = ARM_AM::sub;
496 NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
497 } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes,
500 NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
// ...and one just after it becomes a post-indexed form.
506 if (!DoMerge && MBBI != MBB.end()) {
507 MachineBasicBlock::iterator NextMBBI = next(MBBI);
508 if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
510 AddSub = ARM_AM::sub;
511 NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
512 } else if (isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
514 NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
528 bool isDPR = NewOpc == ARM::FLDMD || NewOpc == ARM::FSTMD;
// AM2 encodes add/sub + byte offset; the FP path encodes an AM5 ia/db
// one-or-two-word writeback transfer (2 words for a D register).
529 unsigned Offset = isAM2 ? ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift)
530 : ARM_AM::getAM5Opc((AddSub == ARM_AM::sub) ? ARM_AM::db : ARM_AM::ia,
531 true, isDPR ? 2 : 1);
534 // LDR_PRE, LDR_POST;
535 BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
536 .addReg(Base, RegState::Define)
537 .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
// FLDM with writeback: base first, destination register last.
540 BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
541 .addReg(Base, getKillRegState(BaseKill))
542 .addImm(Offset).addImm(Pred).addReg(PredReg)
543 .addReg(MI->getOperand(0).getReg(), RegState::Define);
545 MachineOperand &MO = MI->getOperand(0);
547 // STR_PRE, STR_POST;
548 BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
549 .addReg(MO.getReg(), getKillRegState(MO.isKill()))
550 .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
553 BuildMI(MBB, MBBI, dl, TII->get(NewOpc)).addReg(Base).addImm(Offset)
554 .addImm(Pred).addReg(PredReg)
555 .addReg(MO.getReg(), getKillRegState(MO.isKill()));
562 /// isMemoryOp - Returns true if instruction is a memory operations (that this
563 /// pass is capable of operating on).
// NOTE(review): the switch header and case labels (original lines 566-569,
// 571-572, 574-575, plus the default/return) are elided from this listing;
// which opcodes reach each return must be confirmed against the full source.
// The first return additionally requires a zero (absent) offset register.
564 static bool isMemoryOp(MachineInstr *MI) {
565 int Opcode = MI->getOpcode();
570 return MI->getOperand(1).isReg() && MI->getOperand(2).getReg() == 0;
573 return MI->getOperand(1).isReg();
576 return MI->getOperand(1).isReg();
581 /// AdvanceRS - Advance register scavenger to just before the earliest memory
582 /// op that is being merged.
// Scans MemOps for the entry with the smallest Position, then forwards the
// scavenger to the instruction preceding it so a scratch register can be
// found that is live-free across the whole merged range.
583 void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
584 MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
585 unsigned Position = MemOps[0].Position;
586 for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
587 if (MemOps[i].Position < Position) {
588 Position = MemOps[i].Position;
589 Loc = MemOps[i].MBBI;
// If the earliest op is the first instruction there is nothing before it to
// forward past.
593 if (Loc != MBB.begin())
594 RS->forward(prior(Loc));
// Decodes the signed byte offset of a load/store from its addressing-mode
// immediate: AM2 for LDR/STR, AM3 for LDRD/STRD, AM5 (word-scaled, hence *4)
// for the FP ops. The sub flag of the matching mode negates the magnitude.
// NOTE(review): the "int Offset = ..." declaration head (original line 603)
// and the negation/return statements are elided from this listing.
597 static int getMemoryOpOffset(const MachineInstr *MI) {
598 int Opcode = MI->getOpcode();
599 bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
600 bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
601 unsigned NumOperands = MI->getDesc().getNumOperands();
602 unsigned OffField = MI->getOperand(NumOperands-3).getImm();
604 ? ARM_AM::getAM2Offset(OffField)
605 : (isAM3 ? ARM_AM::getAM3Offset(OffField)
606 : ARM_AM::getAM5Offset(OffField) * 4);
608 if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
611 if (ARM_AM::getAM3Op(OffField) == ARM_AM::sub)
614 if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
// Emits a single LDR or STR (opcode NewOpc) before MBBI, encoding OffImm as
// an AM2 add/sub offset. The first BuildMI form marks Reg as a def (load,
// with RegDeadKill meaning dead); the second marks it kill (store).
// NOTE(review): the branch selecting between the two forms and the lines
// consuming Offset/isDef/dl are partially elided from this listing — note
// that the visible code uses MBBI->getDebugLoc() rather than the dl
// parameter; confirm against the full source.
620 static void InsertLDR_STR(MachineBasicBlock &MBB,
621 MachineBasicBlock::iterator &MBBI,
622 int OffImm, bool isDef,
623 DebugLoc dl, unsigned NewOpc,
624 unsigned Reg, bool RegDeadKill,
625 unsigned BaseReg, bool BaseKill,
626 unsigned OffReg, bool OffKill,
627 ARMCC::CondCodes Pred, unsigned PredReg,
628 const TargetInstrInfo *TII) {
// Negative immediates are encoded as (sub, magnitude) in the AM2 opcode.
631 Offset = ARM_AM::getAM2Opc(ARM_AM::sub, -OffImm, ARM_AM::no_shift);
633 Offset = ARM_AM::getAM2Opc(ARM_AM::add, OffImm, ARM_AM::no_shift);
635 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
636 .addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
637 .addReg(BaseReg, getKillRegState(BaseKill))
638 .addReg(OffReg, getKillRegState(OffKill))
640 .addImm(Pred).addReg(PredReg);
642 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
643 .addReg(Reg, getKillRegState(RegDeadKill))
644 .addReg(BaseReg, getKillRegState(BaseKill))
645 .addReg(OffReg, getKillRegState(OffKill))
647 .addImm(Pred).addReg(PredReg);
// FixInvalidRegPairOp - LDRD/STRD require an even/odd consecutive register
// pair. When the allocated pair does not satisfy that, rewrite the
// instruction either as a 2-register LDM/STM (ascending regs, zero offset)
// or as two separate LDR/STRs. NOTE(review): several interior lines (the
// early "return false" for valid pairs, erasure of the original MI,
// statistics updates, closing braces) are elided from this listing.
650 bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
651 MachineBasicBlock::iterator &MBBI) {
652 MachineInstr *MI = &*MBBI;
653 unsigned Opcode = MI->getOpcode();
654 if (Opcode == ARM::LDRD || Opcode == ARM::STRD) {
655 unsigned EvenReg = MI->getOperand(0).getReg();
656 unsigned OddReg = MI->getOperand(1).getReg();
657 unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
658 unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);
// Even register number followed immediately by the odd one: the pair is
// already valid for LDRD/STRD.
659 if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
662 bool isLd = Opcode == ARM::LDRD;
// For a load the pair registers are defs (dead flag applies); for a store
// they are uses (kill flag applies).
663 bool EvenDeadKill = isLd ?
664 MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
665 bool OddDeadKill = isLd ?
666 MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
667 const MachineOperand &BaseOp = MI->getOperand(2);
668 unsigned BaseReg = BaseOp.getReg();
669 bool BaseKill = BaseOp.isKill();
670 const MachineOperand &OffOp = MI->getOperand(3);
671 unsigned OffReg = OffOp.getReg();
672 bool OffKill = OffOp.isKill();
673 int OffImm = getMemoryOpOffset(MI);
674 unsigned PredReg = 0;
675 ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
677 if (OddRegNum > EvenRegNum && OffReg == 0 && OffImm == 0) {
678 // Ascending register numbers and no offset. It's safe to change it to a
680 unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDM : ARM::STM;
682 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
683 .addReg(BaseReg, getKillRegState(BaseKill))
684 .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
685 .addImm(Pred).addReg(PredReg)
686 .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
687 .addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
690 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
691 .addReg(BaseReg, getKillRegState(BaseKill))
692 .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
693 .addImm(Pred).addReg(PredReg)
694 .addReg(EvenReg, getKillRegState(EvenDeadKill))
695 .addReg(OddReg, getKillRegState(OddDeadKill));
699 // Split into two instructions.
700 unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDR : ARM::STR;
701 DebugLoc dl = MBBI->getDebugLoc();
702 // If this is a load and base register is killed, it may have been
703 // re-defed by the load, make sure the first load does not clobber it.
// In that case emit the odd (higher-offset) half first; kill flags on
// base/offset go on whichever half is emitted last.
705 (BaseKill || OffKill) &&
706 (TRI->regsOverlap(EvenReg, BaseReg) ||
707 (OffReg && TRI->regsOverlap(EvenReg, OffReg)))) {
708 assert(!TRI->regsOverlap(OddReg, BaseReg) &&
709 (!OffReg || !TRI->regsOverlap(OddReg, OffReg)));
710 InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc, OddReg, OddDeadKill,
711 BaseReg, false, OffReg, false, Pred, PredReg, TII);
712 InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc, EvenReg, EvenDeadKill,
713 BaseReg, BaseKill, OffReg, OffKill, Pred, PredReg, TII);
715 InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
716 EvenReg, EvenDeadKill, BaseReg, false, OffReg, false,
718 InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
719 OddReg, OddDeadKill, BaseReg, BaseKill, OffReg, OffKill,
734 /// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
735 /// ops of the same base and incrementing offset into LDM / STM ops.
// One forward walk over the block. Memory ops sharing a base/opcode/predicate
// are collected in offset-sorted MemOps; when the chain breaks, MergeLDR_STR
// combines them and the base-update folders run on the results. A scratch
// register for base materialization comes from the register scavenger.
// NOTE(review): many interior lines (the main while-loop header, CurrOpc/
// CurrSize assignments, Advance/TryMerge updates, queue clearing, iterator
// advancement and closing braces) are elided from this listing.
736 bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
737 unsigned NumMerges = 0;
738 unsigned NumMemOps = 0;
740 unsigned CurrBase = 0;
742 unsigned CurrSize = 0;
743 ARMCC::CondCodes CurrPred = ARMCC::AL;
744 unsigned CurrPredReg = 0;
745 unsigned Position = 0;
746 SmallVector<MachineBasicBlock::iterator,4> Merges;
748 RS->enterBasicBlock(&MBB);
749 MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
// Invalid LDRD/STRD pairs are rewritten before anything else looks at them.
751 if (FixInvalidRegPairOp(MBB, MBBI))
754 bool Advance = false;
755 bool TryMerge = false;
756 bool Clobber = false;
758 bool isMemOp = isMemoryOp(MBBI);
760 int Opcode = MBBI->getOpcode();
761 unsigned Size = getLSMultipleTransferSize(MBBI);
762 unsigned Base = MBBI->getOperand(1).getReg();
763 unsigned PredReg = 0;
764 ARMCC::CondCodes Pred = getInstrPredicate(MBBI, PredReg);
765 int Offset = getMemoryOpOffset(MBBI);
768 // r5 := ldr [r5, #4]
769 // r6 := ldr [r5, #8]
771 // The second ldr has effectively broken the chain even though it
772 // looks like the later ldr(s) use the same base register. Try to
773 // merge the ldr's so far, including this one. But don't try to
774 // combine the following ldr(s).
775 Clobber = (Opcode == ARM::LDR && Base == MBBI->getOperand(0).getReg());
776 if (CurrBase == 0 && !Clobber) {
777 // Start of a new chain.
782 CurrPredReg = PredReg;
783 MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
792 if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
793 // No need to match PredReg.
794 // Continue adding to the queue.
795 if (Offset > MemOps.back().Offset) {
796 MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
// Out-of-order offset: insert at the sorted position instead of appending.
800 for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
802 if (Offset < I->Offset) {
803 MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
807 } else if (Offset == I->Offset) {
808 // Collision! This can't be merged!
825 // Try to find a free register to use as a new base in case it's needed.
826 // First advance to the instruction just before the start of the chain.
827 AdvanceRS(MBB, MemOps);
828 // Find a scratch register. Make sure it's a call clobbered register or
829 // a spilled callee-saved register.
830 unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass, true);
832 Scratch = RS->FindUnusedReg(&ARM::GPRRegClass,
833 AFI->getSpilledCSRegisters());
834 // Process the load / store instructions.
835 RS->forward(prior(MBBI));
839 MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize,
840 CurrPred, CurrPredReg, Scratch, MemOps, Merges);
842 // Try folding preceeding/trailing base inc/dec into the generated
844 for (unsigned i = 0, e = Merges.size(); i < e; ++i)
845 if (mergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI))
847 NumMerges += Merges.size();
849 // Try folding preceeding/trailing base inc/dec into those load/store
850 // that were not merged to form LDM/STM ops.
851 for (unsigned i = 0; i != NumMemOps; ++i)
852 if (!MemOps[i].Merged)
853 if (mergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI))
856 // RS may be pointing to an instruction that's deleted.
857 RS->skipTo(prior(MBBI));
858 } else if (NumMemOps == 1) {
859 // Try folding preceeding/trailing base inc/dec into the single
861 if (mergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
863 RS->forward(prior(MBBI));
// Reset chain state for the next run of memory ops.
870 CurrPred = ARMCC::AL;
877 // If iterator hasn't been advanced and this is not a memory op, skip it.
878 // It can't start a new chain anyway.
879 if (!Advance && !isMemOp && MBBI != E) {
885 return NumMerges > 0;
// Comparator for std::sort over load/store MachineInstrs: orders by memory
// offset, descending (RescheduleOps sorts "in reverse order" with this).
// Equal offsets for distinct instructions would make the order ambiguous,
// hence the assert.
889 struct OffsetCompare {
890 bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
891 int LOffset = getMemoryOpOffset(LHS);
892 int ROffset = getMemoryOpOffset(RHS);
893 assert(LHS == RHS || LOffset != ROffset);
894 return LOffset > ROffset;
899 /// MergeReturnIntoLDM - If this is a exit BB, try merging the return op
900 /// (bx lr) into the preceeding stack restore so it directly restore the value
902 /// ldmfd sp!, {r7, lr}
905 /// ldmfd sp!, {r7, pc}
// If the block ends in BX_RET immediately after an LDM whose last restored
// register is LR, retarget the LDM to the return form (LDM_RET) so the
// restored value lands in PC. NOTE(review): the lines that rewrite the LR
// operand to PC, erase the BX_RET, and the returns are elided from this
// listing.
906 bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
907 if (MBB.empty()) return false;
909 MachineBasicBlock::iterator MBBI = prior(MBB.end());
910 if (MBBI->getOpcode() == ARM::BX_RET && MBBI != MBB.begin()) {
911 MachineInstr *PrevMI = prior(MBBI);
912 if (PrevMI->getOpcode() == ARM::LDM) {
913 MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
914 if (MO.getReg() == ARM::LR) {
915 PrevMI->setDesc(TII->get(ARM::LDM_RET));
// Pass entry point: caches target info, allocates the register scavenger,
// then runs the multiple-merge and return-merge transforms on every block.
// NOTE(review): the "delete RS" / "return Modified" tail (original lines
// 938-941) is elided from this listing — confirm the scavenger is freed.
925 bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
926 const TargetMachine &TM = Fn.getTarget();
927 AFI = Fn.getInfo<ARMFunctionInfo>();
928 TII = TM.getInstrInfo();
929 TRI = TM.getRegisterInfo();
930 RS = new RegScavenger();
932 bool Modified = false;
933 for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
935 MachineBasicBlock &MBB = *MFI;
936 Modified |= LoadStoreMultipleOpti(MBB);
937 Modified |= MergeReturnIntoLDM(MBB);
945 /// ARMPreAllocLoadStoreOpt - Pre-register-allocation pass that moves
946 /// loads / stores from consecutive locations close together to make it more
947 /// likely they will be combined later.
// NOTE(review): this listing elides the pass ID declaration and the struct's
// closing brace — confirm against the full source.
950 struct VISIBILITY_HIDDEN ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
952 ARMPreAllocLoadStoreOpt() : MachineFunctionPass(&ID) {}
954 const TargetData *TD;
955 const TargetInstrInfo *TII;
956 const TargetRegisterInfo *TRI;
957 const ARMSubtarget *STI;
958 MachineRegisterInfo *MRI;
960 virtual bool runOnMachineFunction(MachineFunction &Fn);
962 virtual const char *getPassName() const {
963 return "ARM pre- register allocation load / store optimization pass";
967 bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
968 unsigned &NewOpc, unsigned &EvenReg,
969 unsigned &OddReg, unsigned &BaseReg,
970 unsigned &OffReg, unsigned &Offset,
971 unsigned &PredReg, ARMCC::CondCodes &Pred);
972 bool RescheduleOps(MachineBasicBlock *MBB,
973 SmallVector<MachineInstr*, 4> &Ops,
974 unsigned Base, bool isLd,
975 DenseMap<MachineInstr*, unsigned> &MI2LocMap);
976 bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
// Pass identification token; its address is the unique pass ID.
978 char ARMPreAllocLoadStoreOpt::ID = 0;
// Pass entry point: caches target data / instr / register info and the
// subtarget, then reschedules loads and stores block by block.
// NOTE(review): the "return Modified" tail is elided from this listing.
981 bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
982 TD = Fn.getTarget().getTargetData();
983 TII = Fn.getTarget().getInstrInfo();
984 TRI = Fn.getTarget().getRegisterInfo();
985 STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
986 MRI = &Fn.getRegInfo();
988 bool Modified = false;
989 for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
991 Modified |= RescheduleLoadStoreInstrs(MFI);
// Decides whether the memory ops in MemOps (which all access registers in
// MemRegs) can be moved together past the instructions in [I, E): rejects
// calls, terminators, side-effecting ops, and stores (or aliasing loads when
// moving stores), and estimates the register-pressure cost of the move.
// NOTE(review): interior lines (the loop header over [I, E), the !isLd store
// checks, and several returns) are elided from this listing.
996 static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
997 MachineBasicBlock::iterator I,
998 MachineBasicBlock::iterator E,
999 SmallPtrSet<MachineInstr*, 4> &MemOps,
1000 SmallSet<unsigned, 4> &MemRegs,
1001 const TargetRegisterInfo *TRI) {
1002 // Are there stores / loads / calls between them?
1003 // FIXME: This is overly conservative. We should make use of alias information
1005 SmallSet<unsigned, 4> AddedRegPressure;
// Instructions that are themselves part of the group being moved are skipped.
1007 if (MemOps.count(&*I))
1009 const TargetInstrDesc &TID = I->getDesc();
1010 if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
1012 if (isLd && TID.mayStore())
1017 // It's not safe to move the first 'str' down.
1020 // str r4, [r0, #+4]
// Any def of (a register overlapping) the common base kills the move; other
// registers the intervening code touches add to the pressure estimate.
1024 for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
1025 MachineOperand &MO = I->getOperand(j);
1028 unsigned Reg = MO.getReg();
1029 if (MO.isDef() && TRI->regsOverlap(Reg, Base))
1031 if (Reg != Base && !MemRegs.count(Reg))
1032 AddedRegPressure.insert(Reg);
1036 // Estimate register pressure increase due to the transformation.
1037 if (MemRegs.size() <= 4)
1038 // Ok if we are moving small number of instructions.
1040 return AddedRegPressure.size() <= MemRegs.size() * 2;
// CanFormLdStDWord - Checks whether the adjacent pair Op0/Op1 can be replaced
// by a single LDRD/STRD, and if so fills in all the out-parameters (opcode,
// register pair, base, offset register, AM3 offset encoding, predicate,
// debug loc). Requires i64-compatible alignment and an AM3-encodable offset.
// NOTE(review): interior lines (the NewOpc assignments for LDRD/STRD, the
// "else return false", the negative-offset branch and the returns) are
// elided from this listing.
1044 ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
1046 unsigned &NewOpc, unsigned &EvenReg,
1047 unsigned &OddReg, unsigned &BaseReg,
1048 unsigned &OffReg, unsigned &Offset,
1050 ARMCC::CondCodes &Pred) {
1051 // FIXME: FLDS / FSTS -> FLDD / FSTD
1052 unsigned Opcode = Op0->getOpcode();
1053 if (Opcode == ARM::LDR)
1055 else if (Opcode == ARM::STR)
1060 // Must sure the base address satisfies i64 ld / st alignment requirement.
// Volatile accesses and accesses without a known memory operand cannot be
// safely paired.
1061 if (!Op0->hasOneMemOperand() ||
1062 !Op0->memoperands_begin()->getValue() ||
1063 Op0->memoperands_begin()->isVolatile())
1066 unsigned Align = Op0->memoperands_begin()->getAlignment();
1067 unsigned ReqAlign = STI->hasV6Ops()
1068 ? TD->getPrefTypeAlignment(Type::Int64Ty) : 8; // Pre-v6 need 8-byte align
1069 if (Align < ReqAlign)
1072 // Then make sure the immediate offset fits.
1073 int OffImm = getMemoryOpOffset(Op0);
1074 ARM_AM::AddrOpc AddSub = ARM_AM::add;
1076 AddSub = ARM_AM::sub;
1079 if (OffImm >= 256) // 8 bits
1081 Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
// LDRD/STRD need two distinct transfer registers.
1083 EvenReg = Op0->getOperand(0).getReg();
1084 OddReg = Op1->getOperand(0).getReg();
1085 if (EvenReg == OddReg)
1087 BaseReg = Op0->getOperand(1).getReg();
1088 OffReg = Op0->getOperand(2).getReg();
1089 Pred = getInstrPredicate(Op0, PredReg);
1090 dl = Op0->getDebugLoc();
// RescheduleOps - Try to move loads (or stores, per isLd) that share the same
// base register next to each other so a later pass can merge them into
// multiple-transfer instructions (and, for an adjacent pair, directly form an
// LDRD/STRD here).  Ops holds the candidate instructions off Base in this
// block; MI2LocMap maps each instruction to its original program-order index.
// Returns true if anything was moved.
// NOTE(review): several original lines are elided in this view; the comments
// below describe only the visible code.
1094 bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
1095 SmallVector<MachineInstr*, 4> &Ops,
1096 unsigned Base, bool isLd,
1097 DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
1098 bool RetVal = false;
1100 // Sort by offset (in reverse order).
1101 std::sort(Ops.begin(), Ops.end(), OffsetCompare());
1103 // The loads / stores of the same base are in order. Scan them from first to
1104 // last and check for the following:
1105 // 1. Any def of base.
1107 while (Ops.size() > 1) {
// Track the earliest/latest (by original location) instructions in the run
// being considered, so we can bound how far apart the candidates are.
1108 unsigned FirstLoc = ~0U;
1109 unsigned LastLoc = 0;
1110 MachineInstr *FirstOp = 0;
1111 MachineInstr *LastOp = 0;
1113 unsigned LastOpcode = 0;
1114 unsigned LastBytes = 0;
1115 unsigned NumMove = 0;
// Walk Ops from the back (smallest offset first, given the reverse sort)
// looking for a maximal run of same-opcode ops at consecutive offsets.
1116 for (int i = Ops.size() - 1; i >= 0; --i) {
1117 MachineInstr *Op = Ops[i];
1118 unsigned Loc = MI2LocMap[Op];
1119 if (Loc <= FirstLoc) {
1123 if (Loc >= LastLoc) {
// The run must be homogeneous: a different opcode ends it.
1128 unsigned Opcode = Op->getOpcode();
1129 if (LastOpcode && Opcode != LastOpcode)
1132 int Offset = getMemoryOpOffset(Op);
1133 unsigned Bytes = getLSMultipleTransferSize(Op);
// Offsets must advance by exactly the transfer size to stay contiguous.
1135 if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
1138 LastOffset = Offset;
1140 LastOpcode = Opcode;
1141 if (++NumMove == 8) // FIXME: Tune
// Collect the run's instructions and their transfer registers for the
// safety/profitability check below.
1148 SmallPtrSet<MachineInstr*, 4> MemOps;
1149 SmallSet<unsigned, 4> MemRegs;
1150 for (int i = NumMove-1; i >= 0; --i) {
1151 MemOps.insert(Ops[i]);
1152 MemRegs.insert(Ops[i]->getOperand(0).getReg());
1155 // Be conservative, if the instructions are too far apart, don't
1156 // move them. We want to limit the increase of register pressure.
1157 bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
1159 DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
1160 MemOps, MemRegs, TRI);
1162 for (unsigned i = 0; i != NumMove; ++i)
1165 // This is the new location for the loads / stores.
// Loads are hoisted up to the first op; stores are sunk down to the last.
1166 MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
// Skip past any of the run's own instructions at the insertion point.
1167 while (InsertPos != MBB->end() && MemOps.count(InsertPos))
1170 // If we are moving a pair of loads / stores, see if it makes sense
1171 // to try to allocate a pair of registers that can form register pairs.
1172 MachineInstr *Op0 = Ops.back();
1173 MachineInstr *Op1 = Ops[Ops.size()-2];
1174 unsigned EvenReg = 0, OddReg = 0;
1175 unsigned BaseReg = 0, OffReg = 0, PredReg = 0;
1176 ARMCC::CondCodes Pred = ARMCC::AL;
1177 unsigned NewOpc = 0;
1178 unsigned Offset = 0;
// Exactly two contiguous ops may combine into a single LDRD/STRD;
// CanFormLdStDWord fills in the new opcode and all operands.
1180 if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
1181 EvenReg, OddReg, BaseReg, OffReg,
1182 Offset, PredReg, Pred)) {
1186 // Form the pair instruction.
// Load pair: both transfer registers are defs.
1188 BuildMI(*MBB, InsertPos, dl, TII->get(NewOpc))
1189 .addReg(EvenReg, RegState::Define)
1190 .addReg(OddReg, RegState::Define)
1191 .addReg(BaseReg).addReg(0).addImm(Offset)
1192 .addImm(Pred).addReg(PredReg);
// Store pair: transfer registers are uses (elided lines add them).
1195 BuildMI(*MBB, InsertPos, dl, TII->get(NewOpc))
1198 .addReg(BaseReg).addReg(0).addImm(Offset)
1199 .addImm(Pred).addReg(PredReg);
1205 // Add register allocation hints to form register pairs.
1206 MRI->setRegAllocationHint(EvenReg, ARMRI::RegPairEven, OddReg);
1207 MRI->setRegAllocationHint(OddReg, ARMRI::RegPairOdd, EvenReg);
// Couldn't form a dword op: just splice each instruction to the new
// location, keeping their relative order.
1209 for (unsigned i = 0; i != NumMove; ++i) {
1210 MachineInstr *Op = Ops.back();
1212 MBB->splice(InsertPos, MBB, Op);
1216 NumLdStMoved += NumMove;
// RescheduleLoadStoreInstrs - Scan a basic block, bucket its unpredicated
// loads and stores by base register, then invoke RescheduleOps on each bucket
// to move same-base memory ops next to each other.  Returns true if any
// instruction was changed.
// NOTE(review): several original lines are elided in this view; the comments
// below describe only the visible code.
1226 ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
1227 bool RetVal = false;
// Program-order index for every instruction seen in the current region.
1229 DenseMap<MachineInstr*, unsigned> MI2LocMap;
// Per-base-register buckets of candidate loads / stores, plus the bases in
// the order they were first encountered.
1230 DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
1231 DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
1232 SmallVector<unsigned, 4> LdBases;
1233 SmallVector<unsigned, 4> StBases;
1236 MachineBasicBlock::iterator MBBI = MBB->begin();
1237 MachineBasicBlock::iterator E = MBB->end();
1239 for (; MBBI != E; ++MBBI) {
1240 MachineInstr *MI = MBBI;
1241 const TargetInstrDesc &TID = MI->getDesc();
// Calls and terminators end the current scheduling region.
1242 if (TID.isCall() || TID.isTerminator()) {
1243 // Stop at barriers.
1248 MI2LocMap[MI] = Loc++;
// Only simple memory ops are candidates.
1249 if (!isMemoryOp(MI))
1251 unsigned PredReg = 0;
// Skip predicated (non-AL) memory ops.
1252 if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
1255 int Opcode = MI->getOpcode();
1256 bool isLd = Opcode == ARM::LDR ||
1257 Opcode == ARM::FLDS || Opcode == ARM::FLDD;
// Operand 1 is the base address register for these load/store forms.
1258 unsigned Base = MI->getOperand(1).getReg();
1259 int Offset = getMemoryOpOffset(MI);
1261 bool StopHere = false;
// Append the load to its base's bucket, unless the same base+offset was
// already seen (a duplicate ends the region — see StopHere below).
1263 DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
1264 Base2LdsMap.find(Base);
1265 if (BI != Base2LdsMap.end()) {
1266 for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
1267 if (Offset == getMemoryOpOffset(BI->second[i])) {
1273 BI->second.push_back(MI);
// First load seen for this base: start a new bucket.
1275 SmallVector<MachineInstr*, 4> MIs;
1277 Base2LdsMap[Base] = MIs;
1278 LdBases.push_back(Base);
// Same bookkeeping for stores.
1281 DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
1282 Base2StsMap.find(Base);
1283 if (BI != Base2StsMap.end()) {
1284 for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
1285 if (Offset == getMemoryOpOffset(BI->second[i])) {
1291 BI->second.push_back(MI);
1293 SmallVector<MachineInstr*, 4> MIs;
1295 Base2StsMap[Base] = MIs;
1296 StBases.push_back(Base);
1301 // Found a duplicate (a base+offset combination that's seen earlier).
1308 // Re-schedule loads.
1309 for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
1310 unsigned Base = LdBases[i];
1311 SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base];
1313 RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
1316 // Re-schedule stores.
1317 for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
1318 unsigned Base = StBases[i];
1319 SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base];
1321 RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
// Reset the buckets for the next region / next call.
1325 Base2LdsMap.clear();
1326 Base2StsMap.clear();
1336 /// createARMLoadStoreOptimizationPass - returns an instance of the load / store
1337 /// optimization pass.
1338 FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
1340 return new ARMPreAllocLoadStoreOpt();
1341 return new ARMLoadStoreOpt();