//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;
#define DEBUG_TYPE "aarch64-ldst-opt"
/// AArch64LoadStoreOpt - Post-register allocation pass to combine
/// load / store instructions to form ldp / stp instructions.
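///
/// For example (an illustrative sketch; register choices are arbitrary), two
/// adjacent loads from consecutive slots become a single load pair:
///   ldr x0, [x2]
///   ldr x1, [x2, #8]
///   ; becomes
///   ldp x0, x1, [x2]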

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");

static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// Placeholder while testing unscaled load/store combining.
static cl::opt<bool> EnableAArch64UnscaledMemOp(
    "aarch64-unscaled-mem-op", cl::Hidden,
    cl::desc("Allow AArch64 unscaled load/store combining"), cl::init(true));

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }
} LdStPairFlags;
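
// For example (an illustrative sketch), pairing the sign-extending load
//   ldrsw x0, [x2]
// with
//   ldr w1, [x2, #4]
// produces "ldp w0, w1, [x2]" plus a separate sign extension of w0 into x0;
// SExtIdx == 0 records that the first result needs that extension.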

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I, unsigned Limit,
                                int Value);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Merge a pre-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergePreIdxUpdateInsn(MachineBasicBlock::iterator I,
                        MachineBasicBlock::iterator Update);

  // Merge a post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergePostIdxUpdateInsn(MachineBasicBlock::iterator I,
                         MachineBasicBlock::iterator Update);

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}

// Size in bytes of the data moved by a load or store of the given opcode.
static int getMemSize(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown size!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return 4;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return 8;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return 16;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return 4;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return 8;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return 16;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return 4;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return 8;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return 4;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}
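
// E.g. (illustrative), "str x0, [sp, #16]" and "str x1, [sp, #24]" (both
// STRXui) map to STPXi and can become "stp x0, x1, [sp, #16]".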

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  }
}
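
// E.g. (illustrative), "add x2, x2, #8" followed by "ldr x0, [x2]" can be
// rewritten as the pre-indexed "ldr x0, [x2, #8]!".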

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  }
}
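
// E.g. (illustrative), "ldr x0, [x2]" followed by "add x2, x2, #8" can be
// rewritten as the post-indexed "ldr x0, [x2], #8".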

static const MachineOperand &getLdStRegOp(const MachineInstr *MI) {
  return MI->getOperand(0);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  return MI->getOperand(1);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  return MI->getOperand(2);
}
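
// These helpers assume the operand layout of the loads and stores handled by
// this pass: e.g. (illustrative), for "ldr x0, [x1, #16]" (LDRXui) operand 0
// is x0, operand 1 is x1, and operand 2 is the scaled immediate (16 / 8 == 2).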

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way, the merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride =
      IsUnscaled && EnableAArch64UnscaledMemOp ? getMemSize(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  unsigned NewOpc = getMatchingPairOpcode(Opc);
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // MergeForward also determines which instruction we copy the base register
  // operand from, so that the flags stay compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  int Offset = getLdStOffsetOp(I).getImm();
  int PairedOffset = getLdStOffsetOp(Paired).getImm();
  bool PairedIsUnscaled = isUnscaledLdSt(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled.
    // If I is scaled then scale the offset of Paired accordingly.
    // Otherwise, do the opposite (i.e., make Paired's offset unscaled).
    int MemSize = getMemSize(Paired);
    if (PairedIsUnscaled)
      assert(!(PairedOffset % MemSize) &&
             "Offset should be a multiple of the stride!");
    PairedOffset =
        PairedIsUnscaled ? PairedOffset / MemSize : PairedOffset * MemSize;
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }

  // Scale the immediate offset, if necessary.
  int OffsetImm = getLdStOffsetOp(RtMI).getImm();
  if (isUnscaledLdSt(RtMI->getOpcode()) && EnableAArch64UnscaledMemOp) {
    assert(!(OffsetImm % getMemSize(RtMI)) &&
           "Offset should be a multiple of the stride!");
    OffsetImm /= getMemSize(RtMI);
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB = BuildMI(*I->getParent(), InsertionPoint,
                                    I->getDebugLoc(), TII->get(NewOpc))
                                .addOperand(getLdStRegOp(RtMI))
                                .addOperand(getLdStRegOp(Rt2MI))
                                .addOperand(BaseRegOp)
                                .addImm(OffsetImm);

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");

  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // SEXTLOAD.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  // Pairwise instructions take a 7-bit signed, scaled offset.
  return Offset <= 63 && Offset >= -64;
}
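
// E.g., for an unscaled X-register access (OffsetStride == 8), byte offsets
// from -512 to 504 in multiples of 8 survive this check: they map onto the
// 7-bit signed element range [-64, 63] of the paired form.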

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
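
// E.g., alignTo(5, 4) == 8, alignTo(8, 4) == 8, and alignTo(-3, 4) == 0.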

static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

static bool canMergeOpc(unsigned Opc, unsigned PairOpc, LdStPairFlags &Flags) {
  // Opcodes match: nothing more to check.
  if (Opc == PairOpc)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  Flags.setSExtIdx(-1);
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // Opc will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(PairOpc, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
    return true;
  }

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?

  // If the second instruction isn't even a load/store, bail out.
  if (!PairIsValidLdStrOpc)
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return isUnscaledLdSt(Opc) != isUnscaledLdSt(PairOpc) &&
         getMatchingPairOpcode(Opc) == getMatchingPairOpcode(PairOpc);
}
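
// E.g. (illustrative), "ldr w0, [x2]" (LDRWui, scaled) and "ldur w1, [x2, #4]"
// (LDURWi, unscaled) both map to the pair opcode LDPWi, so canMergeOpc lets
// them be considered for pairing.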

/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags,
                                      unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1.)
  int OffsetStride =
      IsUnscaled && EnableAArch64UnscaledMemOp ? getMemSize(FirstMI) : 1;
  if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    if (canMergeOpc(Opc, MI->getOpcode(), Flags) &&
        getLdStOffsetOp(MI).isImm()) {
      assert(MI->mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      //
      // Pairwise instructions have a 7-bit signed offset field. Single insns
      // have a 12-bit unsigned offset field. To be a valid combine, the
      // final offset must be in range.
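      //
      // E.g. (illustrative): LDRXui can encode element offsets 0..4095,
      // while LDPXi only accepts -64..63, so two loads that match in form
      // may still fail the range check below.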
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();

      // We're trying to pair instructions that differ in how they are scaled.
      // If FirstMI is scaled then scale the offset of MI accordingly.
      // Otherwise, do the opposite (i.e., make MI's offset unscaled).
      bool MIIsUnscaled = isUnscaledLdSt(MI);
      if (IsUnscaled != MIIsUnscaled) {
        int MemSize = getMemSize(MI);
        if (MIIsUnscaled) {
          // If the unscaled offset isn't a multiple of the MemSize, we can't
          // pair the operations together: bail and keep looking.
          if (MIOffset % MemSize) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
          MIOffset /= MemSize;
        } else {
          MIOffset *= MemSize;
        }
      }

      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        // If this is a volatile load/store that otherwise matched, stop looking
        // as something is going on that we don't have enough information to
        // safely transform. Similarly, stop if we see a hint to avoid pairs.
        if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
          return E;
        // If the resultant immediate offset of merging these instructions
        // is out of range for a pairwise instruction, bail and keep looking.
        if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }
        // If the alignment requirements of the paired (scaled) instruction
        // can't express the offset of the unscaled input, bail and keep
        // looking.
        if (IsUnscaled && EnableAArch64UnscaledMemOp &&
            (alignTo(MinOffset, OffsetStride) != MinOffset)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between
        // the first and the second alias with the first, we can combine the
        // first into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // If the instruction wasn't a matching load or store, stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI->isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI->mayLoadOrStore())
      MemInsns.push_back(MI);
  }
  return E;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePreIdxUpdateInsn(MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator Update) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = getPreIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB =
      BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
          .addOperand(getLdStRegOp(Update))
          .addOperand(getLdStRegOp(I))
          .addOperand(getLdStBaseOp(I))
          .addImm(Value);
  (void)MIB;

  DEBUG(dbgs() << "Creating pre-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::mergePostIdxUpdateInsn(
    MachineBasicBlock::iterator I, MachineBasicBlock::iterator Update) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB =
      BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
          .addOperand(getLdStRegOp(Update))
          .addOperand(getLdStRegOp(I))
          .addOperand(getLdStBaseOp(I))
          .addImm(Value);
  (void)MIB;

  DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

static bool isMatchingUpdateInsn(MachineInstr *MI, unsigned BaseReg,
                                 int Offset) {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
    // Negate the offset for a SUB instruction.
    Offset = -Offset;
  // FALLTHROUGH
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI->getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
      break;
    // If the instruction has the base register as source and dest and the
    // immediate will fit in a signed 9-bit integer, then we have a match.
    if (getLdStRegOp(MI).getReg() == BaseReg &&
        getLdStBaseOp(MI).getReg() == BaseReg &&
        getLdStOffsetOp(MI).getImm() <= 255 &&
        getLdStOffsetOp(MI).getImm() >= -256) {
      // If we have a non-zero Offset, we check that it matches the amount
      // we're adding to the register.
      if (!Offset || Offset == MI->getOperand(2).getImm())
        return true;
    }
    break;
  }
  return false;
}
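
// E.g. (illustrative), with BaseReg == x0 and Offset == 8, "add x0, x0, #8"
// matches, while "sub x0, x0, #8" matches a requested Offset of -8.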

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, unsigned Limit, int Value) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;
  const MachineFunction &MF = *MemMI->getParent()->getParent();

  unsigned DestReg = getLdStRegOp(MemMI).getReg();
  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm() *
               TII->getRegClass(MemMI->getDesc(), 0, TRI, MF)->getSize();

  // If the base register overlaps the destination register, we can't
  // merge the update.
  if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
    return E;

  // Scan forward looking for post-index opportunities.
  // Updating instructions can't be formed if the memory insn already
  // has an offset other than the value we're looking for.
  if (Offset != Value)
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  ++MBBI;
  for (unsigned Count = 0; MBBI != E; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(MI, BaseReg, Value))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;
  const MachineFunction &MF = *MemMI->getParent()->getParent();

  unsigned DestReg = getLdStRegOp(MemMI).getReg();
  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();
  unsigned RegSize = TII->getRegClass(MemMI->getDesc(), 0, TRI, MF)->getSize();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
  // If the base register overlaps the destination register, we can't
  // merge the update.
  if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  --MBBI;
  for (unsigned Count = 0; MBBI != B; --MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(MI, BaseReg, RegSize))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;
  // Two transformations to do here:
  // 1) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  // 2) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4

  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRSWui:
    // do the unscaled versions as well
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    case AArch64::LDURSWi: {
      // If this is a volatile load/store, don't mess with it.
      if (MI->hasOrderedMemoryRef()) {
        ++MBBI;
        break;
      }
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
      // Check if this load/store has a hint to avoid pair formation.
      // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
      if (TII->isLdStPairSuppressed(MI)) {
        ++MBBI;
        break;
      }
      // Look ahead up to ScanLimit instructions for a pairable instruction.
      LdStPairFlags Flags;
      MachineBasicBlock::iterator Paired =
          findMatchingInsn(MBBI, Flags, ScanLimit);
      if (Paired != E) {
        ++NumPairCreated;
        if (isUnscaledLdSt(MI))
          ++NumUnscaledPairCreated;

        // Merge the loads into a pair. Keeping the iterator straight is a
        // pain, so we let the merge routine tell us what the next instruction
        // is after it's done mucking about.
        MBBI = mergePairedInsns(MBBI, Paired, Flags);
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    // Do update merging. It's simpler to keep this separate from the above
    // switch, though not strictly necessary.
    unsigned Opc = MI->getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    // do the unscaled versions as well
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi: {
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
      // Look ahead up to ScanLimit instructions for a mergable instruction.
      MachineBasicBlock::iterator Update =
          findMatchingUpdateInsnForward(MBBI, ScanLimit, 0);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergePostIdxUpdateInsn(MBBI, Update);
        Modified = true;
        ++NumPostFolded;
        break;
      }
      // Don't know how to handle pre/post-index versions of unscaled
      // instructions, so move to the next instruction.
      if (isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }

      // Look back to try to find a pre-index instruction. For example,
      // add x0, x0, #8
      // ldr x1, [x0]
      // merged into:
      // ldr x1, [x0, #8]!
      Update = findMatchingUpdateInsnBackward(MBBI, ScanLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergePreIdxUpdateInsn(MBBI, Update);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Look forward to try to find a base update instruction after the
      // load/store; the combined form is pre-indexed. For example,
      // ldr x1, [x0, #64]
      // add x0, x0, #64
      // merged into:
      // ldr x1, [x0, #64]!
      //
      // The immediate in the load/store is scaled by the size of the register
      // being loaded. The immediate in the add we're looking for,
      // however, is not, so adjust here.
      int Value = MI->getOperand(2).getImm() *
                  TII->getRegClass(MI->getDesc(), 0, TRI, *(MBB.getParent()))
                      ->getSize();
      Update = findMatchingUpdateInsnForward(MBBI, ScanLimit, Value);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergePreIdxUpdateInsn(MBBI, Update);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Nothing found. Just move to the next instruction.
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

  return Modified;
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  TII = static_cast<const AArch64InstrInfo *>(Fn.getSubtarget().getInstrInfo());
  TRI = Fn.getSubtarget().getRegisterInfo();

  bool Modified = false;
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
// loads and stores near one another?

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}