//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

/// AArch64LoadStoreOpt - Post-register allocation pass to combine
/// load / store instructions to form ldp / stp instructions.

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumSmallTypeMerged, "Number of small type loads merged");

static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx;

  LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }
} LdStPairFlags;
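
// Illustrative example of the SExtIdx convention (register names are
// hypothetical, not from this source): pairing
//   ldrsw x0, [x2]       ; I, the sign-extending load
//   ldr   w1, [x2, #4]   ; the returned iterator
// gives SExtIdx == 0, meaning result 0 of the new ldp must be sign-extended.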

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;
  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit);

  // Merge the two instructions indicated into a single pair-wise instruction.
  // If MergeForward is true, erase the first instruction and fold its
  // operation into the second. If false, the reverse. Return the instruction
  // following the first instruction (which may change during processing).
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre- or post-indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I, unsigned Limit,
                                int UnscaledOffset);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre- or post-indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge foldable ldr/str instructions.
  bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);

  // Check if converting two narrow loads into a single wider load with
  // bitfield extracts could be enabled.
  bool enableNarrowLdMerge(MachineFunction &Fn);

  bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return AARCH64_LOAD_STORE_OPT_NAME;
  }
};
char AArch64LoadStoreOpt::ID = 0;
} // namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

static bool isUnscaledLdSt(MachineInstr *MI) {
  return isUnscaledLdSt(MI->getOpcode());
}
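
// e.g., LDURXi ("ldur x0, [x1, #-3]") takes a raw byte offset, which its
// scaled counterpart LDRXui cannot encode (its immediate counts 8-byte
// units and must be non-negative).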

static unsigned getBitExtrOpcode(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode.");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
    return AArch64::UBFMWri;
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
    return AArch64::SBFMWri;
  }
}

static bool isSmallTypeLdMerge(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
    return true;
  }
}

static bool isSmallTypeLdMerge(MachineInstr *MI) {
  return isSmallTypeLdMerge(MI->getOpcode());
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}
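
// e.g., the scale of an LDRXui is 8: "ldr x0, [x1, #16]" carries a scaled
// immediate of 2, i.e., two 8-byte elements.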

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return UINT_MAX;
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSBWui:
    return AArch64::LDRBBui;
  case AArch64::LDRSHWui:
    return AArch64::LDRHHui;
  case AArch64::LDURSBWi:
    return AArch64::LDURBBi;
  case AArch64::LDURSHWi:
    return AArch64::LDURHHi;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
    return AArch64::LDRWui;
  case AArch64::LDURHHi:
  case AArch64::LDURSHWi:
    return AArch64::LDURWi;
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
    return AArch64::LDRHHui;
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
    return AArch64::LDURHHi;
  }
}
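
// Note the last four mappings above: for the narrow-load merge, two adjacent
// narrow loads (e.g. two LDRHHui) combine into a single wider load (LDRWui)
// rather than a pair instruction; the halves are split out afterwards with
// bitfield extracts.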

static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI->getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI->getOperand(Idx);
}
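
// Operand layout assumed by the three accessors above, e.g.:
//   ldr x0, [x1, #8]       -> Rt = op0, base = op1, offset = op2
//   ldp x0, x1, [x2, #16]  -> Rt = op0, Rt2 = op1, base = op2, offset = op3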

// Copy MachineMemOperands from Op0 and Op1 to a new array assigned to MI.
static void concatenateMemOperands(MachineInstr *MI, MachineInstr *Op0,
                                   MachineInstr *Op1) {
  assert(MI->memoperands_empty() && "expected a new machineinstr");
  size_t numMemRefs = (Op0->memoperands_end() - Op0->memoperands_begin()) +
                      (Op1->memoperands_end() - Op1->memoperands_begin());

  MachineFunction *MF = MI->getParent()->getParent();
  MachineSDNode::mmo_iterator MemBegin = MF->allocateMemRefsArray(numMemRefs);
  MachineSDNode::mmo_iterator MemEnd =
      std::copy(Op0->memoperands_begin(), Op0->memoperands_end(), MemBegin);
  MemEnd = std::copy(Op1->memoperands_begin(), Op1->memoperands_end(), MemEnd);
  MI->setMemRefs(MemBegin, MemEnd);
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(I) : 1;

  bool MergeForward = Flags.getMergeForward();
  unsigned NewOpc = getMatchingPairOpcode(Opc);
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so we get flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (getLdStOffsetOp(I).getImm() ==
      getLdStOffsetOp(Paired).getImm() + OffsetStride) {
    RtMI = Paired;
    Rt2MI = I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = I;
    Rt2MI = Paired;
  }
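  // e.g., if I is "ldr x1, [x0, #8]" and Paired is "ldr x2, [x0]", the pair
  // must be emitted as "ldp x2, x1, [x0]", so Paired supplies Rt.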

  int OffsetImm = getLdStOffsetOp(RtMI).getImm();

  if (isSmallTypeLdMerge(Opc)) {
    // Change the scaled offset from small to large type.
    if (!IsUnscaled) {
      assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
      OffsetImm /= 2;
    }
    MachineInstr *RtNewDest = MergeForward ? I : Paired;
    // When merging small (< 32 bit) loads for big-endian targets, the order of
    // the component parts gets swapped.
    if (!Subtarget->isLittleEndian())
      std::swap(RtMI, Rt2MI);
    // Construct the new load instruction.
    MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
    NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                       TII->get(NewOpc))
                   .addOperand(getLdStRegOp(RtNewDest))
                   .addOperand(BaseRegOp)
                   .addImm(OffsetImm);

    // Copy MachineMemOperands from the original loads.
    concatenateMemOperands(NewMemMI, I, Paired);

    DEBUG(dbgs()
          << "Creating the new load and extract. Replacing instructions:\n    ");
    DEBUG(I->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG(Paired->print(dbgs()));
    DEBUG(dbgs() << "  with instructions:\n    ");
    DEBUG((NewMemMI)->print(dbgs()));

    int Width = getMemScale(I) == 1 ? 8 : 16;
    int LSBLow = 0;
    int LSBHigh = Width;
    int ImmsLow = LSBLow + Width - 1;
    int ImmsHigh = LSBHigh + Width - 1;
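    // e.g., for two merged halfword loads, Width == 16: the high half is
    // extracted with UBFM/SBFM (lsb 16, imms 31, i.e. "ubfx w1, w0, #16, #16")
    // and an unsigned low half with "and w0, w0, #0xffff".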
    MachineInstr *ExtDestMI = MergeForward ? Paired : I;
    if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
      // Create the bitfield extract for high bits.
      BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }
    } else {
      // Create the bitfield extract for low bits.
      if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
        // For unsigned, prefer to use AND for low bits.
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(AArch64::ANDWri))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(ImmsLow);
      } else {
        BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                            TII->get(getBitExtrOpcode(RtMI)))
                        .addOperand(getLdStRegOp(RtMI))
                        .addReg(getLdStRegOp(RtNewDest).getReg())
                        .addImm(LSBLow)
                        .addImm(ImmsLow);
      }

      // Create the bitfield extract for high bits.
      BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                          TII->get(getBitExtrOpcode(Rt2MI)))
                      .addOperand(getLdStRegOp(Rt2MI))
                      .addReg(getLdStRegOp(RtNewDest).getReg())
                      .addImm(LSBHigh)
                      .addImm(ImmsHigh);
    }
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI1)->print(dbgs()));
    DEBUG(dbgs() << "    ");
    DEBUG((BitExtMI2)->print(dbgs()));
    DEBUG(dbgs() << "\n");

    // Erase the old instructions.
    I->eraseFromParent();
    Paired->eraseFromParent();
    return NextI;
  }

  // Handle Unscaled.
  if (IsUnscaled)
    OffsetImm /= OffsetStride;

  // Construct the new instruction.
  MachineInstrBuilder MIB = BuildMI(*I->getParent(), InsertionPoint,
                                    I->getDebugLoc(), TII->get(NewOpc))
                                .addOperand(getLdStRegOp(RtMI))
                                .addOperand(getLdStRegOp(Rt2MI))
                                .addOperand(BaseRegOp)
                                .addImm(OffsetImm);

  // FIXME: Do we need/want to copy the mem operands from the source
  // instructions? Probably. What uses them after this?

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // LDRSWui.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
                TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
  }

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (MO.isDef()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}
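
// e.g., "ldr w0, [x1]" marks w0 and each of its aliases (x0) as modified and
// x1 (and w1) as used; a call's register mask marks every non-preserved
// register as modified.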

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled)
    Offset /= OffsetStride;

  return Offset <= 63 && Offset >= -64;
}
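
// e.g., an unscaled STURXi at byte offset 24 with OffsetStride 8 becomes
// element offset 3, well inside the 7-bit signed pair range [-64, 63].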

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
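
// e.g., alignTo(5, 4) == 8 and alignTo(8, 4) == 8: Num is rounded up to the
// next multiple of PowOf2.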

static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
                     const AArch64InstrInfo *TII) {
  // One of the instructions must modify memory.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
    return false;

  return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}

static bool mayAlias(MachineInstr *MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     const AArch64InstrInfo *TII) {
  for (auto &MIb : MemInsns)
    if (mayAlias(MIa, MIb, TII))
      return true;

  return false;
}

/// findMatchingInsn - Scan the instructions looking for a load/store that can
/// be combined with the current instruction into a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr *FirstMI = I;
  ++MBBI;

  unsigned Opc = FirstMI->getOpcode();
  bool MayLoad = FirstMI->mayLoad();
  bool IsUnscaled = isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();

  // Early exit if the first instruction modifies the base register.
  // e.g., ldr x0, [x0]
  if (FirstMI->modifiesRegister(BaseReg, TRI))
    return E;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1.)
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  if (!isSmallTypeLdMerge(Opc) &&
      !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return E;

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    bool CanMergeOpc = Opc == MI->getOpcode();
    Flags.setSExtIdx(-1);
    if (!CanMergeOpc) {
      bool IsValidLdStrOpc;
      unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
      assert(IsValidLdStrOpc &&
             "Given Opc should be a Load or Store with an immediate");
      // Opc will be the first instruction in the pair.
      Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
      CanMergeOpc = NonSExtOpc == getMatchingNonSExtOpcode(MI->getOpcode());
    }

    if (CanMergeOpc && getLdStOffsetOp(MI).isImm()) {
      assert(MI->mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      //
      // Pairwise instructions have a 7-bit signed offset field. Single insns
      // have a 12-bit unsigned offset field. To be a valid combine, the
      // final offset must be in range.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        // If this is a volatile load/store that otherwise matched, stop looking
        // as something is going on that we don't have enough information to
        // safely transform. Similarly, stop if we see a hint to avoid pairs.
        if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
          return E;

        // If the resultant immediate offset of merging these instructions
        // is out of range for a pairwise instruction, bail and keep looking.
        bool MIIsUnscaled = isUnscaledLdSt(MI);
        bool IsSmallTypeLd = isSmallTypeLdMerge(MI->getOpcode());
        if (!IsSmallTypeLd &&
            !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        if (IsSmallTypeLd) {
          // If the alignment requirements of the larger type scaled load
          // instruction can't express the scaled offset of the smaller type
          // input, bail and keep looking.
          if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        } else {
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(MI);
            continue;
          }
        }

        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, TII)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between the
        // first and the second alias with the first, we can combine the first
        // into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, TII)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // If the instruction wasn't a matching load or store, stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI->isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI->mayLoadOrStore())
      MemInsns.push_back(MI);
  }
  return E;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value);
  } else {
    // Paired instruction.
    int Scale = getMemScale(I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I, 0))
              .addOperand(getLdStRegOp(I, 1))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value / Scale);
  }
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << " Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}
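
// Illustrative writeback fold (hypothetical registers): merging
//   ldp x0, x1, [x2]
//   add x2, x2, #32
// produces an LDPXpost whose MI-level immediate is 4 (32 bytes / scale 8),
// printed as "ldp x0, x1, [x2], #32".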

bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
                                               MachineInstr *MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
    // Negate the offset for a SUB instruction.
    Offset = -Offset;
  // FALLTHROUGH
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI->getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI->getOperand(0).getReg() != BaseReg ||
        MI->getOperand(1).getReg() != BaseReg)
      break;

    bool IsPairedInsn = isPairedLdSt(MemMI);
    int UpdateOffset = MI->getOperand(2).getImm();
    // For non-paired load/store instructions, the immediate must fit in a
    // signed 9-bit integer.
    if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
      break;

    // For paired load/store instructions, the immediate must be a multiple of
    // the scaling factor. The scaled offset must also fit into a signed 7-bit
    // integer.
    if (IsPairedInsn) {
      int Scale = getMemScale(MemMI);
      if (UpdateOffset % Scale != 0)
        break;

      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > 63 || ScaledOffset < -64)
        break;
    }

    // If we have a non-zero Offset, we check that it matches the amount
    // we're adding to the register.
    if (!Offset || Offset == MI->getOperand(2).getImm())
      return true;
    break;
  }
  return false;
}
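
// e.g., for "ldr x0, [x2]" (BaseReg == x2) with a requested Offset of 8,
// "add x2, x2, #8" matches; "sub x2, x2, #8" matches a requested Offset of
// -8; and with Offset == 0 any in-range update of x2 is accepted.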

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, unsigned Limit, int UnscaledOffset) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);

  // Scan forward looking for post-index opportunities. Updating instructions
  // can't be formed if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;

  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  ++MBBI;
  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  --MBBI;
  for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

bool AArch64LoadStoreOpt::tryToMergeLdStInst(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = MBBI;
  MachineBasicBlock::iterator E = MI->getParent()->end();
  // If this is a volatile load/store, don't mess with it.
  if (MI->hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (TII->isLdStPairSuppressed(MI))
    return false;

  // Look ahead up to ScanLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
  if (Paired != E) {
    if (isSmallTypeLdMerge(MI)) {
      ++NumSmallTypeMerged;
    } else {
      ++NumPairCreated;
      if (isUnscaledLdSt(MI))
        ++NumUnscaledPairCreated;
    }

    // Merge the loads into a pair. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool enableNarrowLdOpt) {
  bool Modified = false;
  // Three transformations to do here:
  // 1) Find narrow loads that can be converted into a single wider load
  //    with bitfield extract instructions.
  //      e.g.,
  //        ldrh w0, [x2]
  //        ldrh w1, [x2, #2]
  //        ; becomes
  //        ldr w0, [x2]
  //        ubfx w1, w0, #16, #16
  //        and w0, w0, #ffff
  // 2) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  // 3) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4

  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       enableNarrowLdOpt && MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRSBWui:
    case AArch64::LDRSHWui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURSBWi:
    case AArch64::LDURSHWi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    switch (MI->getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRSWui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    case AArch64::LDURSWi: {
      if (tryToMergeLdStInst(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr *MI = MBBI;
    // Do update merging. It's simpler to keep this separate from the above
    // switch, though not strictly necessary.
    unsigned Opc = MI->getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::STRHHui:
    case AArch64::STRBBui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRHHui:
    case AArch64::LDRBBui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    // Paired instructions.
    case AArch64::LDPSi:
    case AArch64::LDPSWi:
    case AArch64::LDPDi:
    case AArch64::LDPQi:
    case AArch64::LDPWi:
    case AArch64::LDPXi:
    case AArch64::STPSi:
    case AArch64::STPDi:
    case AArch64::STPQi:
    case AArch64::STPWi:
    case AArch64::STPXi: {
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
      // Look forward to try to form a post-index instruction. For example,
      // ldr x0, [x20]
      // add x20, x20, #32
      //   merged into:
      // ldr x0, [x20], #32
      MachineBasicBlock::iterator Update =
          findMatchingUpdateInsnForward(MBBI, ScanLimit, 0);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
        Modified = true;
        ++NumPostFolded;
        break;
      }
      // Don't know how to handle pre/post-index versions of unscaled
      // instructions, so move to the next instruction.
      if (isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }

      // Look back to try to find a pre-index instruction. For example,
      // add x0, x0, #8
      // ldr x1, [x0]
      //   merged into:
      // ldr x1, [x0, #8]!
      Update = findMatchingUpdateInsnBackward(MBBI, ScanLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // The immediate in the load/store is scaled by the size of the memory
      // operation. The immediate in the add we're looking for,
      // however, is not, so adjust here.
      int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);

      // Look forward to try to find a post-index instruction. For example,
      // ldr x1, [x0, #64]
      // add x0, x0, #64
      //   merged into:
      // ldr x1, [x0, #64]!
      Update = findMatchingUpdateInsnForward(MBBI, ScanLimit, UnscaledOffset);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Nothing found. Just move to the next instruction.
      ++MBBI;
      break;
    }
      // FIXME: Do the other instructions.
    }
  }

  return Modified;
}

bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
  const AArch64Subtarget *SubTarget =
      &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  bool ProfitableArch = SubTarget->isCortexA57();
  // FIXME: The benefit from converting narrow loads into a wider load could be
  // microarchitecture dependent, as it assumes that a single load with two
  // bitfield extracts is cheaper than two narrow loads. Currently, this
  // conversion is enabled only on Cortex-A57, where the performance benefits
  // were verified.
  return ProfitableArch && !SubTarget->requiresStrictAlign();
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();

  bool Modified = false;
  bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, enableNarrowLdOpt);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
// loads and stores near one another?

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}