//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "AArch64MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}

/// GetInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}
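
// The Cond vector built by parseCondBranch below (and consumed by
// ReverseBranchCondition, instantiateCondBranch and insertSelect) uses one of
// three layouts, summarized here for reference:
//   Bcc:       Cond = { condition-code }
//   CBZ/CBNZ:  Cond = { -1, opcode, register }
//   TBZ/TBNZ:  Cond = { -1, opcode, register, bit-number }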

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now if the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:  Cond[1].setImm(AArch64::CBNZW); break;
    case AArch64::CBNZW: Cond[1].setImm(AArch64::CBZW);  break;
    case AArch64::CBZX:  Cond[1].setImm(AArch64::CBNZX); break;
    case AArch64::CBNZX: Cond[1].setImm(AArch64::CBZX);  break;
    case AArch64::TBZW:  Cond[1].setImm(AArch64::TBNZW); break;
    case AArch64::TBNZW: Cond[1].setImm(AArch64::TBZW);  break;
    case AArch64::TBZX:  Cond[1].setImm(AArch64::TBNZX); break;
    case AArch64::TBNZX: Cond[1].setImm(AArch64::TBZX);  break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;
  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();
  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;
  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
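
// Illustrative examples of the folds recognized above (assembly-level view):
//   %v = ADDWri %x, 1, 0    ; %v = %x + 1   -> fold into CSINCWr using %x
//   %v = ORNWrr wzr, %x     ; %v = ~%x      -> fold into CSINVWr using %x
//   %v = SUBWrr wzr, %x     ; %v = -%x      -> fold into CSNEGWr using %x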

bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    const SmallVectorImpl<MachineOperand> &Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:  Is64Bit = false; CC = AArch64CC::EQ; break;
    case AArch64::CBZX:  Is64Bit = true;  CC = AArch64CC::EQ; break;
    case AArch64::CBNZW: Is64Bit = false; CC = AArch64CC::NE; break;
    case AArch64::CBNZX: Is64Bit = true;  CC = AArch64CC::NE; break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg).addImm(0).addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg).addImm(0).addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}
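
// For example (illustrative), with Cond built from "CBZW %w1" and GPR32
// operands, the code above emits roughly:
//   SUBSWri wzr, %w1, 0, 0    ; cmp w1, #0
//   CSELWr  %dst, %true, %false, EQ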

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
  if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
    return MI->isAsCheapAsAMove();

  switch (MI->getOpcode()) {
  default:
    return false;

  // add/sub with immediate, without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI->getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool
AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int OffsetA = 0, OffsetB = 0;
  int WidthA = 0, WidthB = 0;

  assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
         "MIa must be a store or a load");
  assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
         "MIb must be a store or a load");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
      MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
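
// E.g. "ldr x0, [x8]" (offset 0, width 8) and "str x1, [x8, #8]" (offset 8)
// satisfy 0 + 8 <= 8, so the two accesses are reported as disjoint.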

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1
    CmpValue = (MI->getOperand(2).getImm() != 0);
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return val type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk.
    // CmpValue is only used to compare with zero in optimizeCompareInstr.
    CmpValue = (AArch64_AM::decodeLogicalImmediate(
                    MI->getOperand(2).getImm(),
                    MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
    return true;
  }

  return false;
}

static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getSubtargetImpl()->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// convertFlagSettingOpcode - return opcode that does not
/// set flags when possible. The caller is responsible for doing
/// the actual substitution and legality checking.
static unsigned convertFlagSettingOpcode(MachineInstr *MI) {
  unsigned NewOpc;
  switch (MI->getOpcode()) {
  default:
    return MI->getOpcode();
  case AArch64::ADDSWrr: NewOpc = AArch64::ADDWrr; break;
  case AArch64::ADDSWri: NewOpc = AArch64::ADDWri; break;
  case AArch64::ADDSWrs: NewOpc = AArch64::ADDWrs; break;
  case AArch64::ADDSWrx: NewOpc = AArch64::ADDWrx; break;
  case AArch64::ADDSXrr: NewOpc = AArch64::ADDXrr; break;
  case AArch64::ADDSXri: NewOpc = AArch64::ADDXri; break;
  case AArch64::ADDSXrs: NewOpc = AArch64::ADDXrs; break;
  case AArch64::ADDSXrx: NewOpc = AArch64::ADDXrx; break;
  case AArch64::SUBSWrr: NewOpc = AArch64::SUBWrr; break;
  case AArch64::SUBSWri: NewOpc = AArch64::SUBWri; break;
  case AArch64::SUBSWrs: NewOpc = AArch64::SUBWrs; break;
  case AArch64::SUBSWrx: NewOpc = AArch64::SUBWrx; break;
  case AArch64::SUBSXrr: NewOpc = AArch64::SUBXrr; break;
  case AArch64::SUBSXri: NewOpc = AArch64::SUBXri; break;
  case AArch64::SUBSXrs: NewOpc = AArch64::SUBXrs; break;
  case AArch64::SUBSXrx: NewOpc = AArch64::SUBXrx; break;
  }

  return NewOpc;
}
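
// E.g. a "cmp w0, #5" emitted as SUBSWri whose NZCV result turns out to be
// dead maps to SUBWri here; unhandled opcodes are returned unchanged so the
// caller can tell that no substitution is possible.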

/// True when condition code could be modified on the instruction
/// trace starting at from and ending at to.
static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
                                  const bool CheckOnlyCCWrites,
                                  const TargetRegisterInfo *TRI) {
  // We iterate backward starting \p To until we hit \p From.
  MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();

  // Early exit if To is at the beginning of the BB.
  if (I == B)
    return true;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, assume the condition code gets modified on some path.
  if (To->getParent() != From->getParent())
    return true;

  // Check that NZCV isn't set on the trace.
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
      // This instruction modifies or uses NZCV after the one we want to
      // change.
      return true;
    // We currently don't allow the instruction trace to cross basic
    // block boundaries.
    if (I == B)
      return true;
  }
  return false;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    unsigned Opc = CmpInstr->getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare
  // function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;
  bool CheckOnlyCCWrites = false;
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
    return false;

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // If an instruction uses a condition code that requires checking of the
  // V bit, then it is not safe to do this optimization.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove Compare instruction if Overflow(V) is used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::GT:
      case AArch64CC::LT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}
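
// E.g. (illustrative) for the sequence
//   %v = ADDWrr %x, %y
//   %dead = SUBSWri %v, 0, 0    ; cmp %v, #0
// the checks above allow rewriting %v's definition as the flag-setting
// "ADDSWrr %v, %x, %y" and erasing the compare.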

bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended register operand with a
/// non-zero extend/shift immediate.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}
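
// E.g. "ldr x0, [x1, x2]" (uxtx, no shift) is a plain register offset and
// returns false here, while "ldr x0, [x1, x2, lsl #3]" or
// "ldr x0, [x1, w2, sxtw]" scales or extends the index and returns true.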

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

bool
AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                       unsigned &Offset,
                                       const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui: {
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
  }
}

bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
    MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
    const TargetRegisterInfo *TRI) const {
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt->getNumOperands() != 3)
    return false;
  if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have scaling factor set to 1.
  int Scale = 0;
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDRXui:
  case AArch64::STRXui:
    Scale = Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::STRWui:
    Scale = Width = 4;
    break;
  case AArch64::LDRBui:
  case AArch64::STRBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHui:
  case AArch64::STRHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRSui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRDui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDRBBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  }

  BaseReg = LdSt->getOperand(1).getReg();
  Offset = LdSt->getOperand(2).getImm() * Scale;
  return true;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that operand 2 isImm.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}
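
// E.g. two LDRXui loads from the same base with scaled immediates 4 and 5
// (byte offsets 32 and 40) are adjacent and are candidates for later ldp
// formation.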

bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP, TST followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != AArch64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::ANDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
  case AArch64::ANDSXri:
    return true;
  }
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0).addImm(Offset)
                                .addMetadata(Var).addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
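
// E.g. copying the D0_D1 tuple up to D1_D2: (1 - 0) & 0x1f == 1 < 2, so a
// forward sub-register copy would overwrite D1 before it is read as a source;
// copyPhysRegTuple below iterates backwards in that case.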

void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
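
// emitFrameOffset materializes DestReg = SrcReg +/- Offset as a chain of
// ADD/SUB XRi instructions, each carrying a 12-bit immediate that may be
// shifted left by 12, largest chunk first. For example (illustrative only),
// an offset of 0x123456 becomes:
//   add Dest, Src,  #0x123, lsl #12
//   add Dest, Dest, #0x456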
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24 bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP, which is always 16-byte aligned, so the
  // scratch register can be loaded with offset%8 and the add/sub can use an
  // extending instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
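
// Note that foldMemoryOperandImpl never actually folds a memory operand on
// AArch64: every path returns nullptr. The hook is used only to constrain
// the register class of COPYs to or from SP, so that the generic folding
// code does not end up trying to spill SP itself.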
MachineInstr *
AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}
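
/// isAArch64FrameOffsetLegal - Check whether \p Offset can be encoded in
/// \p MI. On return, \p Offset holds the unencodable remainder in bytes and
/// *EmittableOffset the portion that fits the instruction's immediate field;
/// *OutUseUnscaledOp and *OutUnscaledOp report whether \p MI should be
/// rewritten to its unscaled (LDUR/STUR-style) form. The result combines the
/// AArch64FrameOffsetCanUpdate and AArch64FrameOffsetIsLegal flags, or is
/// AArch64FrameOffsetCannotUpdate for vector spills/fills, which take no
/// immediate offset at all.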
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
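
/// rewriteAArch64FrameIndex - Rewrite \p MI to address \p Offset bytes from
/// \p FrameReg. Returns false if the offset could not be handled directly in
/// \p MI; the left-over portion is returned by reference in \p Offset.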
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return true;
  }

  return false;
}
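
// "HINT #0" is the architectural encoding of NOP on AArch64.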
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}
/// useMachineCombiner - return true when a target supports MachineCombiner
bool AArch64InstrInfo::useMachineCombiner() const {
  // AArch64 supports the MachineCombiner.
  return true;
}
// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}
// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}
// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}
// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
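
// canCombineWithMUL - Return true when the operand \p MO is produced by a
// multiply suitable for folding: an instruction with opcode \p MulOpc (a
// MADD whose addend is \p ZeroReg, i.e. a pure MUL) that lives in \p MBB and
// whose result has exactly one non-debug use, namely the instruction we are
// combining with.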
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
    return false;

  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");

  // The third input reg must be zero.
  if (MI->getOperand(3).getReg() != ZeroReg)
    return false;

  // Must only be used by the user we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}
/// hasPattern - return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
/// listed in the \p Pattern vector. Pattern should be sorted in priority
/// order since the pattern evaluator stops checking as soon as it finds a
/// faster sequence.
bool AArch64InstrInfo::hasPattern(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(&Root);
    // When opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
/// genMadd - Generate madd instruction and combine mul and add.
/// Example:
///  MUL I=A,B,0
///  ADD R,I,C
///  ==> MADD R,A,B,C
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
                             const TargetInstrInfo *TII, MachineInstr &Root,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             unsigned IdxMulOpd, unsigned MaddOpc,
                             const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();
  unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
  bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(SrcReg2, getKillRegState(Src2IsKill));
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///  MUL I=A,B,0
///  ADD R,I,Imm
///  ==> ORR  V, ZR, Imm
///  ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR, const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(VR);
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}
/// genAlternativeCodeSequence - when hasPattern() finds a pattern
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL = nullptr;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // Signal error.
    break;
  case MachineCombinerPattern::MC_MULADDW_OP1:
  case MachineCombinerPattern::MC_MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDW_OP2:
  case MachineCombinerPattern::MC_MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDWI_OP1:
  case MachineCombinerPattern::MC_MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP1:
  case MachineCombinerPattern::MC_MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP2:
  case MachineCombinerPattern::MC_MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULSUBWI_OP1:
  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64RegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    int Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = -Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion. MUL may be null when no replacement
  // was generated (e.g. an immediate that cannot be encoded as a logical
  // immediate), so guard the push.
  if (MUL)
    DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}
/// \brief Replace a csinc-branch sequence by a simple conditional branch
///
/// Examples:
/// 1.
///   csinc w9, wzr, wzr, <condition code>
///   tbnz  w9, #0, 0x44
/// to
///   b.<inverted condition code>
///
/// 2.
///   csinc w9, wzr, wzr, <condition code>
///   tbz   w9, #0, 0x44
/// to
///   b.<condition code>
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI->getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI->getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look for CSINC.
  if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
        DefMI->getOperand(1).getReg() == AArch64::WZR &&
        DefMI->getOperand(2).getReg() == AArch64::WZR) &&
      !(DefMI->getOpcode() == AArch64::CSINCXr &&
        DefMI->getOperand(1).getReg() == AArch64::XZR &&
        DefMI->getOperand(2).getReg() == AArch64::XZR))
    return false;

  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
    return false;

  AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
  bool CheckOnlyCCWrites = true;
  // Convert only when the condition code is not modified between
  // the CSINC and the branch. The CC may be used by other
  // instructions in between.
  if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
    return false;
  MachineBasicBlock &RefToMBB = *MBB;
  MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
  DebugLoc DL = MI->getDebugLoc();
  if (IsNegativeBranch)
    CC = AArch64CC::getInvertedCondCode(CC);
  BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
  MI->eraseFromParent();
  return true;
}