//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
14 #include "AArch64InstrInfo.h"
15 #include "AArch64Subtarget.h"
16 #include "MCTargetDesc/AArch64AddressingModes.h"
17 #include "AArch64MachineCombinerPattern.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/MachineMemOperand.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/PseudoSourceValue.h"
23 #include "llvm/MC/MCInst.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes()- Unable to determine insn size");
}
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}
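
// AnalyzeBranch follows the usual TargetInstrInfo contract: it returns false
// and fills in TBB/FBB/Cond when the terminators are understood, and returns
// true when this block's control flow cannot be analyzed.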
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;
  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }
  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now if the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }
  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;
  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }
  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }
  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}
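
// RemoveBranch returns how many terminators it erased: 0 if the block does
// not end in a branch, 1 after erasing a single branch, and 2 after erasing
// a conditional branch followed by an unconditional one.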
unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();

  return 2;
}
void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}
unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      break;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}
// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
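
// For example, given a definition
//   %w1 = ADDWri %w2, 1, 0        ; %w1 = %w2 + 1
// a select "cc ? %w1 : %w3" can be emitted as a single
//   CSINCWr %w3, %w2, <inverted cc>
// which yields %w2 + 1 when the original condition holds and %w3 otherwise.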
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    const SmallVectorImpl<MachineOperand> &Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = 0;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = 1;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = 0;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = 1;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

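  // At this point NZCV holds the branch condition: either the compare feeding
  // the original Bcc already set it, or the SUBS/ANDS emitted above computed
  // it from the cbz/cbnz/tbz/tbnz operands, so a conditional select on CC is
  // now sufficient.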
  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
  if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
    return MI->isAsCheapAsAMove();

  switch (MI->getOpcode()) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI->getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(2).getImm();
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the others xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = AArch64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(),
        MI->getOpcode() == AArch64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}
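
// Constrain the operands of \p Instr to the register classes required by its
// (possibly just mutated) opcode. Returns false if some operand cannot be
// made compatible with the instruction's constraints.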
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}
/// convertFlagSettingOpcode - return opcode that does not
/// set flags when possible. The caller is responsible to do
/// the actual substitution and legality checking.
static unsigned convertFlagSettingOpcode(MachineInstr *MI) {
  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrr: NewOpc = AArch64::ADDWrr; break;
  case AArch64::ADDSWri: NewOpc = AArch64::ADDWri; break;
  case AArch64::ADDSWrs: NewOpc = AArch64::ADDWrs; break;
  case AArch64::ADDSWrx: NewOpc = AArch64::ADDWrx; break;
  case AArch64::ADDSXrr: NewOpc = AArch64::ADDXrr; break;
  case AArch64::ADDSXri: NewOpc = AArch64::ADDXri; break;
  case AArch64::ADDSXrs: NewOpc = AArch64::ADDXrs; break;
  case AArch64::ADDSXrx: NewOpc = AArch64::ADDXrx; break;
  case AArch64::SUBSWrr: NewOpc = AArch64::SUBWrr; break;
  case AArch64::SUBSWri: NewOpc = AArch64::SUBWri; break;
  case AArch64::SUBSWrs: NewOpc = AArch64::SUBWrs; break;
  case AArch64::SUBSWrx: NewOpc = AArch64::SUBWrx; break;
  case AArch64::SUBSXrr: NewOpc = AArch64::SUBXrr; break;
  case AArch64::SUBSXri: NewOpc = AArch64::SUBXri; break;
  case AArch64::SUBSXrs: NewOpc = AArch64::SUBXrs; break;
  case AArch64::SUBSXrx: NewOpc = AArch64::SUBXrx; break;
  }

  return NewOpc;
}
/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
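/// For example, the compare in
///   add w2, w0, w1
///   cmp w2, #0        ; subs wzr, w2, #0
///   b.ne target
/// can be removed by switching the add to the flag-setting form:
///   adds w2, w0, w1
///   b.ne target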
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    unsigned Opc = CmpInstr->getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }
  // Continue only if we have a "ri" where immediate is zero.
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  // We iterate backward, starting from the instruction before CmpInstr and
  // stop when reaching the definition of the source register or done with the
  // basic block, to check whether NZCV is used or modified in between.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();
  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B)
    return false;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, we can't optimize away the Compare.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that NZCV isn't set between the comparison instruction and the one
  // we want to change.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        Instr.readsRegister(AArch64::NZCV, TRI))
      // This instruction modifies or uses NZCV after the one we want to
      // change. We can't do this transformation.
      return false;
  }
  // The 'and' is below the comparison instruction.

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr:  NewOpc = AArch64::ADCSWr;  break;
  case AArch64::ADCXr:  NewOpc = AArch64::ADCSXr;  break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr:  NewOpc = AArch64::SBCSWr;  break;
  case AArch64::SBCXr:  NewOpc = AArch64::SBCSXr;  break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }
  // Scan forward for the use of NZCV.
  // When checking against MI: if it is a conditional code that requires
  // checking of the V bit, then this is not safe to do.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove Compare instruction if Overflow(V) is used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::GT:
      case AArch64CC::LT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }
  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}
/// Return true if this instruction has a non-zero immediate (shift) operand.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}
/// Return true if this instruction has a non-zero immediate (extend) operand.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}
unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}
bool
AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                       unsigned &Offset,
                                       const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui: {
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
  }
}
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that oper 2 isImm.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}
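
// For example, two LDRXui loads at scaled offsets #8 and #9 (bytes 64 and 72)
// from the same base register are candidates to later become a single
// ldp x0, x1, [base, #64].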
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != AArch64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::ANDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
  case AArch64::ANDSXri:
    return true;
  }
}
MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                                         int FrameIx,
                                                         uint64_t Offset,
                                                         const MDNode *MDPtr,
                                                         DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(MDPtr);
  return &*MIB;
}
static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
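
// For example, a forward sub-register-by-sub-register copy of the tuple
// D1_D2_D3 into D2_D3_D4 would overwrite D2 and D3 before they are read, so
// copyPhysRegTuple below walks the sub-registers backwards in that case.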
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}
void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
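
  // For example, Offset == 0x12345 is emitted as two instructions:
  //   add dst, src, #0x12, lsl #12   ; adds 0x12000
  //   add dst, dst, #0x345           ; adds the remaining low bits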

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }

  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
MachineInstr *
AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}
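// Illustrative note (not from the original source): isAArch64FrameOffsetLegal
// reports how much of a soft frame offset an instruction can absorb. E.g. for
// LDRXui (scaled by 8) with Offset = 4100: 4100 is not a multiple of 8, so
// the unscaled LDURXi form is chosen; 4100 also exceeds LDUR's signed 9-bit
// range, so *EmittableOffset is clamped to 255 and the remaining 3845 bytes
// are left in Offset for the caller to materialize separately.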
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }
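  // The immediate operand is expressed in units of Scale; convert it to bytes
  // and fold it into the incoming soft offset. Illustrative example (not from
  // the original source): LDRXui with imm = 3 and an incoming Offset of 8
  // denotes a byte offset of 3 * 8 + 8 = 32.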
  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
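// Illustrative note (not from the original source): rewriteAArch64FrameIndex
// folds as much of Offset into MI's immediate operand as the encoding allows;
// plain ADDXri/ADDSXri instructions are instead re-emitted via emitFrameOffset
// and erased. On return, Offset holds any remainder the caller still has to
// materialize, and the result is true iff MI was updated or erased.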
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return true;
  }

  return false;
}
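// Note: HINT #0 is the architectural NOP encoding on AArch64.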
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}
/// useMachineCombiner - return true when a target supports MachineCombiner
bool AArch64InstrInfo::useMachineCombiner(void) const {
  // AArch64 supports the combiner
  return true;
}

// True when Opc sets the NZCV flags
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}
// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}

// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
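// Illustrative MIR shape (not from the original source) that the check below
// accepts for MulOpc = MADDWrrr and ZeroReg = WZR:
//   %vreg1<def> = MADDWrrr %vregA, %vregB, %WZR  ; a pure MUL: addend is WZR
//   %vreg2<def> = ADDWrr %vreg1, %vregC          ; the single user combined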
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
    return false;

  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");

  // The third input reg must be zero.
  if (MI->getOperand(3).getReg() != ZeroReg)
    return false;

  // Must only be used by the user we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}
/// hasPattern - return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
/// listed in the \p Pattern vector. Pattern should be sorted in priority
/// order since the pattern evaluator stops checking as soon as it finds a
/// faster sequence.

bool AArch64InstrInfo::hasPattern(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live, bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(&Root);
    // When the opcode can't change, bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
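// Illustrative note (not from the original source): the MachineCombiner pass
// drives these hooks. hasPattern() above nominates candidate patterns for a
// root instruction; genAlternativeCodeSequence() below materializes one via
// the genMadd/genMaddR helpers, and the pass only keeps the new sequence when
// it does not lengthen the critical path.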
/// genMadd - Generate madd instruction and combine mul and add.
/// Example:
///  MUL I=A,B,0
///  ADD R,I,C
///  ==> MADD R,A,B,C
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
                             const TargetInstrInfo *TII, MachineInstr &Root,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             unsigned IdxMulOpd, unsigned MaddOpc) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  MachineOperand R = Root.getOperand(0);
  MachineOperand A = MUL->getOperand(1);
  MachineOperand B = MUL->getOperand(2);
  MachineOperand C = Root.getOperand(IdxOtherOpd);
  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc))
                                .addOperand(R)
                                .addOperand(A)
                                .addOperand(B)
                                .addOperand(C);
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MUL;
}
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  MachineOperand R = Root.getOperand(0);
  MachineOperand A = MUL->getOperand(1);
  MachineOperand B = MUL->getOperand(2);
  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc))
                                .addOperand(R)
                                .addOperand(A)
                                .addOperand(B)
                                .addReg(VR);
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MUL;
}
/// genAlternativeCodeSequence - when hasPattern() finds a pattern
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();

  MachineInstr *MUL;
  unsigned Opc;
  switch (Pattern) {
  default:
    // signal error.
    break;
  case MachineCombinerPattern::MC_MULADDW_OP1:
  case MachineCombinerPattern::MC_MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    Opc = Pattern == MachineCombinerPattern::MC_MULADDW_OP1 ? AArch64::MADDWrrr
                                                            : AArch64::MADDXrrr;
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc);
    break;
  case MachineCombinerPattern::MC_MULADDW_OP2:
  case MachineCombinerPattern::MC_MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    Opc = Pattern == MachineCombinerPattern::MC_MULADDW_OP2 ? AArch64::MADDWrrr
                                                            : AArch64::MADDXrrr;
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc);
    break;
  case MachineCombinerPattern::MC_MULADDWI_OP1:
  case MachineCombinerPattern::MC_MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *RC =
        MRI.getRegClass(Root.getOperand(1).getReg());
    unsigned NewVR = MRI.createVirtualRegister(RC);
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
      BitSize = 32;
      OrrOpc = AArch64::ORRWri;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
    } else {
      OrrOpc = AArch64::ORRXri;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
    }
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned val = Root.getOperand(3).getImm();
      Imm = Imm << val;
    }
    uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
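    // The shift pair above truncates Imm to BitSize bits. Illustrative values
    // (not from the original source): for BitSize == 32,
    // Imm = 0x100000005 yields UImm = 0x5.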
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc))
              .addOperand(MachineOperand::CreateReg(NewVR, RegState::Define))
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR);
    }
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP1:
  case MachineCombinerPattern::MC_MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *RC =
        MRI.getRegClass(Root.getOperand(1).getReg());
    unsigned NewVR = MRI.createVirtualRegister(RC);
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
    } else {
      SubOpc = AArch64::SUBXrr;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
    }
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc))
            .addOperand(MachineOperand::CreateReg(NewVR, RegState::Define))
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR);
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP2:
  case MachineCombinerPattern::MC_MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    Opc = Pattern == MachineCombinerPattern::MC_MULSUBW_OP2 ? AArch64::MSUBWrrr
                                                            : AArch64::MSUBXrrr;
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc);
    break;
  case MachineCombinerPattern::MC_MULSUBWI_OP1:
  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *RC =
        MRI.getRegClass(Root.getOperand(1).getReg());
    unsigned NewVR = MRI.createVirtualRegister(RC);
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
      BitSize = 32;
      OrrOpc = AArch64::ORRWri;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
    } else {
      OrrOpc = AArch64::ORRXri;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
    }
    int Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned val = Root.getOperand(3).getImm();
      Imm = Imm << val;
    }
    uint64_t UImm = -Imm << (64 - BitSize) >> (64 - BitSize);
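    // The shift pair above truncates -Imm to BitSize bits. Illustrative
    // values (not from the original source): for BitSize == 32 and Imm = 5,
    // UImm = 0xfffffffb, the 32-bit two's complement encoding of -5.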
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc))
              .addOperand(MachineOperand::CreateReg(NewVR, RegState::Define))
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);