//===- ARM64InstrInfo.cpp - ARM64 Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "ARM64InstrInfo.h"
#include "ARM64Subtarget.h"
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "ARM64GenInstrInfo.inc"

ARM64InstrInfo::ARM64InstrInfo(const ARM64Subtarget &STI)
    : ARM64GenInstrInfo(ARM64::ADJCALLSTACKDOWN, ARM64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}
/// GetInstSizeInBytes - Return the size in bytes of code for the specified
/// instruction. This returns the maximum number of bytes.
unsigned ARM64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MCInstrDesc &Desc = MI->getDesc();

  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}
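
// The Cond operand vector built by parseCondBranch() below and consumed by
// instantiateCondBranch(), ReverseBranchCondition(), and insertSelect()
// takes one of three shapes:
//   Bcc:        { CondCode }
//   CBZ/CBNZ:   { -1, Opcode, Reg }
//   TBZ/TBNZ:   { -1, Opcode, Reg, BitNumber }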
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case ARM64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case ARM64::CBZW:
  case ARM64::CBZX:
  case ARM64::CBNZW:
  case ARM64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case ARM64::TBZ:
  case ARM64::TBNZ:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}
bool ARM64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }
  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
bool ARM64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc.
    ARM64CC::CondCode CC = (ARM64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(ARM64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case ARM64::CBZW:
      Cond[1].setImm(ARM64::CBNZW);
      break;
    case ARM64::CBNZW:
      Cond[1].setImm(ARM64::CBZW);
      break;
    case ARM64::CBZX:
      Cond[1].setImm(ARM64::CBNZX);
      break;
    case ARM64::CBNZX:
      Cond[1].setImm(ARM64::CBZX);
      break;
    case ARM64::TBZ:
      Cond[1].setImm(ARM64::TBNZ);
      break;
    case ARM64::TBNZ:
      Cond[1].setImm(ARM64::TBZ);
      break;
    }
  }

  return false;
}
unsigned ARM64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();
  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();

  return 2;
}
void ARM64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc.
    BuildMI(&MBB, DL, get(ARM64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}
unsigned ARM64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(ARM64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(ARM64::B)).addMBB(FBB);
  return 2;
}
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      break;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}
// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
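// For example, "%vreg1 = ADDWri %vreg0, 1, 0" feeding a select's true operand
// folds to CSINCWr, with NewVReg set to %vreg0.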
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = 0) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = ARM64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case ARM64::ADDSXri:
  case ARM64::ADDSWri:
    // if CPSR is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(ARM64::CPSR, true) == -1)
      return 0;
  // fall-through to ADDXri and ADDWri.
  case ARM64::ADDXri:
  case ARM64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? ARM64::CSINCXr : ARM64::CSINCWr;
    break;

  case ARM64::ORNXrr:
  case ARM64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != ARM64::XZR && ZReg != ARM64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? ARM64::CSINVXr : ARM64::CSINVWr;
    break;
  }

  case ARM64::SUBSXrr:
  case ARM64::SUBSWrr:
    // if CPSR is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(ARM64::CPSR, true) == -1)
      return 0;
  // fall-through to SUBXrr and SUBWrr.
  case ARM64::SUBXrr:
  case ARM64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != ARM64::XZR && ZReg != ARM64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? ARM64::CSNEGXr : ARM64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
bool ARM64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (ARM64::GPR64allRegClass.hasSubClassEq(RC) ||
      ARM64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (ARM64::FPR64RegClass.hasSubClassEq(RC) ||
      ARM64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}
void ARM64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I, DebugLoc DL,
                                  unsigned DstReg,
                                  const SmallVectorImpl<MachineOperand> &Cond,
                                  unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  ARM64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // Bcc
    CC = ARM64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    }
    unsigned SrcReg = Cond[2].getReg();
    // cmp reg, #0 is actually subs xzr, reg, #0.
    MRI.constrainRegClass(SrcReg, &ARM64::GPR64spRegClass);
    BuildMI(MBB, I, DL, get(ARM64::SUBSXri), ARM64::XZR)
        .addReg(SrcReg)
        .addImm(0)
        .addImm(0);
    MRI.constrainRegClass(SrcReg, &ARM64::GPR32spRegClass);
    BuildMI(MBB, I, DL, get(ARM64::SUBSWri), ARM64::WZR)
        .addReg(SrcReg)
        .addImm(0)
        .addImm(0);
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    BuildMI(MBB, I, DL, get(ARM64::ANDSXri), ARM64::XZR)
        .addReg(Cond[2].getReg())
        .addImm(ARM64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }
  unsigned Opc = 0;
  const TargetRegisterClass *RC = 0;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &ARM64::GPR64RegClass)) {
    RC = &ARM64::GPR64RegClass;
    Opc = ARM64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &ARM64::GPR32RegClass)) {
    RC = &ARM64::GPR32RegClass;
    Opc = ARM64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &ARM64::FPR64RegClass)) {
    RC = &ARM64::FPR64RegClass;
    Opc = ARM64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &ARM64::FPR32RegClass)) {
    RC = &ARM64::FPR32RegClass;
    Opc = ARM64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv, and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = ARM64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}
bool ARM64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                           unsigned &SrcReg, unsigned &DstReg,
                                           unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case ARM64::SBFMXri: // aka sxtw
  case ARM64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = ARM64::sub_32;
    return true;
  }
}
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool ARM64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                    unsigned &SrcReg2, int &CmpMask,
                                    int &CmpValue) const {
  switch (MI->getOpcode()) {
    // Replace SUBSWrr with SUBWrr if CPSR is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;

    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(2).getImm();
    return true;

    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = ARM64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(),
        MI->getOpcode() == ARM64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}
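
// Check whether each operand of Instr satisfies the register-class constraint
// imposed by its instruction descriptor, constraining virtual registers where
// possible; returns false when some operand cannot be made to satisfy its
// constraint.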
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}
/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
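/// For example, when "%vreg1 = SUBWrr %vreg2, %vreg3" feeds a
/// "SUBSWri %vreg1, 0" compare whose result is otherwise unused, the sub can
/// be rewritten as "SUBSWrr %vreg2, %vreg3" and the compare erased, provided
/// CPSR is not used or clobbered in between.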
bool ARM64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {
  // Replace SUBSWrr with SUBWrr if CPSR is not used.
  int Cmp_CPSR = CmpInstr->findRegisterDefOperandIdx(ARM64::CPSR, true);
  if (Cmp_CPSR != -1) {
    unsigned NewOpc;
    switch (CmpInstr->getOpcode()) {
    default:
      return false;
    case ARM64::ADDSWrr: NewOpc = ARM64::ADDWrr; break;
    case ARM64::ADDSWri: NewOpc = ARM64::ADDWri; break;
    case ARM64::ADDSWrs: NewOpc = ARM64::ADDWrs; break;
    case ARM64::ADDSWrx: NewOpc = ARM64::ADDWrx; break;
    case ARM64::ADDSXrr: NewOpc = ARM64::ADDXrr; break;
    case ARM64::ADDSXri: NewOpc = ARM64::ADDXri; break;
    case ARM64::ADDSXrs: NewOpc = ARM64::ADDXrs; break;
    case ARM64::ADDSXrx: NewOpc = ARM64::ADDXrx; break;
    case ARM64::SUBSWrr: NewOpc = ARM64::SUBWrr; break;
    case ARM64::SUBSWri: NewOpc = ARM64::SUBWri; break;
    case ARM64::SUBSWrs: NewOpc = ARM64::SUBWrs; break;
    case ARM64::SUBSWrx: NewOpc = ARM64::SUBWrx; break;
    case ARM64::SUBSXrr: NewOpc = ARM64::SUBXrr; break;
    case ARM64::SUBSXri: NewOpc = ARM64::SUBXri; break;
    case ARM64::SUBSXrs: NewOpc = ARM64::SUBXrs; break;
    case ARM64::SUBSXrx: NewOpc = ARM64::SUBXrx; break;
    }

    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_CPSR);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands' register classes are incompatible!");
    return true;
  }
  // Continue only if we have a "ri" where immediate is zero.
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  // We iterate backward, starting from the instruction before CmpInstr and
  // stop when reaching the definition of the source register or done with the
  // basic block, to check whether CPSR is used or modified in between.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B)
    return false;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, we can't optimize away the Compare.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that CPSR isn't set between the comparison instruction and the one
  // we want to change.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(ARM64::CPSR, TRI) ||
        Instr.readsRegister(ARM64::CPSR, TRI))
      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      return false;
  }

  // The 'and' is below the comparison instruction.
  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case ARM64::ADDWrr: NewOpc = ARM64::ADDSWrr; break;
  case ARM64::ADDWri: NewOpc = ARM64::ADDSWri; break;
  case ARM64::ADDXrr: NewOpc = ARM64::ADDSXrr; break;
  case ARM64::ADDXri: NewOpc = ARM64::ADDSXri; break;
  case ARM64::ADCWr: NewOpc = ARM64::ADCSWr; break;
  case ARM64::ADCXr: NewOpc = ARM64::ADCSXr; break;
  case ARM64::SUBWrr: NewOpc = ARM64::SUBSWrr; break;
  case ARM64::SUBWri: NewOpc = ARM64::SUBSWri; break;
  case ARM64::SUBXrr: NewOpc = ARM64::SUBSXrr; break;
  case ARM64::SUBXri: NewOpc = ARM64::SUBSXri; break;
  case ARM64::SBCWr: NewOpc = ARM64::SBCSWr; break;
  case ARM64::SBCXr: NewOpc = ARM64::SBCSXr; break;
  case ARM64::ANDWri: NewOpc = ARM64::ANDSWri; break;
  case ARM64::ANDXri: NewOpc = ARM64::ANDSXri; break;
  }
  // Scan forward for the use of CPSR.
  // When checking against MI: if the condition code requires checking of the
  // V bit, then it is not safe to do this optimization.
  // It is safe to remove CmpInstr if CPSR is redefined or killed.
  // If we are done with the basic block, we need to check whether CPSR is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM64::CPSR)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != ARM64::CPSR)
        continue;
      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      ARM64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case ARM64::Bcc:
        CC = (ARM64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case ARM64::FCSELSrrr:
      case ARM64::FCSELDrrr:
        CC = (ARM64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove the Compare instruction if Overflow (V) is
      // used.
      // CPSR can be used multiple times; we should continue.

  // If CPSR is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(ARM64::CPSR))
        return false;
  }

  // Update the instruction to set CPSR.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands' register classes are incompatible!");
  MI->addRegisterDefined(ARM64::CPSR, TRI);
  return true;
}
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool ARM64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::MOVZWi:
  case ARM64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case ARM64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == ARM64::WZR;
  case ARM64::ANDXri:
    return MI->getOperand(1).getReg() == ARM64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == ARM64::WZR;
  }

  return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool ARM64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (ARM64::GPR32RegClass.contains(DstReg) ||
            ARM64::GPR64RegClass.contains(DstReg));
  }
  case ARM64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == ARM64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case ARM64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}
// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool ARM64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (ARM64::FPR64RegClass.contains(DstReg) ||
            ARM64::FPR128RegClass.contains(DstReg));
  }
  case ARM64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}
unsigned ARM64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                             int &FrameIndex) const {
  switch (MI->getOpcode()) {
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}

unsigned ARM64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                            int &FrameIndex) const {
  switch (MI->getOpcode()) {
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}
/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
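/// For example, "ldr x0, [x1, w2, sxtw #3]" scales/extends its offset
/// register, while "ldr x0, [x1, x2]" (UXTX with no shift) does not.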
bool ARM64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    return false;
  case ARM64::LDRSBWro:
  case ARM64::LDRSBXro:
  case ARM64::LDRSHWro:
  case ARM64::LDRSHXro:
    unsigned Val = MI->getOperand(3).getImm();
    ARM64_AM::ExtendType ExtType = ARM64_AM::getMemExtendType(Val);
    return (ExtType != ARM64_AM::UXTX) || ARM64_AM::getMemDoShift(Val);
  }
}
/// Check all MachineMemOperands for a hint to suppress pairing.
bool ARM64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void ARM64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}
bool ARM64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                          unsigned &Offset,
                                          const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
}
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
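/// For example, two LDRXui loads at scaled immediate offsets 2 and 3 (byte
/// offsets 16 and 24) are adjacent, so they may be clustered for later ldp
/// formation.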
bool ARM64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                        MachineInstr *SecondLdSt,
                                        unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that operand 2 is an immediate.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 63)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}
bool ARM64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                            MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != ARM64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case ARM64::SUBSWri:
  case ARM64::ADDSWri:
  case ARM64::ANDSWri:
  case ARM64::SUBSXri:
  case ARM64::ADDSXri:
  case ARM64::ANDSXri:
    return true;
  }
}
MachineInstr *ARM64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                                       int FrameIx,
                                                       uint64_t Offset,
                                                       const MDNode *MDPtr,
                                                       DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(MDPtr);
  return MIB;
}
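
// Add Reg to MIB with the given register State. When SubIdx is non-zero, the
// matching physical sub-register is added instead (or the sub-register index
// for a virtual register).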
static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
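  // For example, copying D1_D2 into D2_D3 gives (2 - 1) & 0x1f == 1 < 2, so a
  // forward copy would clobber D2 before it is read and the tuple must be
  // copied in reverse.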
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
void ARM64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      DebugLoc DL, unsigned DestReg,
                                      unsigned SrcReg, bool KillSrc,
                                      unsigned Opcode,
                                      llvm::ArrayRef<unsigned> Indices) const {
  assert(getSubTarget().hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}
void ARM64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I, DebugLoc DL,
                                 unsigned DestReg, unsigned SrcReg,
                                 bool KillSrc) const {
  if (ARM64::GPR32spRegClass.contains(DestReg) &&
      (ARM64::GPR32spRegClass.contains(SrcReg) || SrcReg == ARM64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == ARM64::WSP || SrcReg == ARM64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, ARM64::sub_32,
                                                     &ARM64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, ARM64::sub_32,
                                                    &ARM64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(ARM64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(ARM64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
      }
    } else if (SrcReg == ARM64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(ARM64::MOVZWi), DestReg).addImm(0).addImm(
          ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, ARM64::sub_32,
                                                     &ARM64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, ARM64::sub_32,
                                                    &ARM64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(ARM64::ORRXrr), DestRegX)
            .addReg(ARM64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(ARM64::ORRWrr), DestReg)
            .addReg(ARM64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (ARM64::GPR64spRegClass.contains(DestReg) &&
      (ARM64::GPR64spRegClass.contains(SrcReg) || SrcReg == ARM64::XZR)) {
    if (DestReg == ARM64::SP || SrcReg == ARM64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(ARM64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
    } else if (SrcReg == ARM64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(ARM64::MOVZXi), DestReg).addImm(0).addImm(
          ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(ARM64::ORRXrr), DestReg)
          .addReg(ARM64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
  // Copy a DDDD register quad by copying the individual sub-registers.
  if (ARM64::DDDDRegClass.contains(DestReg) &&
      ARM64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::dsub0, ARM64::dsub1,
                                        ARM64::dsub2, ARM64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (ARM64::DDDRegClass.contains(DestReg) &&
      ARM64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::dsub0, ARM64::dsub1,
                                        ARM64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (ARM64::DDRegClass.contains(DestReg) &&
      ARM64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::dsub0, ARM64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (ARM64::QQQQRegClass.contains(DestReg) &&
      ARM64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::qsub0, ARM64::qsub1,
                                        ARM64::qsub2, ARM64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (ARM64::QQQRegClass.contains(DestReg) &&
      ARM64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::qsub0, ARM64::qsub1,
                                        ARM64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (ARM64::QQRegClass.contains(DestReg) &&
      ARM64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::qsub0, ARM64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv16i8,
                     Indices);
    return;
  }
  if (ARM64::FPR128RegClass.contains(DestReg) &&
      ARM64::FPR128RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(ARM64::STRQpre))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(ARM64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(ARM64::LDRQpre))
          .addReg(DestReg, RegState::Define)
          .addReg(ARM64::SP)
          .addImm(16);
    }
    return;
  }

  if (ARM64::FPR64RegClass.contains(DestReg) &&
      ARM64::FPR64RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::dsub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::dsub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(ARM64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (ARM64::FPR32RegClass.contains(DestReg) &&
      ARM64::FPR32RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::ssub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::ssub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(ARM64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (ARM64::FPR16RegClass.contains(DestReg) &&
      ARM64::FPR16RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::hsub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::hsub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::hsub, &ARM64::FPR32RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::hsub, &ARM64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(ARM64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (ARM64::FPR8RegClass.contains(DestReg) &&
      ARM64::FPR8RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::bsub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::bsub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::bsub, &ARM64::FPR32RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::bsub, &ARM64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(ARM64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
  // Copies between GPR64 and FPR64.
  if (ARM64::FPR64RegClass.contains(DestReg) &&
      ARM64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (ARM64::GPR64RegClass.contains(DestReg) &&
      ARM64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (ARM64::FPR32RegClass.contains(DestReg) &&
      ARM64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (ARM64::GPR32RegClass.contains(DestReg) &&
      ARM64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}
void ARM64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MBBI,
                                         unsigned SrcReg, bool isKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (ARM64::FPR8RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRBui;
    break;
  case 2:
    if (ARM64::FPR16RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRHui;
    break;
  case 4:
    if (ARM64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR32RegClass);
      else
        assert(SrcReg != ARM64::WSP);
    } else if (ARM64::FPR32RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRSui;
    break;
  case 8:
    if (ARM64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR64RegClass);
      else
        assert(SrcReg != ARM64::SP);
    } else if (ARM64::FPR64RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRDui;
    break;
  case 16:
    if (ARM64::FPR128RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRQui;
    else if (ARM64::DDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (ARM64::DDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (ARM64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Fourv1d, Offset = false;
    } else if (ARM64::QQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (ARM64::QQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (ARM64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void ARM64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          unsigned DestReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (ARM64::FPR8RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRBui;
    break;
  case 2:
    if (ARM64::FPR16RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRHui;
    break;
  case 4:
    if (ARM64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &ARM64::GPR32RegClass);
      else
        assert(DestReg != ARM64::WSP);
    } else if (ARM64::FPR32RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRSui;
    break;
  case 8:
    if (ARM64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &ARM64::GPR64RegClass);
      else
        assert(DestReg != ARM64::SP);
    } else if (ARM64::FPR64RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRDui;
    break;
  case 16:
    if (ARM64::FPR128RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRQui;
    else if (ARM64::DDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (ARM64::DDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (ARM64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Fourv1d, Offset = false;
    } else if (ARM64::QQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (ARM64::QQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (ARM64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const ARM64InstrInfo *TII, MachineInstr::MIFlag Flag,
                           bool SetCPSR) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetCPSR)
    Opc = isSub ? ARM64::SUBSXri : ARM64::ADDSXri;
  else
    Opc = isSub ? ARM64::SUBXri : ARM64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
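
  // For example, Offset == 0x12345 makes the loop below emit
  // "add Dest, Src, #0x12, lsl #12", leaving 0x345 for the final
  // add/sub emitted after the loop.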
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);
    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0))
      .setMIFlag(Flag);
}
MachineInstr *
ARM64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &ARM64::GPR64RegClass);
      return 0;
    }
    if (DstReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR64RegClass);
      return 0;
    }
  }

  // Cannot fold.
  return 0;
}
int llvm::isARM64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                  bool *OutUseUnscaledOp,
                                  unsigned *OutUnscaledOp,
                                  int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in isARM64FrameOffsetLegal");
  // Vector spills/fills can't take an immediate offset.
  case ARM64::LD1Twov2d:
  case ARM64::LD1Threev2d:
  case ARM64::LD1Fourv2d:
  case ARM64::LD1Twov1d:
  case ARM64::LD1Threev1d:
  case ARM64::LD1Fourv1d:
  case ARM64::ST1Twov2d:
  case ARM64::ST1Threev2d:
  case ARM64::ST1Fourv2d:
  case ARM64::ST1Twov1d:
  case ARM64::ST1Threev1d:
  case ARM64::ST1Fourv1d:
    return ARM64FrameOffsetCannotUpdate;
    UnscaledOp = ARM64::PRFUMi;
    UnscaledOp = ARM64::LDURXi;
    UnscaledOp = ARM64::LDURWi;
    UnscaledOp = ARM64::LDURBi;
    UnscaledOp = ARM64::LDURHi;
    UnscaledOp = ARM64::LDURSi;
    UnscaledOp = ARM64::LDURDi;
    UnscaledOp = ARM64::LDURQi;
  case ARM64::LDRBBui:
    UnscaledOp = ARM64::LDURBBi;
  case ARM64::LDRHHui:
    UnscaledOp = ARM64::LDURHHi;
  case ARM64::LDRSBXui:
    UnscaledOp = ARM64::LDURSBXi;
  case ARM64::LDRSBWui:
    UnscaledOp = ARM64::LDURSBWi;
  case ARM64::LDRSHXui:
    UnscaledOp = ARM64::LDURSHXi;
  case ARM64::LDRSHWui:
    UnscaledOp = ARM64::LDURSHWi;
  case ARM64::LDRSWui:
    UnscaledOp = ARM64::LDURSWi;
    UnscaledOp = ARM64::STURXi;
    UnscaledOp = ARM64::STURWi;
    UnscaledOp = ARM64::STURBi;
    UnscaledOp = ARM64::STURHi;
    UnscaledOp = ARM64::STURSi;
    UnscaledOp = ARM64::STURDi;
    UnscaledOp = ARM64::STURQi;
  case ARM64::STRBBui:
    UnscaledOp = ARM64::STURBBi;
  case ARM64::STRHHui:
    UnscaledOp = ARM64::STURHHi;
  case ARM64::LDURHHi:
  case ARM64::LDURBBi:
  case ARM64::LDURSBXi:
  case ARM64::LDURSBWi:
  case ARM64::LDURSHXi:
  case ARM64::LDURSHWi:
  case ARM64::LDURSWi:
  case ARM64::STURBBi:
  case ARM64::STURHHi:
    Scale = 1;
    IsSigned = true;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return ARM64FrameOffsetCanUpdate |
         (Offset == 0 ? ARM64FrameOffsetIsLegal : 0);
}
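
// Example for isARM64FrameOffsetLegal: an LDRXui (scale 8) at byte offset 24
// is emittable as the scaled immediate 3; byte offset 20 is not a multiple of
// 8, so the load must switch to LDURXi with immediate 20, which is legal while
// it fits the signed 9-bit unscaled range.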
bool llvm::rewriteARM64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                  unsigned FrameReg, int &Offset,
                                  const ARM64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == ARM64::ADDSXri || Opcode == ARM64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == ARM64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isARM64FrameOffsetLegal(MI, Offset, &UseUnscaledOp, &UnscaledOp,
                                       &NewOffset);
  if (Status & ARM64FrameOffsetCanUpdate) {
    if (Status & ARM64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}
void ARM64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(ARM64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}