1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
42 class AArch64AsmParser : public MCTargetAsmParser {
44 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases created via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
117 const MCInstrInfo &MII, const MCTargetOptions &Options)
118 : MCTargetAsmParser(), STI(STI) {
119 MCAsmParserExtension::Initialize(Parser);
120 MCStreamer &S = getParser().getStreamer();
121 if (S.getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(S);
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
146 class AArch64Operand : public MCParsedAsmOperand {
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
184 struct VectorIndexOp {
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
216 uint32_t PStateField;
229 struct ShiftExtendOp {
230 AArch64_AM::ShiftExtendType Type;
232 bool HasExplicitAmount;
242 struct VectorListOp VectorList;
243 struct VectorIndexOp VectorIndex;
245 struct ShiftedImmOp ShiftedImm;
246 struct CondCodeOp CondCode;
247 struct FPImmOp FPImm;
248 struct BarrierOp Barrier;
249 struct SysRegOp SysReg;
250 struct SysCRImmOp SysCRImm;
251 struct PrefetchOp Prefetch;
252 struct ShiftExtendOp ShiftExtend;
255 // Keep the MCContext around as the MCExprs may need manipulated during
256 // the add<>Operands() calls.
260 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
262 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
264 StartLoc = o.StartLoc;
274 ShiftedImm = o.ShiftedImm;
277 CondCode = o.CondCode;
289 VectorList = o.VectorList;
292 VectorIndex = o.VectorIndex;
298 SysCRImm = o.SysCRImm;
301 Prefetch = o.Prefetch;
304 ShiftExtend = o.ShiftExtend;
309 /// getStartLoc - Get the location of the first token of this operand.
310 SMLoc getStartLoc() const override { return StartLoc; }
311 /// getEndLoc - Get the location of the last token of this operand.
312 SMLoc getEndLoc() const override { return EndLoc; }
314 StringRef getToken() const {
315 assert(Kind == k_Token && "Invalid access!");
316 return StringRef(Tok.Data, Tok.Length);
319 bool isTokenSuffix() const {
320 assert(Kind == k_Token && "Invalid access!");
324 const MCExpr *getImm() const {
325 assert(Kind == k_Immediate && "Invalid access!");
329 const MCExpr *getShiftedImmVal() const {
330 assert(Kind == k_ShiftedImm && "Invalid access!");
331 return ShiftedImm.Val;
334 unsigned getShiftedImmShift() const {
335 assert(Kind == k_ShiftedImm && "Invalid access!");
336 return ShiftedImm.ShiftAmount;
339 AArch64CC::CondCode getCondCode() const {
340 assert(Kind == k_CondCode && "Invalid access!");
341 return CondCode.Code;
344 unsigned getFPImm() const {
345 assert(Kind == k_FPImm && "Invalid access!");
349 unsigned getBarrier() const {
350 assert(Kind == k_Barrier && "Invalid access!");
354 StringRef getBarrierName() const {
355 assert(Kind == k_Barrier && "Invalid access!");
356 return StringRef(Barrier.Data, Barrier.Length);
359 unsigned getReg() const override {
360 assert(Kind == k_Register && "Invalid access!");
364 unsigned getVectorListStart() const {
365 assert(Kind == k_VectorList && "Invalid access!");
366 return VectorList.RegNum;
369 unsigned getVectorListCount() const {
370 assert(Kind == k_VectorList && "Invalid access!");
371 return VectorList.Count;
374 unsigned getVectorIndex() const {
375 assert(Kind == k_VectorIndex && "Invalid access!");
376 return VectorIndex.Val;
379 StringRef getSysReg() const {
380 assert(Kind == k_SysReg && "Invalid access!");
381 return StringRef(SysReg.Data, SysReg.Length);
384 unsigned getSysCR() const {
385 assert(Kind == k_SysCR && "Invalid access!");
389 unsigned getPrefetch() const {
390 assert(Kind == k_Prefetch && "Invalid access!");
394 StringRef getPrefetchName() const {
395 assert(Kind == k_Prefetch && "Invalid access!");
396 return StringRef(Prefetch.Data, Prefetch.Length);
399 AArch64_AM::ShiftExtendType getShiftExtendType() const {
400 assert(Kind == k_ShiftExtend && "Invalid access!");
401 return ShiftExtend.Type;
404 unsigned getShiftExtendAmount() const {
405 assert(Kind == k_ShiftExtend && "Invalid access!");
406 return ShiftExtend.Amount;
409 bool hasShiftExtendAmount() const {
410 assert(Kind == k_ShiftExtend && "Invalid access!");
411 return ShiftExtend.HasExplicitAmount;
414 bool isImm() const override { return Kind == k_Immediate; }
415 bool isMem() const override { return false; }
416 bool isSImm9() const {
419 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
422 int64_t Val = MCE->getValue();
423 return (Val >= -256 && Val < 256);
425 bool isSImm7s4() const {
428 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
431 int64_t Val = MCE->getValue();
432 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
434 bool isSImm7s8() const {
437 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
440 int64_t Val = MCE->getValue();
441 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
443 bool isSImm7s16() const {
446 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
449 int64_t Val = MCE->getValue();
450 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
453 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
454 AArch64MCExpr::VariantKind ELFRefKind;
455 MCSymbolRefExpr::VariantKind DarwinRefKind;
457 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
459 // If we don't understand the expression, assume the best and
460 // let the fixup and relocation code deal with it.
464 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
465 ELFRefKind == AArch64MCExpr::VK_LO12 ||
466 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
469 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
473 // Note that we don't range-check the addend. It's adjusted modulo page
474 // size when converted, so there is no "out of range" condition when using
476 return Addend >= 0 && (Addend % Scale) == 0;
477 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
478 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
479 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
486 template <int Scale> bool isUImm12Offset() const {
490 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 return isSymbolicUImm12Offset(getImm(), Scale);
494 int64_t Val = MCE->getValue();
495 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
498 bool isImm0_7() const {
501 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
504 int64_t Val = MCE->getValue();
505 return (Val >= 0 && Val < 8);
507 bool isImm1_8() const {
510 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
513 int64_t Val = MCE->getValue();
514 return (Val > 0 && Val < 9);
516 bool isImm0_15() const {
519 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
522 int64_t Val = MCE->getValue();
523 return (Val >= 0 && Val < 16);
525 bool isImm1_16() const {
528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
531 int64_t Val = MCE->getValue();
532 return (Val > 0 && Val < 17);
534 bool isImm0_31() const {
537 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540 int64_t Val = MCE->getValue();
541 return (Val >= 0 && Val < 32);
543 bool isImm1_31() const {
546 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
549 int64_t Val = MCE->getValue();
550 return (Val >= 1 && Val < 32);
552 bool isImm1_32() const {
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558 int64_t Val = MCE->getValue();
559 return (Val >= 1 && Val < 33);
561 bool isImm0_63() const {
564 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567 int64_t Val = MCE->getValue();
568 return (Val >= 0 && Val < 64);
570 bool isImm1_63() const {
573 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
576 int64_t Val = MCE->getValue();
577 return (Val >= 1 && Val < 64);
579 bool isImm1_64() const {
582 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
585 int64_t Val = MCE->getValue();
586 return (Val >= 1 && Val < 65);
588 bool isImm0_127() const {
591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
594 int64_t Val = MCE->getValue();
595 return (Val >= 0 && Val < 128);
597 bool isImm0_255() const {
600 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
603 int64_t Val = MCE->getValue();
604 return (Val >= 0 && Val < 256);
606 bool isImm0_65535() const {
609 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
612 int64_t Val = MCE->getValue();
613 return (Val >= 0 && Val < 65536);
615 bool isImm32_63() const {
618 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
621 int64_t Val = MCE->getValue();
622 return (Val >= 32 && Val < 64);
624 bool isLogicalImm32() const {
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 int64_t Val = MCE->getValue();
631 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
634 return AArch64_AM::isLogicalImmediate(Val, 32);
636 bool isLogicalImm64() const {
639 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
642 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
644 bool isLogicalImm32Not() const {
647 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
650 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
651 return AArch64_AM::isLogicalImmediate(Val, 32);
653 bool isLogicalImm64Not() const {
656 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
659 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
661 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
662 bool isAddSubImm() const {
663 if (!isShiftedImm() && !isImm())
668 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
669 if (isShiftedImm()) {
670 unsigned Shift = ShiftedImm.ShiftAmount;
671 Expr = ShiftedImm.Val;
672 if (Shift != 0 && Shift != 12)
678 AArch64MCExpr::VariantKind ELFRefKind;
679 MCSymbolRefExpr::VariantKind DarwinRefKind;
681 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
682 DarwinRefKind, Addend)) {
683 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
684 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
685 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
686 || ELFRefKind == AArch64MCExpr::VK_LO12
687 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
688 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
690 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
691 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
693 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
696 // Otherwise it should be a real immediate in range:
697 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
698 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
700 bool isCondCode() const { return Kind == k_CondCode; }
701 bool isSIMDImmType10() const {
704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
707 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
709 bool isBranchTarget26() const {
712 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
715 int64_t Val = MCE->getValue();
718 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
720 bool isPCRelLabel19() const {
723 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726 int64_t Val = MCE->getValue();
729 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
731 bool isBranchTarget14() const {
734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 int64_t Val = MCE->getValue();
740 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
744 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
748 AArch64MCExpr::VariantKind ELFRefKind;
749 MCSymbolRefExpr::VariantKind DarwinRefKind;
751 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
752 DarwinRefKind, Addend)) {
755 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
758 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
759 if (ELFRefKind == AllowedModifiers[i])
766 bool isMovZSymbolG3() const {
767 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
770 bool isMovZSymbolG2() const {
771 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
772 AArch64MCExpr::VK_TPREL_G2,
773 AArch64MCExpr::VK_DTPREL_G2});
776 bool isMovZSymbolG1() const {
777 return isMovWSymbol({
778 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
779 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
780 AArch64MCExpr::VK_DTPREL_G1,
784 bool isMovZSymbolG0() const {
785 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
786 AArch64MCExpr::VK_TPREL_G0,
787 AArch64MCExpr::VK_DTPREL_G0});
790 bool isMovKSymbolG3() const {
791 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
794 bool isMovKSymbolG2() const {
795 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
798 bool isMovKSymbolG1() const {
799 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
800 AArch64MCExpr::VK_TPREL_G1_NC,
801 AArch64MCExpr::VK_DTPREL_G1_NC});
804 bool isMovKSymbolG0() const {
806 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
807 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
810 template<int RegWidth, int Shift>
811 bool isMOVZMovAlias() const {
812 if (!isImm()) return false;
814 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815 if (!CE) return false;
816 uint64_t Value = CE->getValue();
819 Value &= 0xffffffffULL;
821 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
822 if (Value == 0 && Shift != 0)
825 return (Value & ~(0xffffULL << Shift)) == 0;
828 template<int RegWidth, int Shift>
829 bool isMOVNMovAlias() const {
830 if (!isImm()) return false;
832 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
833 if (!CE) return false;
834 uint64_t Value = CE->getValue();
836 // MOVZ takes precedence over MOVN.
837 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
838 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
843 Value &= 0xffffffffULL;
845 return (Value & ~(0xffffULL << Shift)) == 0;
848 bool isFPImm() const { return Kind == k_FPImm; }
849 bool isBarrier() const { return Kind == k_Barrier; }
850 bool isSysReg() const { return Kind == k_SysReg; }
851 bool isMRSSystemRegister() const {
852 if (!isSysReg()) return false;
854 return SysReg.MRSReg != -1U;
856 bool isMSRSystemRegister() const {
857 if (!isSysReg()) return false;
859 return SysReg.MSRReg != -1U;
861 bool isSystemPStateField() const {
862 if (!isSysReg()) return false;
864 return SysReg.PStateField != -1U;
866 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
867 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
868 bool isVectorRegLo() const {
869 return Kind == k_Register && Reg.isVector &&
870 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
873 bool isGPR32as64() const {
874 return Kind == k_Register && !Reg.isVector &&
875 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
878 bool isGPR64sp0() const {
879 return Kind == k_Register && !Reg.isVector &&
880 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
883 /// Is this a vector list with the type implicit (presumably attached to the
884 /// instruction itself)?
885 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
886 return Kind == k_VectorList && VectorList.Count == NumRegs &&
887 !VectorList.ElementKind;
890 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
891 bool isTypedVectorList() const {
892 if (Kind != k_VectorList)
894 if (VectorList.Count != NumRegs)
896 if (VectorList.ElementKind != ElementKind)
898 return VectorList.NumElements == NumElements;
901 bool isVectorIndex1() const {
902 return Kind == k_VectorIndex && VectorIndex.Val == 1;
904 bool isVectorIndexB() const {
905 return Kind == k_VectorIndex && VectorIndex.Val < 16;
907 bool isVectorIndexH() const {
908 return Kind == k_VectorIndex && VectorIndex.Val < 8;
910 bool isVectorIndexS() const {
911 return Kind == k_VectorIndex && VectorIndex.Val < 4;
913 bool isVectorIndexD() const {
914 return Kind == k_VectorIndex && VectorIndex.Val < 2;
916 bool isToken() const override { return Kind == k_Token; }
917 bool isTokenEqual(StringRef Str) const {
918 return Kind == k_Token && getToken() == Str;
920 bool isSysCR() const { return Kind == k_SysCR; }
921 bool isPrefetch() const { return Kind == k_Prefetch; }
922 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
923 bool isShifter() const {
924 if (!isShiftExtend())
927 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
928 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
929 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
930 ST == AArch64_AM::MSL);
932 bool isExtend() const {
933 if (!isShiftExtend())
936 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
937 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
938 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
939 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
940 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
941 ET == AArch64_AM::LSL) &&
942 getShiftExtendAmount() <= 4;
945 bool isExtend64() const {
948 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
949 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
950 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
952 bool isExtendLSL64() const {
955 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
956 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
957 ET == AArch64_AM::LSL) &&
958 getShiftExtendAmount() <= 4;
961 template<int Width> bool isMemXExtend() const {
964 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
965 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
966 (getShiftExtendAmount() == Log2_32(Width / 8) ||
967 getShiftExtendAmount() == 0);
970 template<int Width> bool isMemWExtend() const {
973 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
974 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
975 (getShiftExtendAmount() == Log2_32(Width / 8) ||
976 getShiftExtendAmount() == 0);
979 template <unsigned width>
980 bool isArithmeticShifter() const {
984 // An arithmetic shifter is LSL, LSR, or ASR.
985 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
986 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
987 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
990 template <unsigned width>
991 bool isLogicalShifter() const {
995 // A logical shifter is LSL, LSR, ASR or ROR.
996 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
997 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
998 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
999 getShiftExtendAmount() < width;
1002 bool isMovImm32Shifter() const {
// A 32-bit MOVi (MOVZ/MOVN/MOVK) shifter is LSL of 0 or 16, matching the
// check below. (The previous comment listed 0/16/32/48, which describes
// the 64-bit form — the two comments were swapped.)
1006 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1008 if (ST != AArch64_AM::LSL)
1010 uint64_t Val = getShiftExtendAmount();
1011 return (Val == 0 || Val == 16);
1014 bool isMovImm64Shifter() const {
// A 64-bit MOVi (MOVZ/MOVN/MOVK) shifter is LSL of 0, 16, 32, or 48,
// matching the check below. (The previous comment listed only 0/16, which
// describes the 32-bit form — the two comments were swapped.)
1019 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1020 if (ST != AArch64_AM::LSL)
1022 uint64_t Val = getShiftExtendAmount();
1023 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1026 bool isLogicalVecShifter() const {
1030 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1031 unsigned Shift = getShiftExtendAmount();
1032 return getShiftExtendType() == AArch64_AM::LSL &&
1033 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1036 bool isLogicalVecHalfWordShifter() const {
1037 if (!isLogicalVecShifter())
1040 // A logical vector shifter is a left shift by 0 or 8.
1041 unsigned Shift = getShiftExtendAmount();
1042 return getShiftExtendType() == AArch64_AM::LSL &&
1043 (Shift == 0 || Shift == 8);
1046 bool isMoveVecShifter() const {
1047 if (!isShiftExtend())
// A move vector shifter is an MSL ("masked shift left") of 8 or 16 — the
// code checks MSL, not LSL, so the old "logical vector shifter" wording
// was inaccurate.
1051 unsigned Shift = getShiftExtendAmount();
1052 return getShiftExtendType() == AArch64_AM::MSL &&
1053 (Shift == 8 || Shift == 16);
1056 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1057 // to LDUR/STUR when the offset is not legal for the former but is for
1058 // the latter. As such, in addition to checking for being a legal unscaled
1059 // address, also check that it is not a legal scaled address. This avoids
1060 // ambiguity in the matcher.
1062 bool isSImm9OffsetFB() const {
1063 return isSImm9() && !isUImm12Offset<Width / 8>();
1066 bool isAdrpLabel() const {
1067 // Validation was handled during parsing, so we just sanity check that
1068 // something didn't go haywire.
1072 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1073 int64_t Val = CE->getValue();
1074 int64_t Min = - (4096 * (1LL << (21 - 1)));
1075 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1076 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1082 bool isAdrLabel() const {
1083 // Validation was handled during parsing, so we just sanity check that
1084 // something didn't go haywire.
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
1090 int64_t Min = - (1LL << (21 - 1));
1091 int64_t Max = ((1LL << (21 - 1)) - 1);
1092 return Val >= Min && Val <= Max;
1098 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1099 // Add as immediates when possible. Null MCExpr = 0.
1101 Inst.addOperand(MCOperand::createImm(0));
1102 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1103 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1105 Inst.addOperand(MCOperand::createExpr(Expr));
1108 void addRegOperands(MCInst &Inst, unsigned N) const {
1109 assert(N == 1 && "Invalid number of operands!");
1110 Inst.addOperand(MCOperand::createReg(getReg()));
1113 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1114 assert(N == 1 && "Invalid number of operands!");
1116 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1118 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1119 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1120 RI->getEncodingValue(getReg()));
1122 Inst.addOperand(MCOperand::createReg(Reg));
1125 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1128 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1129 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1132 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1133 assert(N == 1 && "Invalid number of operands!");
1135 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1136 Inst.addOperand(MCOperand::createReg(getReg()));
1139 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1141 Inst.addOperand(MCOperand::createReg(getReg()));
1144 template <unsigned NumRegs>
1145 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1146 assert(N == 1 && "Invalid number of operands!");
1147 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1148 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1149 unsigned FirstReg = FirstRegs[NumRegs - 1];
1152 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1155 template <unsigned NumRegs>
1156 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1158 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1159 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1160 unsigned FirstReg = FirstRegs[NumRegs - 1];
1163 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1166 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1168 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1171 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1172 assert(N == 1 && "Invalid number of operands!");
1173 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1176 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1177 assert(N == 1 && "Invalid number of operands!");
1178 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1181 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1186 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1191 void addImmOperands(MCInst &Inst, unsigned N) const {
1192 assert(N == 1 && "Invalid number of operands!");
1193 // If this is a pageoff symrefexpr with an addend, adjust the addend
1194 // to be only the page-offset portion. Otherwise, just add the expr
1196 addExpr(Inst, getImm());
1199 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1200 assert(N == 2 && "Invalid number of operands!");
1201 if (isShiftedImm()) {
1202 addExpr(Inst, getShiftedImmVal());
1203 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1205 addExpr(Inst, getImm());
1206 Inst.addOperand(MCOperand::createImm(0));
1210 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1211 assert(N == 1 && "Invalid number of operands!");
1212 Inst.addOperand(MCOperand::createImm(getCondCode()));
1215 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1216 assert(N == 1 && "Invalid number of operands!");
1217 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1219 addExpr(Inst, getImm());
1221 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1224 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1225 addImmOperands(Inst, N);
1229 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1234 Inst.addOperand(MCOperand::createExpr(getImm()));
1237 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
// Signed-immediate adders. addSImm9Operands emits the raw constant; the
// SImm7sN variants encode scaled offsets by dividing the byte value by the
// access size (4, 8, or 16). All require a constant immediate (cast<> asserts).
1240 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1243 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1246 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1249 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1252 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1255 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1258 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1261 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
// Ranged-immediate adders (Imm0_7 ... Imm32_63). Each simply emits the
// constant's value; range validation is presumably done by the corresponding
// isImmX_Y predicates before matching — confirm against the full source.
1264 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1265 assert(N == 1 && "Invalid number of operands!");
1266 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1267 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1270 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1271 assert(N == 1 && "Invalid number of operands!");
1272 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1273 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1276 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1277 assert(N == 1 && "Invalid number of operands!");
1278 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1279 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1282 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): only this variant carries the extra null-check assert; cast<>
// already asserts on type mismatch, so this guards a null getImm() result.
1285 assert(MCE && "Invalid constant immediate operand!");
1286 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1289 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1295 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1301 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1307 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1331 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1337 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1340 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1343 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1346 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
// Logical (bitmask) immediates: the raw value is converted to the AArch64
// N:immr:imms encoding via AArch64_AM::encodeLogicalImmediate. 32-bit forms
// mask to the low 32 bits first; the *Not forms encode the complement (for
// BIC/ORN-style aliases).
1349 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): the 'uint64_t encoding =' line (1352) is omitted by this
// listing; 1353 is its continuation.
1353 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1354 Inst.addOperand(MCOperand::createImm(encoding));
1357 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1361 Inst.addOperand(MCOperand::createImm(encoding));
1364 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1368 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1369 Inst.addOperand(MCOperand::createImm(encoding));
1372 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): the 'uint64_t encoding =' line (1375) is omitted here; 1376
// is its continuation.
1376 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1377 Inst.addOperand(MCOperand::createImm(encoding));
// AdvSIMD modified-immediate (type 10, e.g. FMOV vector): re-encode the
// constant via encodeAdvSIMDModImmType10 before emitting.
1380 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1383 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1384 Inst.addOperand(MCOperand::createImm(encoding));
// PC-relative branch-target adders (26-bit, 19-bit, 14-bit forms). Constant
// targets are encoded as word offsets (>> 2, since the low two bits are
// always zero); symbolic targets are added unshifted for the fixup machinery.
// NOTE(review): each variant's early-return for the non-constant case
// (lines 1393/1395-1396 etc.) is omitted by this listing.
1387 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1388 // Branch operands don't encode the low bits, so shift them off
1389 // here. If it's a label, however, just put it on directly as there's
1390 // not enough information now to do anything.
1391 assert(N == 1 && "Invalid number of operands!");
1392 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1394 addExpr(Inst, getImm());
1397 assert(MCE && "Invalid constant immediate operand!");
1398 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1401 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 addExpr(Inst, getImm());
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1415 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1416 // Branch operands don't encode the low bits, so shift them off
1417 // here. If it's a label, however, just put it on directly as there's
1418 // not enough information now to do anything.
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 addExpr(Inst, getImm());
1425 assert(MCE && "Invalid constant immediate operand!");
1426 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
// Simple one-operand adders: each forwards the already-validated value stored
// on this operand (FP immediate encoding, barrier option, MRS/MSR system
// register number, PState field, system CR number, prefetch op) as a single
// immediate MCInst operand.
1429 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1430 assert(N == 1 && "Invalid number of operands!");
1431 Inst.addOperand(MCOperand::createImm(getFPImm()));
1434 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1435 assert(N == 1 && "Invalid number of operands!");
1436 Inst.addOperand(MCOperand::createImm(getBarrier()));
1439 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1440 assert(N == 1 && "Invalid number of operands!");
1442 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1445 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1446 assert(N == 1 && "Invalid number of operands!");
1448 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1451 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1452 assert(N == 1 && "Invalid number of operands!");
1454 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1457 void addSysCROperands(MCInst &Inst, unsigned N) const {
1458 assert(N == 1 && "Invalid number of operands!");
1459 Inst.addOperand(MCOperand::createImm(getSysCR()));
1462 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1463 assert(N == 1 && "Invalid number of operands!");
1464 Inst.addOperand(MCOperand::createImm(getPrefetch()));
// Shift/extend operand adders. addShifterOperands packs (type, amount) into
// the shifter-immediate form. The Extend variants map a plain LSL onto the
// width-appropriate UXTW/UXTX arithmetic extend before packing. The memory
// form emits two operands: a sign flag and a do-shift flag.
1467 void addShifterOperands(MCInst &Inst, unsigned N) const {
1468 assert(N == 1 && "Invalid number of operands!");
// NOTE(review): the 'unsigned Imm =' line (1469) is omitted by this listing;
// 1470 is its continuation.
1470 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1471 Inst.addOperand(MCOperand::createImm(Imm));
1474 void addExtendOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1476 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1477 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1478 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1479 Inst.addOperand(MCOperand::createImm(Imm));
1482 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1483 assert(N == 1 && "Invalid number of operands!");
1484 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1485 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1486 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1487 Inst.addOperand(MCOperand::createImm(Imm));
1490 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1491 assert(N == 2 && "Invalid number of operands!");
1492 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1493 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1494 Inst.addOperand(MCOperand::createImm(IsSigned));
1495 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1498 // For 8-bit load/store instructions with a register offset, both the
1499 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1500 // they're disambiguated by whether the shift was explicit or implicit rather
// (continuation of the comment above — line 1501, "than its value", is
// omitted by this listing.)
1502 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1503 assert(N == 2 && "Invalid number of operands!");
1504 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1505 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1506 Inst.addOperand(MCOperand::createImm(IsSigned));
// Second operand: whether the shift amount was written explicitly, not its
// value (see the comment above).
1507 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
// MOV-alias adders: extract the 16-bit chunk of the constant at 'Shift'
// (MOVZ uses the value directly; MOVN uses its complement).
// NOTE(review): 'Shift' is not declared in the visible lines — presumably a
// template parameter omitted by this listing (lines 1510/1519); confirm
// against the full source.
1511 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1512 assert(N == 1 && "Invalid number of operands!");
1514 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1515 uint64_t Value = CE->getValue();
1516 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1520 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1521 assert(N == 1 && "Invalid number of operands!");
1523 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1524 uint64_t Value = CE->getValue();
1525 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
// Debug dump of this operand; defined out-of-line below.
1528 void print(raw_ostream &OS) const override;
// Static factory functions: each allocates an AArch64Operand of the matching
// kind and fills in its payload fields.
// NOTE(review): the trailing lines of most factories (setting StartLoc/EndLoc
// from S/E and 'return Op;') are omitted by this listing's numbering gaps.
1530 static std::unique_ptr<AArch64Operand>
1531 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1532 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1533 Op->Tok.Data = Str.data();
1534 Op->Tok.Length = Str.size();
1535 Op->Tok.IsSuffix = IsSuffix;
1541 static std::unique_ptr<AArch64Operand>
1542 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1543 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1544 Op->Reg.RegNum = RegNum;
1545 Op->Reg.isVector = isVector;
1551 static std::unique_ptr<AArch64Operand>
1552 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1553 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1554 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1555 Op->VectorList.RegNum = RegNum;
1556 Op->VectorList.Count = Count;
1557 Op->VectorList.NumElements = NumElements;
1558 Op->VectorList.ElementKind = ElementKind;
1564 static std::unique_ptr<AArch64Operand>
1565 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1566 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1567 Op->VectorIndex.Val = Idx;
1573 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1574 SMLoc E, MCContext &Ctx) {
1575 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1582 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1583 unsigned ShiftAmount,
1586 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
// NOTE(review): stray space before '.Val' below is present in the original.
1587 Op->ShiftedImm .Val = Val;
1588 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1594 static std::unique_ptr<AArch64Operand>
1595 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1596 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1597 Op->CondCode.Code = Code;
1603 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1605 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1606 Op->FPImm.Val = Val;
1612 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1616 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1617 Op->Barrier.Val = Val;
1618 Op->Barrier.Data = Str.data();
1619 Op->Barrier.Length = Str.size();
1625 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1628 uint32_t PStateField,
1630 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1631 Op->SysReg.Data = Str.data();
1632 Op->SysReg.Length = Str.size();
1633 Op->SysReg.MRSReg = MRSReg;
1634 Op->SysReg.MSRReg = MSRReg;
1635 Op->SysReg.PStateField = PStateField;
1641 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1642 SMLoc E, MCContext &Ctx) {
1643 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1644 Op->SysCRImm.Val = Val;
1650 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1654 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1655 Op->Prefetch.Val = Val;
// NOTE(review): a k_Prefetch operand storing its name through the Barrier
// union member looks suspicious — works only if Barrier/Prefetch share layout
// or this aliasing is intentional; flag for verification, not changed here.
1656 Op->Barrier.Data = Str.data();
1657 Op->Barrier.Length = Str.size();
1663 static std::unique_ptr<AArch64Operand>
1664 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1665 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1667 Op->ShiftExtend.Type = ShOp;
1668 Op->ShiftExtend.Amount = Val;
1669 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1676 } // end anonymous namespace.
// Debug-print this operand in a human-readable form, dispatching on Kind.
// NOTE(review): the 'switch (Kind)' header, most 'case k_*:' labels, and the
// 'break;' statements are omitted by this listing's numbering gaps; the
// visible lines are the per-kind printing bodies only.
1678 void AArch64Operand::print(raw_ostream &OS) const {
1681 OS << "<fpimm " << getFPImm() << "("
1682 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1685 StringRef Name = getBarrierName();
1687 OS << "<barrier " << Name << ">";
1689 OS << "<barrier invalid #" << getBarrier() << ">";
1693 getImm()->print(OS);
1695 case k_ShiftedImm: {
1696 unsigned Shift = getShiftedImmShift();
1697 OS << "<shiftedimm ";
1698 getShiftedImmVal()->print(OS);
1699 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1703 OS << "<condcode " << getCondCode() << ">";
1706 OS << "<register " << getReg() << ">";
1708 case k_VectorList: {
1709 OS << "<vectorlist ";
1710 unsigned Reg = getVectorListStart();
1711 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1712 OS << Reg + i << " ";
1717 OS << "<vectorindex " << getVectorIndex() << ">";
1720 OS << "<sysreg: " << getSysReg() << '>';
1723 OS << "'" << getToken() << "'";
1726 OS << "c" << getSysCR();
1729 StringRef Name = getPrefetchName();
1731 OS << "<prfop " << Name << ">";
1733 OS << "<prfop invalid #" << getPrefetch() << ">";
1736 case k_ShiftExtend: {
1737 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1738 << getShiftExtendAmount();
1739 if (!hasShiftExtendAmount())
1747 /// @name Auto-generated Match Functions
1750 static unsigned MatchRegisterName(StringRef Name);
// Map a vector register name ("v0".."v31") to the corresponding Q-register
// enum value via StringSwitch.
// NOTE(review): the '.Default(...)' terminator (lines 1788-1789) is omitted
// by this listing.
1754 static unsigned matchVectorRegName(StringRef Name) {
1755 return StringSwitch<unsigned>(Name)
1756 .Case("v0", AArch64::Q0)
1757 .Case("v1", AArch64::Q1)
1758 .Case("v2", AArch64::Q2)
1759 .Case("v3", AArch64::Q3)
1760 .Case("v4", AArch64::Q4)
1761 .Case("v5", AArch64::Q5)
1762 .Case("v6", AArch64::Q6)
1763 .Case("v7", AArch64::Q7)
1764 .Case("v8", AArch64::Q8)
1765 .Case("v9", AArch64::Q9)
1766 .Case("v10", AArch64::Q10)
1767 .Case("v11", AArch64::Q11)
1768 .Case("v12", AArch64::Q12)
1769 .Case("v13", AArch64::Q13)
1770 .Case("v14", AArch64::Q14)
1771 .Case("v15", AArch64::Q15)
1772 .Case("v16", AArch64::Q16)
1773 .Case("v17", AArch64::Q17)
1774 .Case("v18", AArch64::Q18)
1775 .Case("v19", AArch64::Q19)
1776 .Case("v20", AArch64::Q20)
1777 .Case("v21", AArch64::Q21)
1778 .Case("v22", AArch64::Q22)
1779 .Case("v23", AArch64::Q23)
1780 .Case("v24", AArch64::Q24)
1781 .Case("v25", AArch64::Q25)
1782 .Case("v26", AArch64::Q26)
1783 .Case("v27", AArch64::Q27)
1784 .Case("v28", AArch64::Q28)
1785 .Case("v29", AArch64::Q29)
1786 .Case("v30", AArch64::Q30)
1787 .Case("v31", AArch64::Q31)
// Return true if Name (case-insensitive) is a recognized vector arrangement
// suffix.
// NOTE(review): the actual '.Case(...)' entries and '.Default(...)' (lines
// 1793-1801, 1805-1810) are omitted by this listing; only the switch header
// and an interior comment are visible.
1791 static bool isValidVectorKind(StringRef Name) {
1792 return StringSwitch<bool>(Name.lower())
1802 // Accept the width neutral ones, too, for verbose syntax. If those
1803 // aren't used in the right places, the token operand won't match so
1804 // all will work out.
// Decompose an already-validated vector kind string (e.g. ".4s") into its
// lane count and element-kind character. A two-character kind (dot + letter)
// has no lane count.
1812 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1813 char &ElementKind) {
1814 assert(isValidVectorKind(Name));
// Element kind is the final character, lower-cased.
1816 ElementKind = Name.lower()[Name.size() - 1];
// NOTE(review): NumElements appears to be initialized on the omitted line
// 1815 — confirm against the full source.
1819 if (Name.size() == 2)
1822 // Parse the lane count
1823 Name = Name.drop_front();
1824 while (isdigit(Name.front())) {
1825 NumElements = 10 * NumElements + (Name.front() - '0');
1826 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true (failure) when tryParseRegister yields the -1 sentinel.
1830 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1832 StartLoc = getLoc();
1833 RegNo = tryParseRegister();
// EndLoc points at the last character of the register token.
1834 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1835 return (RegNo == (unsigned)-1);
1838 // Matches a register name or register alias previously defined by '.req'
1839 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
// Try the direct (vector or scalar) register-name match first; fall back to
// the .req alias table only when appropriate (the guarding condition on the
// omitted lines 1843-1844 is not visible here).
1841 unsigned RegNum = isVector ? matchVectorRegName(Name)
1842 : MatchRegisterName(Name);
1845 // Check for aliases registered via .req. Canonicalize to lower case.
1846 // That's more consistent since register names are case insensitive, and
1847 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1848 auto Entry = RegisterReqs.find(Name.lower());
1849 if (Entry == RegisterReqs.end())
1851 // set RegNum if the match is the right kind of register
1852 if (isVector == Entry->getValue().first)
1853 RegNum = Entry->getValue().second;
1858 /// tryParseRegister - Try to parse a register name. The token must be an
1859 /// Identifier when called, and if it is a register name the token is eaten and
1860 /// the register is added to the operand list.
1861 int AArch64AsmParser::tryParseRegister() {
1862 MCAsmParser &Parser = getParser();
1863 const AsmToken &Tok = Parser.getTok();
1864 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Register names are matched case-insensitively.
1866 std::string lowerCase = Tok.getString().lower();
1867 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1868 // Also handle a few aliases of registers.
// NOTE(review): the guard around this alias fallback (omitted line 1869,
// presumably 'if (RegNum == 0)') and the StringSwitch '.Default' / final
// 'return RegNum;' are not visible in this listing.
1870 RegNum = StringSwitch<unsigned>(lowerCase)
1871 .Case("fp", AArch64::FP)
1872 .Case("lr", AArch64::LR)
1873 .Case("x31", AArch64::XZR)
1874 .Case("w31", AArch64::WZR)
1880 Parser.Lex(); // Eat identifier token.
1884 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1885 /// kind specifier. If it is a register specifier, eat the token and return it.
1886 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1887 MCAsmParser &Parser = getParser();
1888 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1889 TokError("vector register expected");
// (return of the failure sentinel on omitted lines 1890-1891)
1893 StringRef Name = Parser.getTok().getString();
1894 // If there is a kind specifier, it's separated from the register name by
// ("a '.'" — continuation comment, line 1895, omitted by this listing.)
1896 size_t Start = 0, Next = Name.find('.');
1897 StringRef Head = Name.slice(Start, Next);
1898 unsigned RegNum = matchRegisterNameAlias(Head, true);
// If a '.<kind>' suffix is present, validate it and hand it back to the
// caller through the Kind out-parameter.
1901 if (Next != StringRef::npos) {
1902 Kind = Name.slice(Next, StringRef::npos);
1903 if (!isValidVectorKind(Kind)) {
1904 TokError("invalid vector kind qualifier");
1908 Parser.Lex(); // Eat the register token.
// Only emit the "expected" diagnostic when the caller demanded a register
// (the 'if (expected)' guard is on an omitted line).
1913 TokError("vector register expected");
1917 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1918 AArch64AsmParser::OperandMatchResultTy
1919 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1920 MCAsmParser &Parser = getParser();
// (S = getLoc() on omitted line 1921.)
1923 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1924 Error(S, "Expected cN operand where 0 <= N <= 15");
1925 return MatchOperand_ParseFail;
// The identifier must be 'c' or 'C' followed by a decimal number 0..15.
1928 StringRef Tok = Parser.getTok().getIdentifier();
1929 if (Tok[0] != 'c' && Tok[0] != 'C') {
1930 Error(S, "Expected cN operand where 0 <= N <= 15");
1931 return MatchOperand_ParseFail;
1935 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1936 if (BadNum || CRNum > 15) {
1937 Error(S, "Expected cN operand where 0 <= N <= 15");
1938 return MatchOperand_ParseFail;
1941 Parser.Lex(); // Eat identifier token.
1943 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1944 return MatchOperand_Success;
1947 /// tryParsePrefetch - Try to parse a prefetch operand.
1948 AArch64AsmParser::OperandMatchResultTy
1949 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1950 MCAsmParser &Parser = getParser();
1952 const AsmToken &Tok = Parser.getTok();
1953 // Either an identifier for named values or a 5-bit immediate.
1954 bool Hash = Tok.is(AsmToken::Hash);
1955 if (Hash || Tok.is(AsmToken::Integer)) {
1957 Parser.Lex(); // Eat hash token.
1958 const MCExpr *ImmVal;
1959 if (getParser().parseExpression(ImmVal))
1960 return MatchOperand_ParseFail;
// The immediate form must fold to a constant in [0, 31].
1962 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1964 TokError("immediate value expected for prefetch operand");
1965 return MatchOperand_ParseFail;
1967 unsigned prfop = MCE->getValue();
1969 TokError("prefetch operand out of range, [0,31] expected");
1970 return MatchOperand_ParseFail;
// Recover the canonical name (if any) for this prfop value so the operand
// prints nicely.
1974 auto Mapper = AArch64PRFM::PRFMMapper();
1976 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
1977 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
1979 return MatchOperand_Success;
// Otherwise we require a named prefetch hint identifier.
1982 if (Tok.isNot(AsmToken::Identifier)) {
1983 TokError("pre-fetch hint expected");
1984 return MatchOperand_ParseFail;
1988 auto Mapper = AArch64PRFM::PRFMMapper();
1990 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
1992 TokError("pre-fetch hint expected");
1993 return MatchOperand_ParseFail;
1996 Parser.Lex(); // Eat identifier token.
1997 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
1999 return MatchOperand_Success;
2002 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// ("instruction" — continuation of the doc comment, line 2003, omitted.)
2004 AArch64AsmParser::OperandMatchResultTy
2005 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2006 MCAsmParser &Parser = getParser();
// An optional leading '#' is permitted before the expression.
2010 if (Parser.getTok().is(AsmToken::Hash)) {
2011 Parser.Lex(); // Eat hash token.
2014 if (parseSymbolicImmVal(Expr))
2015 return MatchOperand_ParseFail;
// Classify the reference to validate the relocation specifier used.
2017 AArch64MCExpr::VariantKind ELFRefKind;
2018 MCSymbolRefExpr::VariantKind DarwinRefKind;
2020 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2021 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2022 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2023 // No modifier was specified at all; this is the syntax for an ELF basic
2024 // ADRP relocation (unfortunately).
2026 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2027 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2028 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// (Addend != 0 condition — omitted line 2029.)
2030 Error(S, "gotpage label reference not allowed an addend");
2031 return MatchOperand_ParseFail;
2032 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2033 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2034 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2035 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2036 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2037 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2038 // The operand must be an @page or @gotpage qualified symbolref.
2039 Error(S, "page or gotpage label reference expected");
2040 return MatchOperand_ParseFail;
2044 // We have either a label reference possibly with addend or an immediate. The
2045 // addend is a raw value here. The linker will adjust it to only reference the
// ("page" — continuation, line 2046, omitted.)
2047 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2048 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2050 return MatchOperand_Success;
2053 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// ("instruction" — continuation of the doc comment, line 2054, omitted.)
2055 AArch64AsmParser::OperandMatchResultTy
2056 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2057 MCAsmParser &Parser = getParser();
// Optional '#' prefix, then a plain expression; no modifier validation
// needed here (ADR takes a byte-granular PC-relative target).
2061 if (Parser.getTok().is(AsmToken::Hash)) {
2062 Parser.Lex(); // Eat hash token.
2065 if (getParser().parseExpression(Expr))
2066 return MatchOperand_ParseFail;
2068 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2069 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2071 return MatchOperand_Success;
2074 /// tryParseFPImm - A floating point immediate expression operand.
2075 AArch64AsmParser::OperandMatchResultTy
2076 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2077 MCAsmParser &Parser = getParser();
// Optional '#' prefix.
2081 if (Parser.getTok().is(AsmToken::Hash)) {
2082 Parser.Lex(); // Eat '#'
2086 // Handle negation, as that still comes through as a separate token.
2087 bool isNegative = false;
2088 if (Parser.getTok().is(AsmToken::Minus)) {
// (isNegative = true; Parser.Lex(); on omitted lines 2089-2091.)
2092 const AsmToken &Tok = Parser.getTok();
2093 if (Tok.is(AsmToken::Real)) {
2094 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2096 RealVal.changeSign();
// Encode the double's bit pattern as the 8-bit FMOV immediate; -1 signals
// "not representable".
2098 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2099 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2100 Parser.Lex(); // Eat the token.
2101 // Check for out of range values. As an exception, we let Zero through,
2102 // as we handle that special case in post-processing before matching in
2103 // order to use the zero register for it.
2104 if (Val == -1 && !RealVal.isPosZero()) {
2105 TokError("expected compatible register or floating-point constant");
2106 return MatchOperand_ParseFail;
2108 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2109 return MatchOperand_Success;
2111 if (Tok.is(AsmToken::Integer)) {
// A hex integer is taken as the already-encoded 8-bit value; anything else
// is reinterpreted through APFloat like the Real case above.
2113 if (!isNegative && Tok.getString().startswith("0x")) {
2114 Val = Tok.getIntVal();
2115 if (Val > 255 || Val < 0) {
2116 TokError("encoded floating point value out of range");
2117 return MatchOperand_ParseFail;
2120 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2121 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2122 // If we had a '-' in front, toggle the sign bit.
2123 IntVal ^= (uint64_t)isNegative << 63;
2124 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2126 Parser.Lex(); // Eat the token.
2127 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2128 return MatchOperand_Success;
// No '#' and no FP-looking token: not our operand (the '!Hash' guard is on
// an omitted line).
2132 return MatchOperand_NoMatch;
2134 TokError("invalid floating point immediate");
2135 return MatchOperand_ParseFail;
2138 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2139 AArch64AsmParser::OperandMatchResultTy
2140 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2141 MCAsmParser &Parser = getParser();
2144 if (Parser.getTok().is(AsmToken::Hash))
2145 Parser.Lex(); // Eat '#'
2146 else if (Parser.getTok().isNot(AsmToken::Integer))
2147 // Operand should start from # or should be integer, emit error otherwise.
2148 return MatchOperand_NoMatch;
2151 if (parseSymbolicImmVal(Imm))
2152 return MatchOperand_ParseFail;
2153 else if (Parser.getTok().isNot(AsmToken::Comma)) {
// No explicit shift follows. A constant that only fits when shifted
// (> 0xfff with the low 12 bits clear) is canonicalized to (value >> 12,
// lsl #12); the ShiftAmount update is on an omitted line (2160).
2154 uint64_t ShiftAmount = 0;
2155 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2157 int64_t Val = MCE->getValue();
2158 if (Val > 0xfff && (Val & 0xfff) == 0) {
2159 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2163 SMLoc E = Parser.getTok().getLoc();
2164 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2166 return MatchOperand_Success;
2172 // The optional operand must be "lsl #N" where N is non-negative.
2173 if (!Parser.getTok().is(AsmToken::Identifier) ||
2174 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2175 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2176 return MatchOperand_ParseFail;
// Eat 'lsl' and the optional '#', then require an integer shift amount.
2182 if (Parser.getTok().is(AsmToken::Hash)) {
2186 if (Parser.getTok().isNot(AsmToken::Integer)) {
2187 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2188 return MatchOperand_ParseFail;
2191 int64_t ShiftAmount = Parser.getTok().getIntVal();
2193 if (ShiftAmount < 0) {
2194 Error(Parser.getTok().getLoc(), "positive shift amount required");
2195 return MatchOperand_ParseFail;
2197 Parser.Lex(); // Eat the number
2199 SMLoc E = Parser.getTok().getLoc();
2200 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2201 S, E, getContext()));
2202 return MatchOperand_Success;
2205 /// parseCondCodeString - Parse a Condition Code string.
2206 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
// Case-insensitive mapping of the textual condition to its enum; note the
// architectural synonyms cs==hs and cc==lo. Unknown strings yield Invalid.
2207 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2208 .Case("eq", AArch64CC::EQ)
2209 .Case("ne", AArch64CC::NE)
2210 .Case("cs", AArch64CC::HS)
2211 .Case("hs", AArch64CC::HS)
2212 .Case("cc", AArch64CC::LO)
2213 .Case("lo", AArch64CC::LO)
2214 .Case("mi", AArch64CC::MI)
2215 .Case("pl", AArch64CC::PL)
2216 .Case("vs", AArch64CC::VS)
2217 .Case("vc", AArch64CC::VC)
2218 .Case("hi", AArch64CC::HI)
2219 .Case("ls", AArch64CC::LS)
2220 .Case("ge", AArch64CC::GE)
2221 .Case("lt", AArch64CC::LT)
2222 .Case("gt", AArch64CC::GT)
2223 .Case("le", AArch64CC::LE)
2224 .Case("al", AArch64CC::AL)
2225 .Case("nv", AArch64CC::NV)
2226 .Default(AArch64CC::Invalid);
2230 /// parseCondCode - Parse a Condition Code operand.
2231 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2232 bool invertCondCode) {
2233 MCAsmParser &Parser = getParser();
2235 const AsmToken &Tok = Parser.getTok();
2236 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2238 StringRef Cond = Tok.getString();
2239 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2240 if (CC == AArch64CC::Invalid)
2241 return TokError("invalid condition code");
2242 Parser.Lex(); // Eat identifier token.
// Some instruction aliases encode the inverse of the written condition;
// AL/NV have no valid inverse in that context.
2244 if (invertCondCode) {
2245 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2246 return TokError("condition codes AL and NV are invalid for this instruction");
2247 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2251 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2255 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2256 /// them if present.
2257 AArch64AsmParser::OperandMatchResultTy
2258 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2259 MCAsmParser &Parser = getParser();
2260 const AsmToken &Tok = Parser.getTok();
2261 std::string LowerID = Tok.getString().lower();
2262 AArch64_AM::ShiftExtendType ShOp =
2263 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2264 .Case("lsl", AArch64_AM::LSL)
2265 .Case("lsr", AArch64_AM::LSR)
2266 .Case("asr", AArch64_AM::ASR)
2267 .Case("ror", AArch64_AM::ROR)
2268 .Case("msl", AArch64_AM::MSL)
2269 .Case("uxtb", AArch64_AM::UXTB)
2270 .Case("uxth", AArch64_AM::UXTH)
2271 .Case("uxtw", AArch64_AM::UXTW)
2272 .Case("uxtx", AArch64_AM::UXTX)
2273 .Case("sxtb", AArch64_AM::SXTB)
2274 .Case("sxth", AArch64_AM::SXTH)
2275 .Case("sxtw", AArch64_AM::SXTW)
2276 .Case("sxtx", AArch64_AM::SXTX)
2277 .Default(AArch64_AM::InvalidShiftExtend)
2279 if (ShOp == AArch64_AM::InvalidShiftExtend)
2280 return MatchOperand_NoMatch;
2282 SMLoc S = Tok.getLoc();
// Shift mnemonics (lsl/lsr/asr/ror/msl) require an immediate amount;
// extend mnemonics may omit it, defaulting to an implicit #0.
2285 bool Hash = getLexer().is(AsmToken::Hash);
2286 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2287 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2288 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2289 ShOp == AArch64_AM::MSL) {
2290 // We expect a number here.
2291 TokError("expected #imm after shift specifier");
2292 return MatchOperand_ParseFail;
2295 // "extend" type operatoins don't need an immediate, #0 is implicit.
2296 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2298 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2299 return MatchOperand_Success;
2303 Parser.Lex(); // Eat the '#'.
2305 // Make sure we do actually have a number or a parenthesized expression.
2306 SMLoc E = Parser.getTok().getLoc();
2307 if (!Parser.getTok().is(AsmToken::Integer) &&
2308 !Parser.getTok().is(AsmToken::LParen)) {
2309 Error(E, "expected integer shift amount");
2310 return MatchOperand_ParseFail;
// The amount must fold to a constant expression.
2313 const MCExpr *ImmVal;
2314 if (getParser().parseExpression(ImmVal))
2315 return MatchOperand_ParseFail;
2317 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2319 Error(E, "expected constant '#imm' after shift specifier");
2320 return MatchOperand_ParseFail;
2323 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2324 Operands.push_back(AArch64Operand::CreateShiftExtend(
2325 ShOp, MCE->getValue(), true, S, E, getContext()));
2326 return MatchOperand_Success;
2329 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2330 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true on error. On success, Operands holds the "sys" token plus the
/// op1/Cn/Cm/op2 encoding for the named operation and (when required) a
/// trailing register operand.
2331 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2332 OperandVector &Operands) {
2333 if (Name.find('.') != StringRef::npos)
2334 return TokError("invalid operand");
// Rewrite the alias into a plain SYS mnemonic.
2338 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2340 MCAsmParser &Parser = getParser();
2341 const AsmToken &Tok = Parser.getTok();
2342 StringRef Op = Tok.getString();
2343 SMLoc S = Tok.getLoc();
2345 const MCExpr *Expr = nullptr;
// Append the four SYS encoding operands: #op1, Cn, Cm, #op2.
2347 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2349 Expr = MCConstantExpr::Create(op1, getContext()); \
2350 Operands.push_back( \
2351 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2352 Operands.push_back( \
2353 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2354 Operands.push_back( \
2355 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2356 Expr = MCConstantExpr::Create(op2, getContext()); \
2357 Operands.push_back( \
2358 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2361 if (Mnemonic == "ic") {
2362 if (!Op.compare_lower("ialluis")) {
2363 // SYS #0, C7, C1, #0
2364 SYS_ALIAS(0, 7, 1, 0);
2365 } else if (!Op.compare_lower("iallu")) {
2366 // SYS #0, C7, C5, #0
2367 SYS_ALIAS(0, 7, 5, 0);
2368 } else if (!Op.compare_lower("ivau")) {
2369 // SYS #3, C7, C5, #1
2370 SYS_ALIAS(3, 7, 5, 1);
2372 return TokError("invalid operand for IC instruction");
2374 } else if (Mnemonic == "dc") {
2375 if (!Op.compare_lower("zva")) {
2376 // SYS #3, C7, C4, #1
2377 SYS_ALIAS(3, 7, 4, 1);
2378 } else if (!Op.compare_lower("ivac")) {
2379 // SYS #0, C7, C6, #1
2380 SYS_ALIAS(0, 7, 6, 1);
2381 } else if (!Op.compare_lower("isw")) {
2382 // SYS #0, C7, C6, #2
2383 SYS_ALIAS(0, 7, 6, 2);
2384 } else if (!Op.compare_lower("cvac")) {
2385 // SYS #3, C7, C10, #1
2386 SYS_ALIAS(3, 7, 10, 1);
2387 } else if (!Op.compare_lower("csw")) {
2388 // SYS #0, C7, C10, #2
2389 SYS_ALIAS(0, 7, 10, 2);
2390 } else if (!Op.compare_lower("cvau")) {
2391 // SYS #3, C7, C11, #1
2392 SYS_ALIAS(3, 7, 11, 1);
2393 } else if (!Op.compare_lower("civac")) {
2394 // SYS #3, C7, C14, #1
2395 SYS_ALIAS(3, 7, 14, 1);
2396 } else if (!Op.compare_lower("cisw")) {
2397 // SYS #0, C7, C14, #2
2398 SYS_ALIAS(0, 7, 14, 2);
2400 return TokError("invalid operand for DC instruction");
2402 } else if (Mnemonic == "at") {
2403 if (!Op.compare_lower("s1e1r")) {
2404 // SYS #0, C7, C8, #0
2405 SYS_ALIAS(0, 7, 8, 0);
2406 } else if (!Op.compare_lower("s1e2r")) {
2407 // SYS #4, C7, C8, #0
2408 SYS_ALIAS(4, 7, 8, 0);
2409 } else if (!Op.compare_lower("s1e3r")) {
2410 // SYS #6, C7, C8, #0
2411 SYS_ALIAS(6, 7, 8, 0);
2412 } else if (!Op.compare_lower("s1e1w")) {
2413 // SYS #0, C7, C8, #1
2414 SYS_ALIAS(0, 7, 8, 1);
2415 } else if (!Op.compare_lower("s1e2w")) {
2416 // SYS #4, C7, C8, #1
2417 SYS_ALIAS(4, 7, 8, 1);
2418 } else if (!Op.compare_lower("s1e3w")) {
2419 // SYS #6, C7, C8, #1
2420 SYS_ALIAS(6, 7, 8, 1);
2421 } else if (!Op.compare_lower("s1e0r")) {
2422 // SYS #0, C7, C8, #2
2423 SYS_ALIAS(0, 7, 8, 2);
2424 } else if (!Op.compare_lower("s1e0w")) {
2425 // SYS #0, C7, C8, #3
2426 SYS_ALIAS(0, 7, 8, 3);
2427 } else if (!Op.compare_lower("s12e1r")) {
2428 // SYS #4, C7, C8, #4
2429 SYS_ALIAS(4, 7, 8, 4);
2430 } else if (!Op.compare_lower("s12e1w")) {
2431 // SYS #4, C7, C8, #5
2432 SYS_ALIAS(4, 7, 8, 5);
2433 } else if (!Op.compare_lower("s12e0r")) {
2434 // SYS #4, C7, C8, #6
2435 SYS_ALIAS(4, 7, 8, 6);
2436 } else if (!Op.compare_lower("s12e0w")) {
2437 // SYS #4, C7, C8, #7
2438 SYS_ALIAS(4, 7, 8, 7);
2440 return TokError("invalid operand for AT instruction");
2442 } else if (Mnemonic == "tlbi") {
2443 if (!Op.compare_lower("vmalle1is")) {
2444 // SYS #0, C8, C3, #0
2445 SYS_ALIAS(0, 8, 3, 0);
2446 } else if (!Op.compare_lower("alle2is")) {
2447 // SYS #4, C8, C3, #0
2448 SYS_ALIAS(4, 8, 3, 0);
2449 } else if (!Op.compare_lower("alle3is")) {
2450 // SYS #6, C8, C3, #0
2451 SYS_ALIAS(6, 8, 3, 0);
2452 } else if (!Op.compare_lower("vae1is")) {
2453 // SYS #0, C8, C3, #1
2454 SYS_ALIAS(0, 8, 3, 1);
2455 } else if (!Op.compare_lower("vae2is")) {
2456 // SYS #4, C8, C3, #1
2457 SYS_ALIAS(4, 8, 3, 1);
2458 } else if (!Op.compare_lower("vae3is")) {
2459 // SYS #6, C8, C3, #1
2460 SYS_ALIAS(6, 8, 3, 1);
2461 } else if (!Op.compare_lower("aside1is")) {
2462 // SYS #0, C8, C3, #2
2463 SYS_ALIAS(0, 8, 3, 2);
2464 } else if (!Op.compare_lower("vaae1is")) {
2465 // SYS #0, C8, C3, #3
2466 SYS_ALIAS(0, 8, 3, 3);
2467 } else if (!Op.compare_lower("alle1is")) {
2468 // SYS #4, C8, C3, #4
2469 SYS_ALIAS(4, 8, 3, 4);
2470 } else if (!Op.compare_lower("vale1is")) {
2471 // SYS #0, C8, C3, #5
2472 SYS_ALIAS(0, 8, 3, 5);
2473 } else if (!Op.compare_lower("vaale1is")) {
2474 // SYS #0, C8, C3, #7
2475 SYS_ALIAS(0, 8, 3, 7);
2476 } else if (!Op.compare_lower("vmalle1")) {
2477 // SYS #0, C8, C7, #0
2478 SYS_ALIAS(0, 8, 7, 0);
2479 } else if (!Op.compare_lower("alle2")) {
2480 // SYS #4, C8, C7, #0
2481 SYS_ALIAS(4, 8, 7, 0);
2482 } else if (!Op.compare_lower("vale2is")) {
2483 // SYS #4, C8, C3, #5
2484 SYS_ALIAS(4, 8, 3, 5);
2485 } else if (!Op.compare_lower("vale3is")) {
2486 // SYS #6, C8, C3, #5
2487 SYS_ALIAS(6, 8, 3, 5);
2488 } else if (!Op.compare_lower("alle3")) {
2489 // SYS #6, C8, C7, #0
2490 SYS_ALIAS(6, 8, 7, 0);
2491 } else if (!Op.compare_lower("vae1")) {
2492 // SYS #0, C8, C7, #1
2493 SYS_ALIAS(0, 8, 7, 1);
2494 } else if (!Op.compare_lower("vae2")) {
2495 // SYS #4, C8, C7, #1
2496 SYS_ALIAS(4, 8, 7, 1);
2497 } else if (!Op.compare_lower("vae3")) {
2498 // SYS #6, C8, C7, #1
2499 SYS_ALIAS(6, 8, 7, 1);
2500 } else if (!Op.compare_lower("aside1")) {
2501 // SYS #0, C8, C7, #2
2502 SYS_ALIAS(0, 8, 7, 2);
2503 } else if (!Op.compare_lower("vaae1")) {
2504 // SYS #0, C8, C7, #3
2505 SYS_ALIAS(0, 8, 7, 3);
2506 } else if (!Op.compare_lower("alle1")) {
2507 // SYS #4, C8, C7, #4
2508 SYS_ALIAS(4, 8, 7, 4);
2509 } else if (!Op.compare_lower("vale1")) {
2510 // SYS #0, C8, C7, #5
2511 SYS_ALIAS(0, 8, 7, 5);
2512 } else if (!Op.compare_lower("vale2")) {
2513 // SYS #4, C8, C7, #5
2514 SYS_ALIAS(4, 8, 7, 5);
2515 } else if (!Op.compare_lower("vale3")) {
2516 // SYS #6, C8, C7, #5
2517 SYS_ALIAS(6, 8, 7, 5);
2518 } else if (!Op.compare_lower("vaale1")) {
2519 // SYS #0, C8, C7, #7
2520 SYS_ALIAS(0, 8, 7, 7);
2521 } else if (!Op.compare_lower("ipas2e1")) {
2522 // SYS #4, C8, C4, #1
2523 SYS_ALIAS(4, 8, 4, 1);
2524 } else if (!Op.compare_lower("ipas2le1")) {
2525 // SYS #4, C8, C4, #5
2526 SYS_ALIAS(4, 8, 4, 5);
2527 } else if (!Op.compare_lower("ipas2e1is")) {
2528 // SYS #4, C8, C0, #1
2529 SYS_ALIAS(4, 8, 0, 1);
2530 } else if (!Op.compare_lower("ipas2le1is")) {
2531 // SYS #4, C8, C0, #5
2532 SYS_ALIAS(4, 8, 0, 5);
2533 } else if (!Op.compare_lower("vmalls12e1")) {
2534 // SYS #4, C8, C7, #6
2535 SYS_ALIAS(4, 8, 7, 6);
2536 } else if (!Op.compare_lower("vmalls12e1is")) {
2537 // SYS #4, C8, C3, #6
2538 SYS_ALIAS(4, 8, 3, 6);
2540 return TokError("invalid operand for TLBI instruction");
2546 Parser.Lex(); // Eat operand.
// Operations whose name contains "all" (e.g. ALLE1, VMALLE1IS) take no
// register; every other op requires one.
2548 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2549 bool HasRegister = false;
2551 // Check for the optional register operand.
2552 if (getLexer().is(AsmToken::Comma)) {
2553 Parser.Lex(); // Eat comma.
2555 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2556 return TokError("expected register operand");
2561 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2562 Parser.eatToEndOfStatement();
2563 return TokError("unexpected token in argument list");
// Diagnose a mismatch between the op's register requirement and what was
// actually written.
2566 if (ExpectRegister && !HasRegister) {
2567 return TokError("specified " + Mnemonic + " op requires a register");
2569 else if (!ExpectRegister && HasRegister) {
2570 return TokError("specified " + Mnemonic + " op does not use a register");
2573 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier, which
/// is either a '#imm' in [0, 15] or a named barrier option (e.g. "sy").
2577 AArch64AsmParser::OperandMatchResultTy
2578 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2579 MCAsmParser &Parser = getParser();
2580 const AsmToken &Tok = Parser.getTok();
2582 // Can be either a #imm style literal or an option name
2583 bool Hash = Tok.is(AsmToken::Hash);
2584 if (Hash || Tok.is(AsmToken::Integer)) {
2585 // Immediate operand.
2587 Parser.Lex(); // Eat the '#'
2588 const MCExpr *ImmVal;
2589 SMLoc ExprLoc = getLoc();
2590 if (getParser().parseExpression(ImmVal))
2591 return MatchOperand_ParseFail;
2592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2594 Error(ExprLoc, "immediate value expected for barrier operand");
2595 return MatchOperand_ParseFail;
// The CRm field encoding the barrier kind is 4 bits wide.
2597 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2598 Error(ExprLoc, "barrier operand out of range");
2599 return MatchOperand_ParseFail;
// Recover the canonical option name (if any) for this immediate so the
// operand prints symbolically.
2602 auto Mapper = AArch64DB::DBarrierMapper();
2604 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2605 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2606 ExprLoc, getContext()));
2607 return MatchOperand_Success;
2610 if (Tok.isNot(AsmToken::Identifier)) {
2611 TokError("invalid operand for instruction");
2612 return MatchOperand_ParseFail;
2616 auto Mapper = AArch64DB::DBarrierMapper();
2618 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2620 TokError("invalid barrier option name");
2621 return MatchOperand_ParseFail;
2624 // The only valid named option for ISB is 'sy'
2625 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2626 TokError("'sy' or #imm operand expected");
2627 return MatchOperand_ParseFail;
2630 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2631 getLoc(), getContext()));
2632 Parser.Lex(); // Consume the option
2634 return MatchOperand_Success;
/// tryParseSysReg - Parse a system-register name for MRS/MSR. The same
/// identifier is looked up in the MRS, MSR, and PState tables; which fields
/// are valid (-1U marks "unknown") is decided later during operand matching.
2637 AArch64AsmParser::OperandMatchResultTy
2638 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2639 MCAsmParser &Parser = getParser();
2640 const AsmToken &Tok = Parser.getTok();
2642 if (Tok.isNot(AsmToken::Identifier))
2643 return MatchOperand_NoMatch;
2646 auto MRSMapper = AArch64SysReg::MRSMapper();
2647 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2649 assert(IsKnown == (MRSReg != -1U) &&
2650 "register should be -1 if and only if it's unknown");
2652 auto MSRMapper = AArch64SysReg::MSRMapper();
2653 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2655 assert(IsKnown == (MSRReg != -1U) &&
2656 "register should be -1 if and only if it's unknown");
2658 auto PStateMapper = AArch64PState::PStateMapper();
2659 uint32_t PStateField =
2660 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2661 assert(IsKnown == (PStateField != -1U) &&
2662 "register should be -1 if and only if it's unknown");
2664 Operands.push_back(AArch64Operand::CreateSysReg(
2665 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2666 Parser.Lex(); // Eat identifier
2668 return MatchOperand_Success;
2671 /// tryParseVectorRegister - Parse a vector register operand.
/// Returns true if no vector register was parsed. On success appends the
/// register, any ".<kind>" qualifier as a token, and an optional "[imm]"
/// lane index.
2672 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2673 MCAsmParser &Parser = getParser();
2674 if (Parser.getTok().isNot(AsmToken::Identifier))
2678 // Check for a vector register specifier first.
2680 int64_t Reg = tryMatchVectorRegister(Kind, false);
2684 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2685 // If there was an explicit qualifier, that goes on as a literal text
2689 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2691 // If there is an index specifier following the register, parse that too.
2692 if (Parser.getTok().is(AsmToken::LBrac)) {
2693 SMLoc SIdx = getLoc();
2694 Parser.Lex(); // Eat left bracket token.
2696 const MCExpr *ImmVal;
2697 if (getParser().parseExpression(ImmVal))
// Lane indices must fold to constants.
2699 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2701 TokError("immediate value expected for vector index");
2706 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2707 Error(E, "']' expected");
2711 Parser.Lex(); // Eat right bracket token.
2713 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2720 /// parseRegister - Parse a non-vector register operand.
/// Tries vector registers first (shared entry point), then scalar registers.
/// Returns true if nothing was parsed.
2721 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2722 MCAsmParser &Parser = getParser();
2724 // Try for a vector register.
2725 if (!tryParseVectorRegister(Operands))
2728 // Try for a scalar register.
2729 int64_t Reg = tryParseRegister();
2733 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2735 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2736 // as a string token in the instruction itself.
2737 if (getLexer().getKind() == AsmToken::LBrac) {
2738 SMLoc LBracS = getLoc();
2740 const AsmToken &Tok = Parser.getTok();
2741 if (Tok.is(AsmToken::Integer)) {
2742 SMLoc IntS = getLoc();
2743 int64_t Val = Tok.getIntVal();
2746 if (getLexer().getKind() == AsmToken::RBrac) {
2747 SMLoc RBracS = getLoc();
// Emit "[", "1", "]" as literal tokens so the matcher can consume them.
2750 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2752 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2754 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":modifier:expr"
/// (e.g. ":lo12:sym"). Returns true on error. When a modifier is present the
/// resulting expression is wrapped in an AArch64MCExpr carrying the
/// VariantKind.
2764 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2765 MCAsmParser &Parser = getParser();
2766 bool HasELFModifier = false;
2767 AArch64MCExpr::VariantKind RefKind;
2769 if (Parser.getTok().is(AsmToken::Colon)) {
2770 Parser.Lex(); // Eat ':"
2771 HasELFModifier = true;
2773 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2774 Error(Parser.getTok().getLoc(),
2775 "expect relocation specifier in operand after ':'");
// Relocation specifiers are matched case-insensitively.
2779 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2780 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2781 .Case("lo12", AArch64MCExpr::VK_LO12)
2782 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2783 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2784 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2785 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2786 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2787 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2788 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2789 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2790 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2791 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2792 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2793 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2794 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2795 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2796 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2797 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2798 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2799 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2800 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2801 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2802 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2803 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2804 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2805 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2806 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2807 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2808 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2809 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2810 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2811 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2812 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2813 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2814 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2815 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2816 .Default(AArch64MCExpr::VK_INVALID);
2818 if (RefKind == AArch64MCExpr::VK_INVALID) {
2819 Error(Parser.getTok().getLoc(),
2820 "expect relocation specifier in operand after ':'");
2824 Parser.Lex(); // Eat identifier
2826 if (Parser.getTok().isNot(AsmToken::Colon)) {
2827 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2830 Parser.Lex(); // Eat ':'
2833 if (getParser().parseExpression(ImmVal))
2837 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2842 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Accepts both range form "{ v0.8b - v3.8b }" and comma form
/// "{ v0.8b, v1.8b, ... }", with an optional trailing "[imm]" lane index.
/// Returns true on error.
2843 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2844 MCAsmParser &Parser = getParser();
2845 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2847 Parser.Lex(); // Eat left bracket token.
2849 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2852 int64_t PrevReg = FirstReg;
// Range form: "{ vA.k - vB.k }".
2855 if (Parser.getTok().is(AsmToken::Minus)) {
2856 Parser.Lex(); // Eat the minus.
2858 SMLoc Loc = getLoc();
2860 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2863 // Any Kind suffices must match on all regs in the list.
2864 if (Kind != NextKind)
2865 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap modulo 32, so { v30 - v1 } is a valid 4-element list.
2867 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2869 if (Space == 0 || Space > 3) {
2870 return Error(Loc, "invalid number of vectors");
// Comma form: each register must be the sequential successor of the previous.
2876 while (Parser.getTok().is(AsmToken::Comma)) {
2877 Parser.Lex(); // Eat the comma token.
2879 SMLoc Loc = getLoc();
2881 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2884 // Any Kind suffices must match on all regs in the list.
2885 if (Kind != NextKind)
2886 return Error(Loc, "mismatched register size suffix");
2888 // Registers must be incremental (with wraparound at 31)
2889 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2890 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2891 return Error(Loc, "registers must be sequential");
2898 if (Parser.getTok().isNot(AsmToken::RCurly))
2899 return Error(getLoc(), "'}' expected");
2900 Parser.Lex(); // Eat the '}' token.
2903 return Error(S, "invalid number of vectors");
2905 unsigned NumElements = 0;
2906 char ElementKind = 0;
2908 parseValidVectorKind(Kind, NumElements, ElementKind);
2910 Operands.push_back(AArch64Operand::CreateVectorList(
2911 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2913 // If there is an index specifier following the list, parse that too.
2914 if (Parser.getTok().is(AsmToken::LBrac)) {
2915 SMLoc SIdx = getLoc();
2916 Parser.Lex(); // Eat left bracket token.
2918 const MCExpr *ImmVal;
2919 if (getParser().parseExpression(ImmVal))
2921 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2923 TokError("immediate value expected for vector index");
2928 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2929 Error(E, "']' expected");
2933 Parser.Lex(); // Eat right bracket token.
2935 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp base register that may carry a
/// redundant ", #0" suffix (e.g. "sp, #0" in exclusive-load syntax). Any
/// index other than an absent one or a literal 0 is rejected.
2941 AArch64AsmParser::OperandMatchResultTy
2942 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2943 MCAsmParser &Parser = getParser();
2944 const AsmToken &Tok = Parser.getTok();
2945 if (!Tok.is(AsmToken::Identifier))
2946 return MatchOperand_NoMatch;
2948 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2950 MCContext &Ctx = getContext();
2951 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2952 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2953 return MatchOperand_NoMatch;
2956 Parser.Lex(); // Eat register
2958 if (Parser.getTok().isNot(AsmToken::Comma)) {
2960 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2961 return MatchOperand_Success;
2963 Parser.Lex(); // Eat comma.
2965 if (Parser.getTok().is(AsmToken::Hash))
2966 Parser.Lex(); // Eat hash
2968 if (Parser.getTok().isNot(AsmToken::Integer)) {
2969 Error(getLoc(), "index must be absent or #0");
2970 return MatchOperand_ParseFail;
2973 const MCExpr *ImmVal;
2974 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2975 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2976 Error(getLoc(), "index must be absent or #0");
2977 return MatchOperand_ParseFail;
// The "#0" carries no information; only the register operand is created.
2981 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2982 return MatchOperand_Success;
2985 /// parseOperand - Parse a arm instruction operand. For now this parses the
2986 /// operand regardless of the mnemonic.
/// Returns true on error. isCondCode forces the operand to be parsed as a
/// condition code; invertCondCode is forwarded to parseCondCode for the
/// aliases whose condition is encoded inverted (cinc/cset/...).
2987 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2988 bool invertCondCode) {
2989 MCAsmParser &Parser = getParser();
2990 // Check if the current operand has a custom associated parser, if so, try to
2991 // custom parse the operand, or fallback to the general approach.
2992 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2993 if (ResTy == MatchOperand_Success)
2995 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2996 // there was a match, but an error occurred, in which case, just return that
2997 // the operand parsing failed.
2998 if (ResTy == MatchOperand_ParseFail)
3001 // Nothing custom, so do general case parsing.
3003 switch (getLexer().getKind()) {
3007 if (parseSymbolicImmVal(Expr))
3008 return Error(S, "invalid operand");
3010 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3011 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3014 case AsmToken::LBrac: {
3015 SMLoc Loc = Parser.getTok().getLoc();
3016 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3018 Parser.Lex(); // Eat '['
3020 // There's no comma after a '[', so we can parse the next operand
3022 return parseOperand(Operands, false, false);
3024 case AsmToken::LCurly:
3025 return parseVectorList(Operands);
3026 case AsmToken::Identifier: {
3027 // If we're expecting a Condition Code operand, then just parse that.
3029 return parseCondCode(Operands, invertCondCode);
3031 // If it's a register name, parse it.
3032 if (!parseRegister(Operands))
3035 // This could be an optional "shift" or "extend" operand.
3036 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3037 // We can only continue if no tokens were eaten.
3038 if (GotShift != MatchOperand_NoMatch)
3041 // This was not a register so parse other operands that start with an
3042 // identifier (like labels) as expressions and create them as immediates.
3043 const MCExpr *IdVal;
3045 if (getParser().parseExpression(IdVal))
3048 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3049 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3052 case AsmToken::Integer:
3053 case AsmToken::Real:
3054 case AsmToken::Hash: {
3055 // #42 -> immediate.
3057 if (getLexer().is(AsmToken::Hash))
3060 // Parse a negative sign
3061 bool isNegative = false;
3062 if (Parser.getTok().is(AsmToken::Minus)) {
3064 // We need to consume this token only when we have a Real, otherwise
3065 // we let parseSymbolicImmVal take care of it
3066 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3070 // The only Real that should come through here is a literal #0.0 for
3071 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3072 // so convert the value.
3073 const AsmToken &Tok = Parser.getTok();
3074 if (Tok.is(AsmToken::Real)) {
3075 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3076 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3077 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3078 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3079 Mnemonic != "fcmlt")
3080 return TokError("unexpected floating point literal");
3081 else if (IntVal != 0 || isNegative)
3082 return TokError("expected floating-point constant #0.0");
3083 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as literal tokens, matching the instruction pattern.
3086 AArch64Operand::CreateToken("#0", false, S, getContext()));
3088 AArch64Operand::CreateToken(".0", false, S, getContext()));
3092 const MCExpr *ImmVal;
3093 if (parseSymbolicImmVal(ImmVal))
3096 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3097 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3100 case AsmToken::Equal: {
3101 SMLoc Loc = Parser.getTok().getLoc();
3102 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3103 return Error(Loc, "unexpected token in operand");
3104 Parser.Lex(); // Eat '='
3105 const MCExpr *SubExprVal;
3106 if (getParser().parseExpression(SubExprVal))
3109 if (Operands.size() < 2 ||
3110 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3114 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3115 Operands[1]->getReg());
3117 MCContext& Ctx = getContext();
3118 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3119 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3120 if (isa<MCConstantExpr>(SubExprVal)) {
3121 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Peel off trailing 16-bit-aligned zero chunks so the value fits a
// MOVZ with an LSL shift (max shift 48 for X regs, 16 for W regs).
3122 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3123 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3127 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3128 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3129 Operands.push_back(AArch64Operand::CreateImm(
3130 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3132 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3133 ShiftAmt, true, S, E, Ctx));
3136 APInt Simm = APInt(64, Imm << ShiftAmt);
3137 // check if the immediate is an unsigned or signed 32-bit int for W regs
3138 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3139 return Error(Loc, "Immediate too large for register");
3141 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3142 const MCExpr *CPLoc =
3143 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3144 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3150 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Returns true on error. Handles the ".req" directive, the SYS
/// aliases (IC/DC/AT/TLBI), "b<cc>" shorthand, mnemonic suffix splitting on
/// '.', and the condition-code operand position of conditional-select
/// aliases.
3152 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3153 StringRef Name, SMLoc NameLoc,
3154 OperandVector &Operands) {
3155 MCAsmParser &Parser = getParser();
// Canonicalize legacy "b<cc>" spellings into the "b.<cc>" form.
3156 Name = StringSwitch<StringRef>(Name.lower())
3157 .Case("beq", "b.eq")
3158 .Case("bne", "b.ne")
3159 .Case("bhs", "b.hs")
3160 .Case("bcs", "b.cs")
3161 .Case("blo", "b.lo")
3162 .Case("bcc", "b.cc")
3163 .Case("bmi", "b.mi")
3164 .Case("bpl", "b.pl")
3165 .Case("bvs", "b.vs")
3166 .Case("bvc", "b.vc")
3167 .Case("bhi", "b.hi")
3168 .Case("bls", "b.ls")
3169 .Case("bge", "b.ge")
3170 .Case("blt", "b.lt")
3171 .Case("bgt", "b.gt")
3172 .Case("ble", "b.le")
3173 .Case("bal", "b.al")
3174 .Case("bnv", "b.nv")
3177 // First check for the AArch64-specific .req directive.
3178 if (Parser.getTok().is(AsmToken::Identifier) &&
3179 Parser.getTok().getIdentifier() == ".req") {
3180 parseDirectiveReq(Name, NameLoc);
3181 // We always return 'error' for this, as we're done with this
3182 // statement and don't need to match the 'instruction."
3186 // Create the leading tokens for the mnemonic, split by '.' characters.
3187 size_t Start = 0, Next = Name.find('.');
3188 StringRef Head = Name.slice(Start, Next);
3190 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3191 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3192 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3193 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3194 Parser.eatToEndOfStatement();
3199 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3202 // Handle condition codes for a branch mnemonic
3203 if (Head == "b" && Next != StringRef::npos) {
3205 Next = Name.find('.', Start + 1);
3206 Head = Name.slice(Start + 1, Next);
3208 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3209 (Head.data() - Name.data()));
3210 AArch64CC::CondCode CC = parseCondCodeString(Head);
3211 if (CC == AArch64CC::Invalid)
3212 return Error(SuffixLoc, "invalid condition code");
3214 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3216 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3219 // Add the remaining tokens in the mnemonic.
3220 while (Next != StringRef::npos) {
3222 Next = Name.find('.', Start + 1);
3223 Head = Name.slice(Start, Next);
3224 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3225 (Head.data() - Name.data()) + 1);
3227 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3230 // Conditional compare instructions have a Condition Code operand, which needs
3231 // to be parsed and an immediate operand created.
3232 bool condCodeFourthOperand =
3233 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3234 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3235 Head == "csinc" || Head == "csinv" || Head == "csneg");
3237 // These instructions are aliases to some of the conditional select
3238 // instructions. However, the condition code is inverted in the aliased
3241 // FIXME: Is this the correct way to handle these? Or should the parser
3242 // generate the aliased instructions directly?
3243 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3244 bool condCodeThirdOperand =
3245 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3247 // Read the remaining operands.
3248 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3249 // Read the first operand.
3250 if (parseOperand(Operands, false, false)) {
3251 Parser.eatToEndOfStatement();
3256 while (getLexer().is(AsmToken::Comma)) {
3257 Parser.Lex(); // Eat the comma.
3259 // Parse and remember the operand.
3260 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3261 (N == 3 && condCodeThirdOperand) ||
3262 (N == 2 && condCodeSecondOperand),
3263 condCodeSecondOperand || condCodeThirdOperand)) {
3264 Parser.eatToEndOfStatement();
3268 // After successfully parsing some operands there are two special cases to
3269 // consider (i.e. notional operands not separated by commas). Both are due
3270 // to memory specifiers:
3271 // + An RBrac will end an address for load/store/prefetch
3272 // + An '!' will indicate a pre-indexed operation.
3274 // It's someone else's responsibility to make sure these tokens are sane
3275 // in the given context!
3276 if (Parser.getTok().is(AsmToken::RBrac)) {
3277 SMLoc Loc = Parser.getTok().getLoc();
3278 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3283 if (Parser.getTok().is(AsmToken::Exclaim)) {
3284 SMLoc Loc = Parser.getTok().getLoc();
3285 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3294 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3295 SMLoc Loc = Parser.getTok().getLoc();
3296 Parser.eatToEndOfStatement();
3297 return Error(Loc, "unexpected token in argument list");
3300 Parser.Lex(); // Consume the EndOfStatement
3304 // FIXME: This entire function is a giant hack to provide us with decent
3305 // operand range validation/diagnostics until TableGen/MC can be extended
3306 // to support autogeneration of this kind of validation.
// Performs semantic checks the auto-generated matcher cannot express.
// Returns true after emitting a diagnostic on failure, false if the
// instruction is acceptable. Loc holds one SMLoc per source operand,
// gathered by the caller (MatchAndEmitInstruction), so Loc[0]/Loc[1]/Loc[2]
// below address the first/second/third assembly operand respectively.
3307 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3308 SmallVectorImpl<SMLoc> &Loc) {
3309 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3310 // Check for indexed addressing modes w/ the base register being the
3311 // same as a destination/source register or pair load where
3312 // the Rt == Rt2. All of those are undefined behaviour.
3313 switch (Inst.getOpcode()) {
// Pre/post-indexed integer load pairs: operand 0 is the writeback def,
// so the loaded registers are operands 1 and 2 and the base is operand 3.
3314 case AArch64::LDPSWpre:
3315 case AArch64::LDPWpost:
3316 case AArch64::LDPWpre:
3317 case AArch64::LDPXpost:
3318 case AArch64::LDPXpre: {
3319 unsigned Rt = Inst.getOperand(1).getReg();
3320 unsigned Rt2 = Inst.getOperand(2).getReg();
3321 unsigned Rn = Inst.getOperand(3).getReg();
// isSubRegisterEq also rejects partial overlap (e.g. Wn aliasing Xn).
3322 if (RI->isSubRegisterEq(Rn, Rt))
3323 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3324 "is also a destination");
3325 if (RI->isSubRegisterEq(Rn, Rt2))
3326 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3327 "is also a destination");
// Non-writeback load pairs: only the Rt != Rt2 constraint applies, and the
// destinations start at operand 0 (no writeback def operand).
3330 case AArch64::LDPDi:
3331 case AArch64::LDPQi:
3332 case AArch64::LDPSi:
3333 case AArch64::LDPSWi:
3334 case AArch64::LDPWi:
3335 case AArch64::LDPXi: {
3336 unsigned Rt = Inst.getOperand(0).getReg();
3337 unsigned Rt2 = Inst.getOperand(1).getReg();
3338 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3342 case AArch64::LDPDpost:
3343 case AArch64::LDPDpre:
3344 case AArch64::LDPQpost:
3345 case AArch64::LDPQpre:
3346 case AArch64::LDPSpost:
3347 case AArch64::LDPSpre:
3348 case AArch64::LDPSWpost: {
3349 unsigned Rt = Inst.getOperand(1).getReg();
3350 unsigned Rt2 = Inst.getOperand(2).getReg();
3352 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Pre/post-indexed store pairs: writeback base must not overlap either
// source register.
3355 case AArch64::STPDpost:
3356 case AArch64::STPDpre:
3357 case AArch64::STPQpost:
3358 case AArch64::STPQpre:
3359 case AArch64::STPSpost:
3360 case AArch64::STPSpre:
3361 case AArch64::STPWpost:
3362 case AArch64::STPWpre:
3363 case AArch64::STPXpost:
3364 case AArch64::STPXpre: {
3365 unsigned Rt = Inst.getOperand(1).getReg();
3366 unsigned Rt2 = Inst.getOperand(2).getReg();
3367 unsigned Rn = Inst.getOperand(3).getReg();
3368 if (RI->isSubRegisterEq(Rn, Rt))
3369 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3370 "is also a source");
3371 if (RI->isSubRegisterEq(Rn, Rt2))
3372 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3373 "is also a source");
// Pre/post-indexed single-register loads: base must not overlap Rt.
3376 case AArch64::LDRBBpre:
3377 case AArch64::LDRBpre:
3378 case AArch64::LDRHHpre:
3379 case AArch64::LDRHpre:
3380 case AArch64::LDRSBWpre:
3381 case AArch64::LDRSBXpre:
3382 case AArch64::LDRSHWpre:
3383 case AArch64::LDRSHXpre:
3384 case AArch64::LDRSWpre:
3385 case AArch64::LDRWpre:
3386 case AArch64::LDRXpre:
3387 case AArch64::LDRBBpost:
3388 case AArch64::LDRBpost:
3389 case AArch64::LDRHHpost:
3390 case AArch64::LDRHpost:
3391 case AArch64::LDRSBWpost:
3392 case AArch64::LDRSBXpost:
3393 case AArch64::LDRSHWpost:
3394 case AArch64::LDRSHXpost:
3395 case AArch64::LDRSWpost:
3396 case AArch64::LDRWpost:
3397 case AArch64::LDRXpost: {
3398 unsigned Rt = Inst.getOperand(1).getReg();
3399 unsigned Rn = Inst.getOperand(2).getReg();
3400 if (RI->isSubRegisterEq(Rn, Rt))
3401 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3402 "is also a source");
// Pre/post-indexed single-register stores: same overlap rule as LDR above.
3405 case AArch64::STRBBpost:
3406 case AArch64::STRBpost:
3407 case AArch64::STRHHpost:
3408 case AArch64::STRHpost:
3409 case AArch64::STRWpost:
3410 case AArch64::STRXpost:
3411 case AArch64::STRBBpre:
3412 case AArch64::STRBpre:
3413 case AArch64::STRHHpre:
3414 case AArch64::STRHpre:
3415 case AArch64::STRWpre:
3416 case AArch64::STRXpre: {
3417 unsigned Rt = Inst.getOperand(1).getReg();
3418 unsigned Rn = Inst.getOperand(2).getReg();
3419 if (RI->isSubRegisterEq(Rn, Rt))
3420 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3421 "is also a source");
3426 // Now check immediate ranges. Separate from the above as there is overlap
3427 // in the instructions being checked and this keeps the nested conditionals
3429 switch (Inst.getOpcode()) {
3430 case AArch64::ADDSWri:
3431 case AArch64::ADDSXri:
3432 case AArch64::ADDWri:
3433 case AArch64::ADDXri:
3434 case AArch64::SUBSWri:
3435 case AArch64::SUBSXri:
3436 case AArch64::SUBWri:
3437 case AArch64::SUBXri: {
3438 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3439 // some slight duplication here.
// Symbolic immediates (e.g. ':lo12:sym') are only legal on specific
// relocation kinds and opcodes; everything else is rejected.
3440 if (Inst.getOperand(2).isExpr()) {
3441 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3442 AArch64MCExpr::VariantKind ELFRefKind;
3443 MCSymbolRefExpr::VariantKind DarwinRefKind;
3445 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3446 return Error(Loc[2], "invalid immediate expression");
3449 // Only allow these with ADDXri.
3450 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3451 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3452 Inst.getOpcode() == AArch64::ADDXri)
3455 // Only allow these with ADDXri/ADDWri
3456 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3457 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3458 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3459 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3460 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3461 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3462 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3463 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3464 (Inst.getOpcode() == AArch64::ADDXri ||
3465 Inst.getOpcode() == AArch64::ADDWri))
3468 // Don't allow expressions in the immediate field otherwise
3469 return Error(Loc[2], "invalid immediate expression");
// Translates a matcher failure code (Match_*) into a user-facing diagnostic
// anchored at Loc. Always returns true (the Error() convention), so callers
// can 'return showMatchError(...)' directly from the match loop.
3478 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3480 case Match_MissingFeature:
3482 "instruction requires a CPU feature not currently enabled");
3483 case Match_InvalidOperand:
3484 return Error(Loc, "invalid operand for instruction");
3485 case Match_InvalidSuffix:
3486 return Error(Loc, "invalid type suffix for instruction");
3487 case Match_InvalidCondCode:
3488 return Error(Loc, "expected AArch64 condition code");
3489 case Match_AddSubRegExtendSmall:
3491 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3492 case Match_AddSubRegExtendLarge:
3494 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3495 case Match_AddSubSecondSource:
3497 "expected compatible register, symbol or integer in range [0, 4095]");
3498 case Match_LogicalSecondSource:
3499 return Error(Loc, "expected compatible register or logical immediate");
3500 case Match_InvalidMovImm32Shift:
3501 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3502 case Match_InvalidMovImm64Shift:
3503 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3504 case Match_AddSubRegShift32:
3506 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3507 case Match_AddSubRegShift64:
3509 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3510 case Match_InvalidFPImm:
3512 "expected compatible register or floating-point constant");
// Signed scaled/unscaled memory-offset diagnostics.
3513 case Match_InvalidMemoryIndexedSImm9:
3514 return Error(Loc, "index must be an integer in range [-256, 255].");
3515 case Match_InvalidMemoryIndexed4SImm7:
3516 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3517 case Match_InvalidMemoryIndexed8SImm7:
3518 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3519 case Match_InvalidMemoryIndexed16SImm7:
3520 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
// Register-offset extend diagnostics (W-register index).
3521 case Match_InvalidMemoryWExtend8:
3523 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3524 case Match_InvalidMemoryWExtend16:
3526 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3527 case Match_InvalidMemoryWExtend32:
3529 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3530 case Match_InvalidMemoryWExtend64:
3532 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3533 case Match_InvalidMemoryWExtend128:
3535 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
// Register-offset extend diagnostics (X-register index).
3536 case Match_InvalidMemoryXExtend8:
3538 "expected 'lsl' or 'sxtx' with optional shift of #0");
3539 case Match_InvalidMemoryXExtend16:
3541 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3542 case Match_InvalidMemoryXExtend32:
3544 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3545 case Match_InvalidMemoryXExtend64:
3547 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3548 case Match_InvalidMemoryXExtend128:
3550 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Unsigned scaled-offset diagnostics.
3551 case Match_InvalidMemoryIndexed1:
3552 return Error(Loc, "index must be an integer in range [0, 4095].");
3553 case Match_InvalidMemoryIndexed2:
3554 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3555 case Match_InvalidMemoryIndexed4:
3556 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3557 case Match_InvalidMemoryIndexed8:
3558 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3559 case Match_InvalidMemoryIndexed16:
3560 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3561 case Match_InvalidImm0_7:
3562 return Error(Loc, "immediate must be an integer in range [0, 7].");
3563 case Match_InvalidImm0_15:
3564 return Error(Loc, "immediate must be an integer in range [0, 15].");
3565 case Match_InvalidImm0_31:
3566 return Error(Loc, "immediate must be an integer in range [0, 31].");
3567 case Match_InvalidImm0_63:
3568 return Error(Loc, "immediate must be an integer in range [0, 63].");
3569 case Match_InvalidImm0_127:
3570 return Error(Loc, "immediate must be an integer in range [0, 127].");
3571 case Match_InvalidImm0_65535:
3572 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3573 case Match_InvalidImm1_8:
3574 return Error(Loc, "immediate must be an integer in range [1, 8].");
3575 case Match_InvalidImm1_16:
3576 return Error(Loc, "immediate must be an integer in range [1, 16].");
3577 case Match_InvalidImm1_32:
3578 return Error(Loc, "immediate must be an integer in range [1, 32].");
3579 case Match_InvalidImm1_64:
3580 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics.
3581 case Match_InvalidIndex1:
3582 return Error(Loc, "expected lane specifier '[1]'");
3583 case Match_InvalidIndexB:
3584 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3585 case Match_InvalidIndexH:
3586 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3587 case Match_InvalidIndexS:
3588 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3589 case Match_InvalidIndexD:
3590 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3591 case Match_InvalidLabel:
3592 return Error(Loc, "expected label or encodable integer pc offset");
3594 return Error(Loc, "expected readable system register");
3596 return Error(Loc, "expected writable system register or pstate");
3597 case Match_MnemonicFail:
3598 return Error(Loc, "unrecognized instruction mnemonic");
// Unknown codes indicate a matcher/table mismatch and are a programmer error.
3600 llvm_unreachable("unexpected error code!");
// Forward declaration; the definition comes from AArch64GenAsmMatcher.inc,
// included near the end of this file.
3604 static const char *getSubtargetFeatureName(uint64_t Val);
// Entry point invoked after operand parsing. First rewrites mnemonics/operand
// lists for aliases the TableGen InstAlias machinery cannot express
// (lsl->ubfm, bfc/bfi/sbfiz/ubfiz/bfxil/sbfx/ubfx->*bfm, sxtw/uxtw register
// width twiddling, fmov with #0.0), then runs the generated matcher (NEON
// short-form table first, long-form second), validates, and emits or
// diagnoses. Returns true on error.
3606 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3607 OperandVector &Operands,
3609 uint64_t &ErrorInfo,
3610 bool MatchingInlineAsm) {
3611 assert(!Operands.empty() && "Unexpect empty operand list!");
3612 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3613 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3615 StringRef Tok = Op.getToken();
3616 unsigned NumOperands = Operands.size();
// 'lsl Rd, Rn, #imm' is an alias of UBFM; compute the immr/imms pair here.
3618 if (NumOperands == 4 && Tok == "lsl") {
3619 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3620 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3621 if (Op2.isReg() && Op3.isImm()) {
3622 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3624 uint64_t Op3Val = Op3CE->getValue();
3625 uint64_t NewOp3Val = 0;
3626 uint64_t NewOp4Val = 0;
// Register width (W vs X) decides the modulus of the rotate amount.
3627 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3629 NewOp3Val = (32 - Op3Val) & 0x1f;
3630 NewOp4Val = 31 - Op3Val;
3632 NewOp3Val = (64 - Op3Val) & 0x3f;
3633 NewOp4Val = 63 - Op3Val;
3636 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3637 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3639 Operands[0] = AArch64Operand::CreateToken(
3640 "ubfm", false, Op.getStartLoc(), getContext());
3641 Operands.push_back(AArch64Operand::CreateImm(
3642 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3643 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3644 Op3.getEndLoc(), getContext());
3647 } else if (NumOperands == 4 && Tok == "bfc") {
3648 // FIXME: Horrible hack to handle BFC->BFM alias.
3649 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3650 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3651 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3653 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3654 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3655 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3657 if (LSBCE && WidthCE) {
3658 uint64_t LSB = LSBCE->getValue();
3659 uint64_t Width = WidthCE->getValue();
3661 uint64_t RegWidth = 0;
3662 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb/width against the destination register width before
// encoding them as BFM's immr/imms.
3668 if (LSB >= RegWidth)
3669 return Error(LSBOp.getStartLoc(),
3670 "expected integer in range [0, 31]");
3671 if (Width < 1 || Width > RegWidth)
3672 return Error(WidthOp.getStartLoc(),
3673 "expected integer in range [1, 32]");
3677 ImmR = (32 - LSB) & 0x1f;
3679 ImmR = (64 - LSB) & 0x3f;
3681 uint64_t ImmS = Width - 1;
3683 if (ImmR != 0 && ImmS >= ImmR)
3684 return Error(WidthOp.getStartLoc(),
3685 "requested insert overflows register");
3687 const MCExpr *ImmRExpr = MCConstantExpr::Create(ImmR, getContext());
3688 const MCExpr *ImmSExpr = MCConstantExpr::Create(ImmS, getContext());
3689 Operands[0] = AArch64Operand::CreateToken(
3690 "bfm", false, Op.getStartLoc(), getContext());
// BFC clears bits, i.e. it is BFM with the zero register as the source.
3691 Operands[2] = AArch64Operand::CreateReg(
3692 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3693 SMLoc(), getContext());
3694 Operands[3] = AArch64Operand::CreateImm(
3695 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3696 Operands.emplace_back(
3697 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3698 WidthOp.getEndLoc(), getContext()));
3701 } else if (NumOperands == 5) {
3702 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3703 // UBFIZ -> UBFM aliases.
3704 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3705 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3706 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3707 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3709 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3710 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3711 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3713 if (Op3CE && Op4CE) {
3714 uint64_t Op3Val = Op3CE->getValue();
3715 uint64_t Op4Val = Op4CE->getValue();
3717 uint64_t RegWidth = 0;
3718 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3724 if (Op3Val >= RegWidth)
3725 return Error(Op3.getStartLoc(),
3726 "expected integer in range [0, 31]");
3727 if (Op4Val < 1 || Op4Val > RegWidth)
3728 return Error(Op4.getStartLoc(),
3729 "expected integer in range [1, 32]");
3731 uint64_t NewOp3Val = 0;
3733 NewOp3Val = (32 - Op3Val) & 0x1f;
3735 NewOp3Val = (64 - Op3Val) & 0x3f;
3737 uint64_t NewOp4Val = Op4Val - 1;
3739 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3740 return Error(Op4.getStartLoc(),
3741 "requested insert overflows register");
3743 const MCExpr *NewOp3 =
3744 MCConstantExpr::Create(NewOp3Val, getContext());
3745 const MCExpr *NewOp4 =
3746 MCConstantExpr::Create(NewOp4Val, getContext());
3747 Operands[3] = AArch64Operand::CreateImm(
3748 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3749 Operands[4] = AArch64Operand::CreateImm(
3750 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3752 Operands[0] = AArch64Operand::CreateToken(
3753 "bfm", false, Op.getStartLoc(), getContext());
3754 else if (Tok == "sbfiz")
3755 Operands[0] = AArch64Operand::CreateToken(
3756 "sbfm", false, Op.getStartLoc(), getContext());
3757 else if (Tok == "ubfiz")
3758 Operands[0] = AArch64Operand::CreateToken(
3759 "ubfm", false, Op.getStartLoc(), getContext());
3761 llvm_unreachable("No valid mnemonic for alias?");
3765 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3766 // UBFX -> UBFM aliases.
3767 } else if (NumOperands == 5 &&
3768 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3769 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3770 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3771 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3773 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3774 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3775 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3777 if (Op3CE && Op4CE) {
3778 uint64_t Op3Val = Op3CE->getValue();
3779 uint64_t Op4Val = Op4CE->getValue();
3781 uint64_t RegWidth = 0;
3782 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3788 if (Op3Val >= RegWidth)
3789 return Error(Op3.getStartLoc(),
3790 "expected integer in range [0, 31]");
3791 if (Op4Val < 1 || Op4Val > RegWidth)
3792 return Error(Op4.getStartLoc(),
3793 "expected integer in range [1, 32]");
// For extract forms, imms is lsb + width - 1 (the MSB of the field).
3795 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3797 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3798 return Error(Op4.getStartLoc(),
3799 "requested extract overflows register");
3801 const MCExpr *NewOp4 =
3802 MCConstantExpr::Create(NewOp4Val, getContext());
3803 Operands[4] = AArch64Operand::CreateImm(
3804 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3806 Operands[0] = AArch64Operand::CreateToken(
3807 "bfm", false, Op.getStartLoc(), getContext());
3808 else if (Tok == "sbfx")
3809 Operands[0] = AArch64Operand::CreateToken(
3810 "sbfm", false, Op.getStartLoc(), getContext());
3811 else if (Tok == "ubfx")
3812 Operands[0] = AArch64Operand::CreateToken(
3813 "ubfm", false, Op.getStartLoc(), getContext());
3815 llvm_unreachable("No valid mnemonic for alias?");
3820 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3821 // InstAlias can't quite handle this since the reg classes aren't
3823 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3824 // The source register can be Wn here, but the matcher expects a
3825 // GPR64. Twiddle it here if necessary.
3826 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3828 unsigned Reg = getXRegFromWReg(Op.getReg());
3829 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3830 Op.getEndLoc(), getContext());
3833 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3834 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3835 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3837 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3839 // The source register can be Wn here, but the matcher expects a
3840 // GPR64. Twiddle it here if necessary.
3841 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3843 unsigned Reg = getXRegFromWReg(Op.getReg());
3844 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3845 Op.getEndLoc(), getContext());
3849 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3850 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3851 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3853 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3855 // The source register can be Wn here, but the matcher expects a
3856 // GPR32. Twiddle it here if necessary.
3857 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3859 unsigned Reg = getWRegFromXReg(Op.getReg());
3860 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3861 Op.getEndLoc(), getContext());
3866 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3867 if (NumOperands == 3 && Tok == "fmov") {
3868 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3869 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 appears to be the sentinel for an immediate
// that has no FP8 encoding (i.e. #0.0) -- confirm against AArch64Operand.
3870 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3872 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3876 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3877 Op.getEndLoc(), getContext());
3882 // First try to match against the secondary set of tables containing the
3883 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3884 unsigned MatchResult =
3885 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3887 // If that fails, try against the alternate table containing long-form NEON:
3888 // "fadd v0.2s, v1.2s, v2.2s"
3889 if (MatchResult != Match_Success)
3891 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3893 switch (MatchResult) {
3894 case Match_Success: {
3895 // Perform range checking and other semantic validations
// Collect one SMLoc per source operand (skipping the mnemonic at index 0)
// so validateInstruction can point its diagnostics at the right operand.
3896 SmallVector<SMLoc, 8> OperandLocs;
3897 NumOperands = Operands.size();
3898 for (unsigned i = 1; i < NumOperands; ++i)
3899 OperandLocs.push_back(Operands[i]->getStartLoc());
3900 if (validateInstruction(Inst, OperandLocs))
3904 Out.EmitInstruction(Inst, STI);
3907 case Match_MissingFeature: {
3908 assert(ErrorInfo && "Unknown missing feature!");
3909 // Special case the error message for the very common case where only
3910 // a single subtarget feature is missing (neon, e.g.).
3911 std::string Msg = "instruction requires:";
// Walk the feature bitmask and append a name for each missing feature.
3913 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3914 if (ErrorInfo & Mask) {
3916 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3920 return Error(IDLoc, Msg);
3922 case Match_MnemonicFail:
3923 return showMatchError(IDLoc, MatchResult);
3924 case Match_InvalidOperand: {
// ErrorInfo, when not ~0ULL, is the index of the offending operand.
3925 SMLoc ErrorLoc = IDLoc;
3926 if (ErrorInfo != ~0ULL) {
3927 if (ErrorInfo >= Operands.size())
3928 return Error(IDLoc, "too few operands for instruction");
3930 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3931 if (ErrorLoc == SMLoc())
3934 // If the match failed on a suffix token operand, tweak the diagnostic
3936 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3937 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3938 MatchResult = Match_InvalidSuffix;
3940 return showMatchError(ErrorLoc, MatchResult);
// Every remaining range/shape failure shares the same handling below:
// locate the offending operand and delegate to showMatchError.
3942 case Match_InvalidMemoryIndexed1:
3943 case Match_InvalidMemoryIndexed2:
3944 case Match_InvalidMemoryIndexed4:
3945 case Match_InvalidMemoryIndexed8:
3946 case Match_InvalidMemoryIndexed16:
3947 case Match_InvalidCondCode:
3948 case Match_AddSubRegExtendSmall:
3949 case Match_AddSubRegExtendLarge:
3950 case Match_AddSubSecondSource:
3951 case Match_LogicalSecondSource:
3952 case Match_AddSubRegShift32:
3953 case Match_AddSubRegShift64:
3954 case Match_InvalidMovImm32Shift:
3955 case Match_InvalidMovImm64Shift:
3956 case Match_InvalidFPImm:
3957 case Match_InvalidMemoryWExtend8:
3958 case Match_InvalidMemoryWExtend16:
3959 case Match_InvalidMemoryWExtend32:
3960 case Match_InvalidMemoryWExtend64:
3961 case Match_InvalidMemoryWExtend128:
3962 case Match_InvalidMemoryXExtend8:
3963 case Match_InvalidMemoryXExtend16:
3964 case Match_InvalidMemoryXExtend32:
3965 case Match_InvalidMemoryXExtend64:
3966 case Match_InvalidMemoryXExtend128:
3967 case Match_InvalidMemoryIndexed4SImm7:
3968 case Match_InvalidMemoryIndexed8SImm7:
3969 case Match_InvalidMemoryIndexed16SImm7:
3970 case Match_InvalidMemoryIndexedSImm9:
3971 case Match_InvalidImm0_7:
3972 case Match_InvalidImm0_15:
3973 case Match_InvalidImm0_31:
3974 case Match_InvalidImm0_63:
3975 case Match_InvalidImm0_127:
3976 case Match_InvalidImm0_65535:
3977 case Match_InvalidImm1_8:
3978 case Match_InvalidImm1_16:
3979 case Match_InvalidImm1_32:
3980 case Match_InvalidImm1_64:
3981 case Match_InvalidIndex1:
3982 case Match_InvalidIndexB:
3983 case Match_InvalidIndexH:
3984 case Match_InvalidIndexS:
3985 case Match_InvalidIndexD:
3986 case Match_InvalidLabel:
3989 if (ErrorInfo >= Operands.size())
3990 return Error(IDLoc, "too few operands for instruction")
3991 // Any time we get here, there's nothing fancy to do. Just get the
3992 // operand SMLoc and display the diagnostic.
3993 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3994 if (ErrorLoc == SMLoc())
3996 return showMatchError(ErrorLoc, MatchResult);
4000 llvm_unreachable("Implement any new match types added!");
4003 /// ParseDirective parses the arm specific directives
// Dispatches on the directive name; returns the callee's result for handled
// directives. .inst is gated to non-MachO/non-COFF (i.e. ELF) output, and
// anything unrecognized falls through to the .loh handler.
4004 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4005 const MCObjectFileInfo::Environment Format =
4006 getContext().getObjectFileInfo()->getObjectFileType();
4007 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4008 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4010 StringRef IDVal = DirectiveID.getIdentifier();
4011 SMLoc Loc = DirectiveID.getLoc();
4012 if (IDVal == ".hword")
4013 return parseDirectiveWord(2, Loc);
4014 if (IDVal == ".word")
4015 return parseDirectiveWord(4, Loc);
4016 if (IDVal == ".xword")
4017 return parseDirectiveWord(8, Loc);
4018 if (IDVal == ".tlsdesccall")
4019 return parseDirectiveTLSDescCall(Loc);
4020 if (IDVal == ".ltorg" || IDVal == ".pool")
4021 return parseDirectiveLtorg(Loc);
4022 if (IDVal == ".unreq")
4023 return parseDirectiveUnreq(DirectiveID.getLoc());
4025 if (!IsMachO && !IsCOFF) {
4026 if (IDVal == ".inst")
4027 return parseDirectiveInst(Loc);
4030 return parseDirectiveLOH(IDVal, Loc);
4033 /// parseDirectiveWord
4034 /// ::= .word [ expression (, expression)* ]
// Emits each comma-separated expression as a Size-byte value via the
// streamer. Size is 2/4/8 for .hword/.word/.xword (see ParseDirective).
4035 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4036 MCAsmParser &Parser = getParser();
4037 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4039 const MCExpr *Value;
4040 if (getParser().parseExpression(Value))
4043 getParser().getStreamer().EmitValue(Value, Size);
4045 if (getLexer().is(AsmToken::EndOfStatement))
4048 // FIXME: Improve diagnostic.
4049 if (getLexer().isNot(AsmToken::Comma))
4050 return Error(L, "unexpected token in directive");
4059 /// parseDirectiveInst
4060 /// ::= .inst opcode [, ...]
// Emits raw instruction words: each comma-separated operand must evaluate to
// a constant expression, which is handed to the target streamer's emitInst.
4061 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4062 MCAsmParser &Parser = getParser();
4063 if (getLexer().is(AsmToken::EndOfStatement)) {
4064 Parser.eatToEndOfStatement();
4065 Error(Loc, "expected expression following directive");
4072 if (getParser().parseExpression(Expr)) {
4073 Error(Loc, "expected expression");
// Only fully-resolved constants are accepted; relocatable expressions are
// rejected here.
4077 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4079 Error(Loc, "expected constant expression");
4083 getTargetStreamer().emitInst(Value->getValue());
4085 if (getLexer().is(AsmToken::EndOfStatement))
4088 if (getLexer().isNot(AsmToken::Comma)) {
4089 Error(Loc, "unexpected token in directive");
4093 Parser.Lex(); // Eat comma.
4100 // parseDirectiveTLSDescCall:
4101 // ::= .tlsdesccall symbol
// Emits the pseudo-instruction TLSDESCCALL carrying a VK_TLSDESC reference
// to the named symbol; it produces a relocation rather than machine code.
4102 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4104 if (getParser().parseIdentifier(Name))
4105 return Error(L, "expected symbol after directive");
4107 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4108 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4109 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4112 Inst.setOpcode(AArch64::TLSDESCCALL);
4113 Inst.addOperand(MCOperand::createExpr(Expr));
4115 getParser().getStreamer().EmitInstruction(Inst, STI);
4119 /// ::= .loh <lohName | lohId> label1, ..., labelN
4120 /// The number of arguments depends on the loh identifier.
// Parses a MachO linker-optimization-hint directive. The hint kind may be
// given by name or by numeric id; the expected label count is looked up
// from the kind and the result is forwarded to the streamer.
4121 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4122 if (IDVal != MCLOHDirectiveName())
4125 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4126 if (getParser().getTok().isNot(AsmToken::Integer))
4127 return TokError("expected an identifier or a number in directive");
4128 // We successfully get a numeric value for the identifier.
4129 // Check if it is valid.
4130 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): 'Id <= -1U' compares an int64_t against unsigned -1, so Id
// is promoted and the condition's intent (rejecting negative/overlarge ids)
// is unclear -- confirm against isValidMCLOHType's contract.
4131 if (Id <= -1U && !isValidMCLOHType(Id))
4132 return TokError("invalid numeric identifier in directive");
4133 Kind = (MCLOHType)Id;
4135 StringRef Name = getTok().getIdentifier();
4136 // We successfully parse an identifier.
4137 // Check if it is a recognized one.
4138 int Id = MCLOHNameToId(Name);
4141 return TokError("invalid identifier in directive");
4142 Kind = (MCLOHType)Id;
4144 // Consume the identifier.
4146 // Get the number of arguments of this LOH.
4147 int NbArgs = MCLOHIdToNbArgs(Kind);
4149 assert(NbArgs != -1 && "Invalid number of arguments");
4151 SmallVector<MCSymbol *, 3> Args;
4152 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4154 if (getParser().parseIdentifier(Name))
4155 return TokError("expected identifier in directive");
4156 Args.push_back(getContext().getOrCreateSymbol(Name))
4158 if (Idx + 1 == NbArgs)
4160 if (getLexer().isNot(AsmToken::Comma))
4161 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4164 if (getLexer().isNot(AsmToken::EndOfStatement))
4165 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4167 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4171 /// parseDirectiveLtorg
4172 /// ::= .ltorg | .pool
// Flushes the pending literal (constant) pool at the current location.
4173 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4174 getTargetStreamer().emitCurrentConstantPool();
4178 /// parseDirectiveReq
4179 /// ::= name .req registername
// Records 'Name' as an alias for a scalar or vector register in
// RegisterReqs. Re-registering the same alias with a different target only
// warns (the original mapping is kept, per StringMap::insert semantics).
4180 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4181 MCAsmParser &Parser = getParser();
4182 Parser.Lex(); // Eat the '.req' token.
4183 SMLoc SRegLoc = getLoc();
4184 unsigned RegNum = tryParseRegister();
4185 bool IsVector = false;
// Not a scalar register: retry as a vector register, which must be written
// without a type suffix in this context.
4187 if (RegNum == static_cast<unsigned>(-1)) {
4189 RegNum = tryMatchVectorRegister(Kind, false);
4190 if (!Kind.empty()) {
4191 Error(SRegLoc, "vector register without type specifier expected");
4197 if (RegNum == static_cast<unsigned>(-1)) {
4198 Parser.eatToEndOfStatement();
4199 Error(SRegLoc, "register name or alias expected");
4203 // Shouldn't be anything else.
4204 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4205 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4206 Parser.eatToEndOfStatement();
4210 Parser.Lex(); // Consume the EndOfStatement
4212 auto pair = std::make_pair(IsVector, RegNum);
4213 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4214 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4219 /// parseDirectiveUnreq
4220 /// ::= .unreq registername
// Removes a .req alias. Lookup is case-insensitive via .lower(), matching
// how aliases are resolved elsewhere; unknown names are silently ignored
// by StringMap::erase.
4221 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4222 MCAsmParser &Parser = getParser();
4223 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4224 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4225 Parser.eatToEndOfStatement();
4228 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4229 Parser.Lex(); // Eat the identifier.
// Decomposes an immediate expression into (ELF variant kind, Darwin variant
// kind, constant addend). Returns true if the expression has one of the
// recognized shapes: sym, sym +/- constant, optionally wrapped in an
// AArch64MCExpr modifier -- and does not mix ELF and Darwin syntax.
4234 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4235 AArch64MCExpr::VariantKind &ELFRefKind,
4236 MCSymbolRefExpr::VariantKind &DarwinRefKind,
// Establish "nothing found" defaults before inspecting the expression.
4238 ELFRefKind = AArch64MCExpr::VK_INVALID;
4239 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off a target-specific modifier (e.g. :lo12:) if present.
4242 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4243 ELFRefKind = AE->getKind();
4244 Expr = AE->getSubExpr();
4247 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4249 // It's a simple symbol reference with no addend.
4250 DarwinRefKind = SE->getKind();
// Otherwise the only other accepted shape is a binary add/sub whose LHS is
// a symbol reference and whose RHS is a constant.
4254 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4258 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4261 DarwinRefKind = SE->getKind();
4263 if (BE->getOpcode() != MCBinaryExpr::Add &&
4264 BE->getOpcode() != MCBinaryExpr::Sub)
4267 // See if the addend is a constant, otherwise there's more going
4268 // on here than we can deal with.
4269 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4273 Addend = AddendExpr->getValue();
4274 if (BE->getOpcode() == MCBinaryExpr::Sub)
4277 // It's some symbol reference + a constant addend, but really
4278 // shouldn't use both Darwin and ELF syntax.
4279 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4280 DarwinRefKind == MCSymbolRefExpr::VK_None;
4283 /// Force static initialization.
// Registers this parser with the target registry for little-endian,
// big-endian, and the legacy 'arm64' target names.
4284 extern "C" void LLVMInitializeAArch64AsmParser() {
4285 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4286 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4287 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4290 #define GET_REGISTER_MATCHER
4291 #define GET_SUBTARGET_FEATURE_NAME
4292 #define GET_MATCHER_IMPLEMENTATION
4293 #include "AArch64GenAsmMatcher.inc"
4295 // Define this matcher function after the auto-generated include so we
4296 // have the match class enum definitions.
// Custom hook used by the generated matcher: for match classes representing
// a fixed literal immediate (used by InstAliases), checks that the parsed
// operand is a constant immediate equal to the expected value. Returns
// Match_Success or Match_InvalidOperand.
4297 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4299 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4300 // If the kind is a token for a literal immediate, check if our asm
4301 // operand matches. This is for InstAliases which have a fixed-value
4302 // immediate in the syntax.
4303 int64_t ExpectedVal;
4306 return Match_InvalidOperand;
4348 return Match_InvalidOperand;
// Only fully-resolved constant immediates can be compared against the
// alias's fixed value.
4349 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4351 return Match_InvalidOperand;
4352 if (CE->getValue() == ExpectedVal)
4353 return Match_Success;
4354 return Match_InvalidOperand;