1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases created via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109 bool tryParseVectorRegister(OperandVector &Operands);
110 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
113 enum AArch64MatchResultTy {
114 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
115 #define GET_OPERAND_DIAGNOSTIC_TYPES
116 #include "AArch64GenAsmMatcher.inc"
118 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
119 const MCInstrInfo &MII, const MCTargetOptions &Options)
120 : MCTargetAsmParser(Options, STI) {
121 MCAsmParserExtension::Initialize(Parser);
122 MCStreamer &S = getParser().getStreamer();
123 if (S.getTargetStreamer() == nullptr)
124 new AArch64TargetStreamer(S);
126 // Initialize the set of available features.
127 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
130 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
131 SMLoc NameLoc, OperandVector &Operands) override;
132 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
133 bool ParseDirective(AsmToken DirectiveID) override;
134 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
135 unsigned Kind) override;
137 static bool classifySymbolRef(const MCExpr *Expr,
138 AArch64MCExpr::VariantKind &ELFRefKind,
139 MCSymbolRefExpr::VariantKind &DarwinRefKind,
142 } // end anonymous namespace
146 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
148 class AArch64Operand : public MCParsedAsmOperand {
167 SMLoc StartLoc, EndLoc;
172 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
180 struct VectorListOp {
183 unsigned NumElements;
184 unsigned ElementKind;
187 struct VectorIndexOp {
195 struct ShiftedImmOp {
197 unsigned ShiftAmount;
201 AArch64CC::CondCode Code;
205 unsigned Val; // Encoded 8-bit representation.
209 unsigned Val; // Not the enum since not all values have names.
219 uint32_t PStateField;
238 struct ShiftExtendOp {
239 AArch64_AM::ShiftExtendType Type;
241 bool HasExplicitAmount;
251 struct VectorListOp VectorList;
252 struct VectorIndexOp VectorIndex;
254 struct ShiftedImmOp ShiftedImm;
255 struct CondCodeOp CondCode;
256 struct FPImmOp FPImm;
257 struct BarrierOp Barrier;
258 struct SysRegOp SysReg;
259 struct SysCRImmOp SysCRImm;
260 struct PrefetchOp Prefetch;
261 struct PSBHintOp PSBHint;
262 struct ShiftExtendOp ShiftExtend;
265 // Keep the MCContext around as the MCExprs may need manipulated during
266 // the add<>Operands() calls.
270 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
272 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
274 StartLoc = o.StartLoc;
284 ShiftedImm = o.ShiftedImm;
287 CondCode = o.CondCode;
299 VectorList = o.VectorList;
302 VectorIndex = o.VectorIndex;
308 SysCRImm = o.SysCRImm;
311 Prefetch = o.Prefetch;
317 ShiftExtend = o.ShiftExtend;
322 /// getStartLoc - Get the location of the first token of this operand.
323 SMLoc getStartLoc() const override { return StartLoc; }
324 /// getEndLoc - Get the location of the last token of this operand.
325 SMLoc getEndLoc() const override { return EndLoc; }
327 StringRef getToken() const {
328 assert(Kind == k_Token && "Invalid access!");
329 return StringRef(Tok.Data, Tok.Length);
332 bool isTokenSuffix() const {
333 assert(Kind == k_Token && "Invalid access!");
337 const MCExpr *getImm() const {
338 assert(Kind == k_Immediate && "Invalid access!");
342 const MCExpr *getShiftedImmVal() const {
343 assert(Kind == k_ShiftedImm && "Invalid access!");
344 return ShiftedImm.Val;
347 unsigned getShiftedImmShift() const {
348 assert(Kind == k_ShiftedImm && "Invalid access!");
349 return ShiftedImm.ShiftAmount;
352 AArch64CC::CondCode getCondCode() const {
353 assert(Kind == k_CondCode && "Invalid access!");
354 return CondCode.Code;
357 unsigned getFPImm() const {
358 assert(Kind == k_FPImm && "Invalid access!");
362 unsigned getBarrier() const {
363 assert(Kind == k_Barrier && "Invalid access!");
367 StringRef getBarrierName() const {
368 assert(Kind == k_Barrier && "Invalid access!");
369 return StringRef(Barrier.Data, Barrier.Length);
372 unsigned getReg() const override {
373 assert(Kind == k_Register && "Invalid access!");
377 unsigned getVectorListStart() const {
378 assert(Kind == k_VectorList && "Invalid access!");
379 return VectorList.RegNum;
382 unsigned getVectorListCount() const {
383 assert(Kind == k_VectorList && "Invalid access!");
384 return VectorList.Count;
387 unsigned getVectorIndex() const {
388 assert(Kind == k_VectorIndex && "Invalid access!");
389 return VectorIndex.Val;
392 StringRef getSysReg() const {
393 assert(Kind == k_SysReg && "Invalid access!");
394 return StringRef(SysReg.Data, SysReg.Length);
397 unsigned getSysCR() const {
398 assert(Kind == k_SysCR && "Invalid access!");
402 unsigned getPrefetch() const {
403 assert(Kind == k_Prefetch && "Invalid access!");
407 unsigned getPSBHint() const {
408 assert(Kind == k_PSBHint && "Invalid access!");
412 StringRef getPSBHintName() const {
413 assert(Kind == k_PSBHint && "Invalid access!");
414 return StringRef(PSBHint.Data, PSBHint.Length);
417 StringRef getPrefetchName() const {
418 assert(Kind == k_Prefetch && "Invalid access!");
419 return StringRef(Prefetch.Data, Prefetch.Length);
422 AArch64_AM::ShiftExtendType getShiftExtendType() const {
423 assert(Kind == k_ShiftExtend && "Invalid access!");
424 return ShiftExtend.Type;
427 unsigned getShiftExtendAmount() const {
428 assert(Kind == k_ShiftExtend && "Invalid access!");
429 return ShiftExtend.Amount;
432 bool hasShiftExtendAmount() const {
433 assert(Kind == k_ShiftExtend && "Invalid access!");
434 return ShiftExtend.HasExplicitAmount;
437 bool isImm() const override { return Kind == k_Immediate; }
438 bool isMem() const override { return false; }
439 bool isSImm9() const {
442 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
445 int64_t Val = MCE->getValue();
446 return (Val >= -256 && Val < 256);
448 bool isSImm7s4() const {
451 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
454 int64_t Val = MCE->getValue();
455 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
457 bool isSImm7s8() const {
460 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
463 int64_t Val = MCE->getValue();
464 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
466 bool isSImm7s16() const {
469 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
472 int64_t Val = MCE->getValue();
473 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
476 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
477 AArch64MCExpr::VariantKind ELFRefKind;
478 MCSymbolRefExpr::VariantKind DarwinRefKind;
480 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
482 // If we don't understand the expression, assume the best and
483 // let the fixup and relocation code deal with it.
487 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
488 ELFRefKind == AArch64MCExpr::VK_LO12 ||
489 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
490 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
491 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
492 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
493 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
494 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
495 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
496 // Note that we don't range-check the addend. It's adjusted modulo page
497 // size when converted, so there is no "out of range" condition when using
499 return Addend >= 0 && (Addend % Scale) == 0;
500 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
501 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
502 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
509 template <int Scale> bool isUImm12Offset() const {
513 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 return isSymbolicUImm12Offset(getImm(), Scale);
517 int64_t Val = MCE->getValue();
518 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
521 bool isImm0_1() const {
524 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
527 int64_t Val = MCE->getValue();
528 return (Val >= 0 && Val < 2);
530 bool isImm0_7() const {
533 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
536 int64_t Val = MCE->getValue();
537 return (Val >= 0 && Val < 8);
539 bool isImm1_8() const {
542 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
545 int64_t Val = MCE->getValue();
546 return (Val > 0 && Val < 9);
548 bool isImm0_15() const {
551 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
554 int64_t Val = MCE->getValue();
555 return (Val >= 0 && Val < 16);
557 bool isImm1_16() const {
560 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
563 int64_t Val = MCE->getValue();
564 return (Val > 0 && Val < 17);
566 bool isImm0_31() const {
569 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
572 int64_t Val = MCE->getValue();
573 return (Val >= 0 && Val < 32);
575 bool isImm1_31() const {
578 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
581 int64_t Val = MCE->getValue();
582 return (Val >= 1 && Val < 32);
584 bool isImm1_32() const {
587 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
590 int64_t Val = MCE->getValue();
591 return (Val >= 1 && Val < 33);
593 bool isImm0_63() const {
596 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
599 int64_t Val = MCE->getValue();
600 return (Val >= 0 && Val < 64);
602 bool isImm1_63() const {
605 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
608 int64_t Val = MCE->getValue();
609 return (Val >= 1 && Val < 64);
611 bool isImm1_64() const {
614 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
617 int64_t Val = MCE->getValue();
618 return (Val >= 1 && Val < 65);
620 bool isImm0_127() const {
623 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
626 int64_t Val = MCE->getValue();
627 return (Val >= 0 && Val < 128);
629 bool isImm0_255() const {
632 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
635 int64_t Val = MCE->getValue();
636 return (Val >= 0 && Val < 256);
638 bool isImm0_65535() const {
641 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
644 int64_t Val = MCE->getValue();
645 return (Val >= 0 && Val < 65536);
647 bool isImm32_63() const {
650 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
653 int64_t Val = MCE->getValue();
654 return (Val >= 32 && Val < 64);
656 bool isLogicalImm32() const {
659 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
662 int64_t Val = MCE->getValue();
663 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
666 return AArch64_AM::isLogicalImmediate(Val, 32);
668 bool isLogicalImm64() const {
671 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
674 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
676 bool isLogicalImm32Not() const {
679 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
682 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
683 return AArch64_AM::isLogicalImmediate(Val, 32);
685 bool isLogicalImm64Not() const {
688 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
691 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
693 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
694 bool isAddSubImm() const {
695 if (!isShiftedImm() && !isImm())
700 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
701 if (isShiftedImm()) {
702 unsigned Shift = ShiftedImm.ShiftAmount;
703 Expr = ShiftedImm.Val;
704 if (Shift != 0 && Shift != 12)
710 AArch64MCExpr::VariantKind ELFRefKind;
711 MCSymbolRefExpr::VariantKind DarwinRefKind;
713 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
714 DarwinRefKind, Addend)) {
715 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
716 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
717 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
718 || ELFRefKind == AArch64MCExpr::VK_LO12
719 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
720 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
721 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
722 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
723 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
724 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
725 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
728 // Otherwise it should be a real immediate in range:
729 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
730 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
732 bool isAddSubImmNeg() const {
733 if (!isShiftedImm() && !isImm())
738 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
739 if (isShiftedImm()) {
740 unsigned Shift = ShiftedImm.ShiftAmount;
741 Expr = ShiftedImm.Val;
742 if (Shift != 0 && Shift != 12)
747 // Otherwise it should be a real negative immediate in range:
748 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
749 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
751 bool isCondCode() const { return Kind == k_CondCode; }
752 bool isSIMDImmType10() const {
755 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
758 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
760 bool isBranchTarget26() const {
763 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
766 int64_t Val = MCE->getValue();
769 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
771 bool isPCRelLabel19() const {
774 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
777 int64_t Val = MCE->getValue();
780 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
782 bool isBranchTarget14() const {
785 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
788 int64_t Val = MCE->getValue();
791 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
795 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
799 AArch64MCExpr::VariantKind ELFRefKind;
800 MCSymbolRefExpr::VariantKind DarwinRefKind;
802 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
803 DarwinRefKind, Addend)) {
806 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
809 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
810 if (ELFRefKind == AllowedModifiers[i])
817 bool isMovZSymbolG3() const {
818 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
821 bool isMovZSymbolG2() const {
822 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
823 AArch64MCExpr::VK_TPREL_G2,
824 AArch64MCExpr::VK_DTPREL_G2});
827 bool isMovZSymbolG1() const {
828 return isMovWSymbol({
829 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
830 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
831 AArch64MCExpr::VK_DTPREL_G1,
835 bool isMovZSymbolG0() const {
836 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
837 AArch64MCExpr::VK_TPREL_G0,
838 AArch64MCExpr::VK_DTPREL_G0});
841 bool isMovKSymbolG3() const {
842 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
845 bool isMovKSymbolG2() const {
846 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
849 bool isMovKSymbolG1() const {
850 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
851 AArch64MCExpr::VK_TPREL_G1_NC,
852 AArch64MCExpr::VK_DTPREL_G1_NC});
855 bool isMovKSymbolG0() const {
857 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
858 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
861 template<int RegWidth, int Shift>
862 bool isMOVZMovAlias() const {
863 if (!isImm()) return false;
865 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
866 if (!CE) return false;
867 uint64_t Value = CE->getValue();
870 Value &= 0xffffffffULL;
872 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
873 if (Value == 0 && Shift != 0)
876 return (Value & ~(0xffffULL << Shift)) == 0;
879 template<int RegWidth, int Shift>
880 bool isMOVNMovAlias() const {
881 if (!isImm()) return false;
883 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
884 if (!CE) return false;
885 uint64_t Value = CE->getValue();
887 // MOVZ takes precedence over MOVN.
888 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
889 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
894 Value &= 0xffffffffULL;
896 return (Value & ~(0xffffULL << Shift)) == 0;
899 bool isFPImm() const { return Kind == k_FPImm; }
900 bool isBarrier() const { return Kind == k_Barrier; }
901 bool isSysReg() const { return Kind == k_SysReg; }
902 bool isMRSSystemRegister() const {
903 if (!isSysReg()) return false;
905 return SysReg.MRSReg != -1U;
907 bool isMSRSystemRegister() const {
908 if (!isSysReg()) return false;
909 return SysReg.MSRReg != -1U;
911 bool isSystemPStateFieldWithImm0_1() const {
912 if (!isSysReg()) return false;
913 return (SysReg.PStateField == AArch64PState::PAN ||
914 SysReg.PStateField == AArch64PState::UAO);
916 bool isSystemPStateFieldWithImm0_15() const {
917 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
918 return SysReg.PStateField != -1U;
920 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
921 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
922 bool isVectorRegLo() const {
923 return Kind == k_Register && Reg.isVector &&
924 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
927 bool isGPR32as64() const {
928 return Kind == k_Register && !Reg.isVector &&
929 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
931 bool isWSeqPair() const {
932 return Kind == k_Register && !Reg.isVector &&
933 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
936 bool isXSeqPair() const {
937 return Kind == k_Register && !Reg.isVector &&
938 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
942 bool isGPR64sp0() const {
943 return Kind == k_Register && !Reg.isVector &&
944 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
947 /// Is this a vector list with the type implicit (presumably attached to the
948 /// instruction itself)?
949 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
950 return Kind == k_VectorList && VectorList.Count == NumRegs &&
951 !VectorList.ElementKind;
954 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
955 bool isTypedVectorList() const {
956 if (Kind != k_VectorList)
958 if (VectorList.Count != NumRegs)
960 if (VectorList.ElementKind != ElementKind)
962 return VectorList.NumElements == NumElements;
965 bool isVectorIndex1() const {
966 return Kind == k_VectorIndex && VectorIndex.Val == 1;
968 bool isVectorIndexB() const {
969 return Kind == k_VectorIndex && VectorIndex.Val < 16;
971 bool isVectorIndexH() const {
972 return Kind == k_VectorIndex && VectorIndex.Val < 8;
974 bool isVectorIndexS() const {
975 return Kind == k_VectorIndex && VectorIndex.Val < 4;
977 bool isVectorIndexD() const {
978 return Kind == k_VectorIndex && VectorIndex.Val < 2;
980 bool isToken() const override { return Kind == k_Token; }
981 bool isTokenEqual(StringRef Str) const {
982 return Kind == k_Token && getToken() == Str;
984 bool isSysCR() const { return Kind == k_SysCR; }
985 bool isPrefetch() const { return Kind == k_Prefetch; }
986 bool isPSBHint() const { return Kind == k_PSBHint; }
987 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
988 bool isShifter() const {
989 if (!isShiftExtend())
992 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
993 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
994 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
995 ST == AArch64_AM::MSL);
997 bool isExtend() const {
998 if (!isShiftExtend())
1001 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1002 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1003 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1004 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1005 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1006 ET == AArch64_AM::LSL) &&
1007 getShiftExtendAmount() <= 4;
1010 bool isExtend64() const {
1013 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1014 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1015 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1017 bool isExtendLSL64() const {
1020 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1021 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1022 ET == AArch64_AM::LSL) &&
1023 getShiftExtendAmount() <= 4;
1026 template<int Width> bool isMemXExtend() const {
1029 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1030 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1031 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1032 getShiftExtendAmount() == 0);
1035 template<int Width> bool isMemWExtend() const {
1038 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1039 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1040 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1041 getShiftExtendAmount() == 0);
1044 template <unsigned width>
1045 bool isArithmeticShifter() const {
1049 // An arithmetic shifter is LSL, LSR, or ASR.
1050 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1051 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1052 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1055 template <unsigned width>
1056 bool isLogicalShifter() const {
1060 // A logical shifter is LSL, LSR, ASR or ROR.
1061 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1062 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1063 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1064 getShiftExtendAmount() < width;
1067 bool isMovImm32Shifter() const {
1071 // A 32-bit MOVi shifter is LSL of 0 or 16.
1072 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1073 if (ST != AArch64_AM::LSL)
1075 uint64_t Val = getShiftExtendAmount();
1076 return (Val == 0 || Val == 16);
1079 bool isMovImm64Shifter() const {
1083 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1084 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1085 if (ST != AArch64_AM::LSL)
1087 uint64_t Val = getShiftExtendAmount();
1088 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1091 bool isLogicalVecShifter() const {
1095 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1096 unsigned Shift = getShiftExtendAmount();
1097 return getShiftExtendType() == AArch64_AM::LSL &&
1098 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1101 bool isLogicalVecHalfWordShifter() const {
1102 if (!isLogicalVecShifter())
1105 // A logical vector shifter is a left shift by 0 or 8.
1106 unsigned Shift = getShiftExtendAmount();
1107 return getShiftExtendType() == AArch64_AM::LSL &&
1108 (Shift == 0 || Shift == 8);
1111 bool isMoveVecShifter() const {
1112 if (!isShiftExtend())
1115 // A move vector shifter is an MSL shift by 8 or 16.
1116 unsigned Shift = getShiftExtendAmount();
1117 return getShiftExtendType() == AArch64_AM::MSL &&
1118 (Shift == 8 || Shift == 16);
1121 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1122 // to LDUR/STUR when the offset is not legal for the former but is for
1123 // the latter. As such, in addition to checking for being a legal unscaled
1124 // address, also check that it is not a legal scaled address. This avoids
1125 // ambiguity in the matcher.
1127 bool isSImm9OffsetFB() const {
1128 return isSImm9() && !isUImm12Offset<Width / 8>();
1131 bool isAdrpLabel() const {
1132 // Validation was handled during parsing, so we just sanity check that
1133 // something didn't go haywire.
1137 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1138 int64_t Val = CE->getValue();
1139 int64_t Min = - (4096 * (1LL << (21 - 1)));
1140 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1141 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1147 bool isAdrLabel() const {
1148 // Validation was handled during parsing, so we just sanity check that
1149 // something didn't go haywire.
1153 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1154 int64_t Val = CE->getValue();
1155 int64_t Min = - (1LL << (21 - 1));
1156 int64_t Max = ((1LL << (21 - 1)) - 1);
1157 return Val >= Min && Val <= Max;
1163 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1164 // Add as immediates when possible. Null MCExpr = 0.
1166 Inst.addOperand(MCOperand::createImm(0));
1167 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1168 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1170 Inst.addOperand(MCOperand::createExpr(Expr));
1173 void addRegOperands(MCInst &Inst, unsigned N) const {
1174 assert(N == 1 && "Invalid number of operands!");
1175 Inst.addOperand(MCOperand::createReg(getReg()));
1178 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1179 assert(N == 1 && "Invalid number of operands!");
1181 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1183 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1184 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1185 RI->getEncodingValue(getReg()));
1187 Inst.addOperand(MCOperand::createReg(Reg));
1190 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1191 assert(N == 1 && "Invalid number of operands!");
1193 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1194 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1197 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1198 assert(N == 1 && "Invalid number of operands!");
1200 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1201 Inst.addOperand(MCOperand::createReg(getReg()));
1204 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1205 assert(N == 1 && "Invalid number of operands!");
1206 Inst.addOperand(MCOperand::createReg(getReg()));
1209 template <unsigned NumRegs>
1210 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1211 assert(N == 1 && "Invalid number of operands!");
1212 static const unsigned FirstRegs[] = { AArch64::D0,
1215 AArch64::D0_D1_D2_D3 };
1216 unsigned FirstReg = FirstRegs[NumRegs - 1];
1219 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1222 template <unsigned NumRegs>
1223 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1224 assert(N == 1 && "Invalid number of operands!");
1225 static const unsigned FirstRegs[] = { AArch64::Q0,
1228 AArch64::Q0_Q1_Q2_Q3 };
1229 unsigned FirstReg = FirstRegs[NumRegs - 1];
1232 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1235 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1236 assert(N == 1 && "Invalid number of operands!");
1237 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1240 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1245 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1246 assert(N == 1 && "Invalid number of operands!");
1247 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1250 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1251 assert(N == 1 && "Invalid number of operands!");
1252 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1255 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1256 assert(N == 1 && "Invalid number of operands!");
1257 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  /// Add the immediate as an expression operand (constants become plain
  /// immediates inside addExpr).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expression
    // as-is.
    addExpr(Inst, getImm());

  /// Add an ADD/SUB immediate as two MCInst operands: the value followed by
  /// the LSL shift amount (0 when no explicit shift was written).
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));

      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));

  /// Add the *negated* immediate plus shift amount; used when an add/sub
  /// alias with a negative immediate is matched onto the opposite opcode.
  /// Requires the immediate to be a constant expression (cast asserts).
  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
    int64_t Val = -CE->getValue();
    unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;

    Inst.addOperand(MCOperand::createImm(Val));
    Inst.addOperand(MCOperand::createImm(ShiftAmt));
  /// Emit the condition code as an immediate (AArch64CC encoding).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));

  /// ADRP target: a constant is encoded as a 4K-page delta (value >> 12);
  /// a symbolic target is added as an expression for the fixup machinery.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

      addExpr(Inst, getImm());

      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));

  /// ADR targets need no page adjustment; forward to addImmOperands.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);

  /// Unsigned 12-bit scaled load/store offset: a constant is divided by the
  /// access size Scale (a template parameter of this operand class);
  /// symbolic offsets are kept as expressions.
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

      Inst.addOperand(MCOperand::createExpr(getImm()));

    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  /// Signed 9-bit (unscaled) offset: emitted verbatim.
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  // The SImm7s{4,8,16} variants are signed 7-bit offsets scaled by the
  // access size (load/store-pair addressing); the raw value is divided by
  // the scale before encoding.  Range/alignment was checked by the matcher.
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
  // The addImmX_YOperands family below all do the same thing: the range
  // named in the method (e.g. 0..31) was already validated by the matching
  // isImmX_Y predicate, so each method just emits the constant value.  The
  // cast<MCConstantExpr> asserts if a non-constant slipped through.
  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // Redundant with the cast above, but kept for the clearer message.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));

  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  /// Encode a 32-bit bitmask ("logical") immediate into its N:immr:imms form.
  /// The value is masked to 32 bits first since it is stored sign-extended.
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());

        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
    Inst.addOperand(MCOperand::createImm(encoding));

  /// Encode a 64-bit bitmask immediate.
  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::createImm(encoding));

  /// "Not" variants: invert the value before encoding, used when an alias
  /// (e.g. BIC from AND) needs the complement of the written immediate.
  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
    Inst.addOperand(MCOperand::createImm(encoding));

  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());

        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
    Inst.addOperand(MCOperand::createImm(encoding));

  /// AdvSIMD modified-immediate, type 10 (abcdefgh byte-mask form, FMOV.2d
  /// style encoding).
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  /// 26-bit branch target (B/BL).
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

      addExpr(Inst, getImm());

    assert(MCE && "Invalid constant immediate operand!");
    // Word-aligned target: drop the two always-zero low bits.
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));

  /// 19-bit PC-relative label (conditional branch / LDR literal).
  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

      addExpr(Inst, getImm());

    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));

  /// 14-bit branch target (TBZ/TBNZ).
  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

      addExpr(Inst, getImm());

    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  // The following adders emit the stored field of the corresponding operand
  // kind as an immediate; validation happened at parse/match time.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFPImm()));

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));

  // System registers keep separate MRS (read) and MSR (write) encodings;
  // the matcher picks the right adder for the instruction's direction.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));

  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  /// Pack the shift kind + amount into the single-immediate shifter form.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));

  /// Arithmetic extend for a 32-bit base register.  A plain "lsl" written by
  /// the user is canonicalized to UXTW before encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));

  /// Same as above for a 64-bit base register: "lsl" canonicalizes to UXTX.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));

  /// Register-offset memory extend: emits two operands, the sign flag
  /// (SXTW/SXTX) and whether a non-zero shift is applied.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than by the amount of shifting.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // Second operand is "was a shift written at all", not its value.
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));

  /// MOVZ-style "mov" alias: emit the 16-bit chunk of the constant selected
  /// by the (templated) Shift amount.
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));

  /// MOVN-style "mov" alias: same as above on the bitwise-inverted value.
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));

  /// Debug dump of this operand; defined out of line below.
  void print(raw_ostream &OS) const override;
  // Factory methods.  Each allocates an AArch64Operand of the matching kind
  // and fills in the corresponding union member.  String data (Tok, etc.) is
  // NOT copied; the StringRef must outlive the operand (it points into the
  // source buffer).
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;

  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;

  /// \p RegNum is the Q-form start register; \p NumElements/\p ElementKind
  /// describe the ".<count><kind>" suffix (e.g. ".4s").
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;

  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;

  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);

  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
                                                          unsigned ShiftAmount,
    auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
    Op->ShiftedImm .Val = Val;
    Op->ShiftedImm.ShiftAmount = ShiftAmount;
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;

  /// \p Val is the 8-bit encoded FP immediate, not the raw float.
  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val;

  /// Barrier (DMB/DSB/ISB) option: encoded value plus the spelling used,
  /// kept for diagnostics/printing.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();

  /// System register operand: records the name plus all three possible
  /// encodings (MRS read, MSR write, MSR-immediate PState field); the adder
  /// chosen by the matcher picks the one that applies.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t PStateField,
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;

  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
1748 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1752 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1753 Op->Prefetch.Val = Val;
1754 Op->Barrier.Data = Str.data();
1755 Op->Barrier.Length = Str.size();
  /// PSB hint operand ("psb csync"): encoded value plus spelling.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
    auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();

  /// Shift/extend modifier (e.g. "lsl #3", "uxtw #2").  HasExplicitAmount
  /// distinguishes a written "#0" from no amount at all — that difference
  /// matters for 8-bit mem-extend disambiguation (see addMemExtend8Operands).
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1787 } // end anonymous namespace.
/// Debug dump: render the operand in a human-readable <kind value> form.
/// The body dispatches on the operand Kind; named values (barrier, prfop)
/// fall back to a "<... invalid #n>" form when no name is known.
void AArch64Operand::print(raw_ostream &OS) const {
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    StringRef Name = getBarrierName();
      OS << "<barrier " << Name << ">";
      OS << "<barrier invalid #" << getBarrier() << ">";
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    OS << "<condcode " << getCondCode() << ">";
    OS << "<register " << getReg() << ">";
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    // Registers in a list are consecutive, so print Reg, Reg+1, ...
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << "<vectorindex " << getVectorIndex() << ">";
    OS << "<sysreg: " << getSysReg() << '>';
    OS << "'" << getToken() << "'";
    OS << "c" << getSysCR();
    StringRef Name = getPrefetchName();
      OS << "<prfop " << Name << ">";
      OS << "<prfop invalid #" << getPrefetch() << ">";
    OS << getPSBHintName();
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
/// @name Auto-generated Match Functions

// Defined by tablegen in AArch64GenAsmMatcher.inc.
static unsigned MatchRegisterName(StringRef Name);

/// Map a (case-insensitive) "vN" vector register name onto its canonical
/// Q-register.  Vector registers are always stored in Q form; operand
/// emission rebases onto D/B/H/S classes as needed.
static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
/// Return true if Name is a recognized vector arrangement suffix
/// (e.g. ".8b", ".4s"), case-insensitively.
static bool isValidVectorKind(StringRef Name) {
  return StringSwitch<bool>(Name.lower())
    // Accept the width neutral ones, too, for verbose syntax. If those
    // aren't used in the right places, the token operand won't match so
    // all will work out.
    // Needed for fp16 scalar pairwise reductions
1924 // Needed for fp16 scalar pairwise reductions
1929 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1930 char &ElementKind) {
1931 assert(isValidVectorKind(Name));
1933 ElementKind = Name.lower()[Name.size() - 1];
1936 if (Name.size() == 2)
1939 // Parse the lane count
1940 Name = Name.drop_front();
1941 while (isdigit(Name.front())) {
1942 NumElements = 10 * NumElements + (Name.front() - '0');
1943 Name = Name.drop_front();
/// MCTargetAsmParser hook: parse a register, reporting its source range.
/// Returns true (failure) when the current token is not a register.
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  // tryParseRegister returns -1 when nothing matched.
  return (RegNo == (unsigned)-1);
// Matches a register name or register alias previously defined by '.req'.
// isVector selects between the vector ("vN") and scalar name spaces; a
// .req alias only applies if it was registered with the same kind.
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
  unsigned RegNum = isVector ? matchVectorRegName(Name)
                             : MatchRegisterName(Name);

  // Check for aliases registered via .req. Canonicalize to lower case.
  // That's more consistent since register names are case insensitive, and
  // it's how the original entry was passed in from MC/MCParser/AsmParser.
  auto Entry = RegisterReqs.find(Name.lower());
  if (Entry == RegisterReqs.end())

  // set RegNum if the match is the right kind of register
  if (isVector == Entry->getValue().first)
    RegNum = Entry->getValue().second;
/// tryParseRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.  Returns the register number,
/// or -1 if the identifier is not a register.
int AArch64AsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
  // Also handle a few aliases of registers.
    RegNum = StringSwitch<unsigned>(lowerCase)
                 .Case("fp", AArch64::FP)
                 .Case("lr", AArch64::LR)
                 .Case("x31", AArch64::XZR)
                 .Case("w31", AArch64::WZR)

  Parser.Lex(); // Eat identifier token.
/// tryMatchVectorRegister - Try to parse a vector register name with optional
/// kind specifier. If it is a register specifier, eat the token and return it.
/// On success, any ".<count><kind>" suffix is returned through \p Kind.
int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    TokError("vector register expected");

  StringRef Name = Parser.getTok().getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, true);

    if (Next != StringRef::npos) {
      // Kind keeps the leading '.' (e.g. ".4s").
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind)) {
        TokError("invalid vector kind qualifier");

    Parser.Lex(); // Eat the register token.

  TokError("vector register expected");
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
/// Accepts "cN"/"CN" with 0 <= N <= 15 (SYS/SYSL coprocessor registers).
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;

  // Everything after the leading 'c' must be a decimal number in range.
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat identifier token.
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return MatchOperand_Success;
/// tryParsePrefetch - Try to parse a prefetch operand: either a named prfop
/// (e.g. "pldl1keep") or a raw immediate in [0, 31], with or without '#'.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {

      Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);

      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;

    unsigned prfop = MCE->getValue();

      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;

    // Recover a name for the numeric prfop when one exists, so the
    // operand prints symbolically.
    auto Mapper = AArch64PRFM::PRFMMapper();

        Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
    return MatchOperand_Success;

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;

  // Named form: map the identifier onto its prfop encoding.
  auto Mapper = AArch64PRFM::PRFMMapper();

      Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);

    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
  return MatchOperand_Success;
/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command.
/// Only named forms (e.g. "csync") are accepted — no raw immediate.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;

  auto Mapper = AArch64PSBHint::PSBHintMapper();

      Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);

    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePSBHint(psbhint, Tok.getString(),
  return MatchOperand_Success;
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction, including @page/@gotpage/... modifier checking.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;

  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page portion.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.  Unlike ADRP, no modifier classification is needed; the
/// expression is taken as-is.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.

  if (getParser().parseExpression(Expr))
    return MatchOperand_ParseFail;

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts a real literal (encoded via getFP64Imm), a "0x.." byte giving
/// the already-encoded 8-bit value, or an integer re-parsed as a double.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {

  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());

      RealVal.changeSign();

    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // getFP64Imm returns -1 for values not representable as FMOV imm8.
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isPosZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;

  if (Tok.is(AsmToken::Integer)) {

    // A hex literal is the raw 8-bit encoded immediate, not a float value.
    if (!isNegative && Tok.getString().startswith("0x")) {
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;

      // Decimal integer: reinterpret the digits as a double and encode.
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));

    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;

  return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand: an optional
/// '#', an immediate (possibly symbolic), then an optional ", lsl #N".
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift written.  A constant like 0x3000 that only has
    // bits above the low 12 is canonicalized to (value >> 12, lsl #12).
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);

      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::create(Val >> 12, getContext());

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
    return MatchOperand_Success;

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;

  // '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
2348 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of the mnemonic suffix to AArch64CC; note the
// architectural aliases "cs"=="hs" and "cc"=="lo". Returns
// AArch64CC::Invalid for anything unrecognized.
2349 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2350 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2351 .Case("eq", AArch64CC::EQ)
2352 .Case("ne", AArch64CC::NE)
2353 .Case("cs", AArch64CC::HS)
2354 .Case("hs", AArch64CC::HS)
2355 .Case("cc", AArch64CC::LO)
2356 .Case("lo", AArch64CC::LO)
2357 .Case("mi", AArch64CC::MI)
2358 .Case("pl", AArch64CC::PL)
2359 .Case("vs", AArch64CC::VS)
2360 .Case("vc", AArch64CC::VC)
2361 .Case("hi", AArch64CC::HI)
2362 .Case("ls", AArch64CC::LS)
2363 .Case("ge", AArch64CC::GE)
2364 .Case("lt", AArch64CC::LT)
2365 .Case("gt", AArch64CC::GT)
2366 .Case("le", AArch64CC::LE)
2367 .Case("al", AArch64CC::AL)
2368 .Case("nv", AArch64CC::NV)
2369 .Default(AArch64CC::Invalid);
2373 /// parseCondCode - Parse a Condition Code operand.
// Consumes the identifier token and pushes a CondCode operand. When
// invertCondCode is set (e.g. for csel-style aliases) the code is inverted;
// AL and NV have no meaningful inverse and are rejected.
2374 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2375 bool invertCondCode) {
2376 MCAsmParser &Parser = getParser();
2378 const AsmToken &Tok = Parser.getTok();
2379 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2381 StringRef Cond = Tok.getString();
2382 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2383 if (CC == AArch64CC::Invalid)
2384 return TokError("invalid condition code");
2385 Parser.Lex(); // Eat identifier token.
2387 if (invertCondCode) {
2388 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2389 return TokError("condition codes AL and NV are invalid for this instruction")
2390 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2394 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2398 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2399 /// them if present.
// Recognizes both shift (lsl/lsr/asr/ror/msl) and extend
// (uxtb/…/sxtx) specifiers. Shifts require an immediate; extends default to
// an implicit #0 when no immediate follows.
2400 AArch64AsmParser::OperandMatchResultTy
2401 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2402 MCAsmParser &Parser = getParser();
2403 const AsmToken &Tok = Parser.getTok();
2404 std::string LowerID = Tok.getString().lower();
2405 AArch64_AM::ShiftExtendType ShOp =
2406 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2407 .Case("lsl", AArch64_AM::LSL)
2408 .Case("lsr", AArch64_AM::LSR)
2409 .Case("asr", AArch64_AM::ASR)
2410 .Case("ror", AArch64_AM::ROR)
2411 .Case("msl", AArch64_AM::MSL)
2412 .Case("uxtb", AArch64_AM::UXTB)
2413 .Case("uxth", AArch64_AM::UXTH)
2414 .Case("uxtw", AArch64_AM::UXTW)
2415 .Case("uxtx", AArch64_AM::UXTX)
2416 .Case("sxtb", AArch64_AM::SXTB)
2417 .Case("sxth", AArch64_AM::SXTH)
2418 .Case("sxtw", AArch64_AM::SXTW)
2419 .Case("sxtx", AArch64_AM::SXTX)
2420 .Default(AArch64_AM::InvalidShiftExtend)
2422 if (ShOp == AArch64_AM::InvalidShiftExtend)
2423 return MatchOperand_NoMatch;
2425 SMLoc S = Tok.getLoc();
2428 bool Hash = getLexer().is(AsmToken::Hash);
2429 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2430 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2431 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2432 ShOp == AArch64_AM::MSL) {
2433 // We expect a number here.
2434 TokError("expected #imm after shift specifier");
2435 return MatchOperand_ParseFail;
2438 // "extend" type operations don't need an immediate, #0 is implicit.
2439 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2441 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2442 return MatchOperand_Success;
2446 Parser.Lex(); // Eat the '#'.
2448 // Make sure we do actually have a number or a parenthesized expression.
2449 SMLoc E = Parser.getTok().getLoc();
2450 if (!Parser.getTok().is(AsmToken::Integer) &&
2451 !Parser.getTok().is(AsmToken::LParen)) {
2452 Error(E, "expected integer shift amount");
2453 return MatchOperand_ParseFail;
2456 const MCExpr *ImmVal;
2457 if (getParser().parseExpression(ImmVal))
2458 return MatchOperand_ParseFail;
2460 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2462 Error(E, "expected constant '#imm' after shift specifier");
2463 return MatchOperand_ParseFail;
2466 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2467 Operands.push_back(AArch64Operand::CreateShiftExtend(
2468 ShOp, MCE->getValue(), true, S, E, getContext()));
2469 return MatchOperand_Success;
2472 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2473 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
// Emits a "sys" token plus the four SYS operands (op1, Cn, Cm, op2) via the
// SYS_ALIAS macro, then optionally a register operand. Aliases containing
// "all" take no register; all others require one.
2474 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2475 OperandVector &Operands) {
2476 if (Name.find('.') != StringRef::npos)
2477 return TokError("invalid operand");
2481 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2483 MCAsmParser &Parser = getParser();
2484 const AsmToken &Tok = Parser.getTok();
2485 StringRef Op = Tok.getString();
2486 SMLoc S = Tok.getLoc();
2488 const MCExpr *Expr = nullptr;
2490 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2492 Expr = MCConstantExpr::create(op1, getContext()); \
2493 Operands.push_back( \
2494 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2495 Operands.push_back( \
2496 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2497 Operands.push_back( \
2498 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2499 Expr = MCConstantExpr::create(op2, getContext()); \
2500 Operands.push_back( \
2501 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2504 if (Mnemonic == "ic") {
2505 if (!Op.compare_lower("ialluis")) {
2506 // SYS #0, C7, C1, #0
2507 SYS_ALIAS(0, 7, 1, 0);
2508 } else if (!Op.compare_lower("iallu")) {
2509 // SYS #0, C7, C5, #0
2510 SYS_ALIAS(0, 7, 5, 0);
2511 } else if (!Op.compare_lower("ivau")) {
2512 // SYS #3, C7, C5, #1
2513 SYS_ALIAS(3, 7, 5, 1);
2515 return TokError("invalid operand for IC instruction");
2517 } else if (Mnemonic == "dc") {
2518 if (!Op.compare_lower("zva")) {
2519 // SYS #3, C7, C4, #1
2520 SYS_ALIAS(3, 7, 4, 1);
2521 } else if (!Op.compare_lower("ivac")) {
2522 // SYS #0, C7, C6, #1
2523 SYS_ALIAS(0, 7, 6, 1);
2524 } else if (!Op.compare_lower("isw")) {
2525 // SYS #0, C7, C6, #2
2526 SYS_ALIAS(0, 7, 6, 2);
2527 } else if (!Op.compare_lower("cvac")) {
2528 // SYS #3, C7, C10, #1
2529 SYS_ALIAS(3, 7, 10, 1);
2530 } else if (!Op.compare_lower("csw")) {
2531 // SYS #0, C7, C10, #2
2532 SYS_ALIAS(0, 7, 10, 2);
2533 } else if (!Op.compare_lower("cvau")) {
2534 // SYS #3, C7, C11, #1
2535 SYS_ALIAS(3, 7, 11, 1);
2536 } else if (!Op.compare_lower("civac")) {
2537 // SYS #3, C7, C14, #1
2538 SYS_ALIAS(3, 7, 14, 1);
2539 } else if (!Op.compare_lower("cisw")) {
2540 // SYS #0, C7, C14, #2
2541 SYS_ALIAS(0, 7, 14, 2);
2542 } else if (!Op.compare_lower("cvap")) {
2543 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2544 // SYS #3, C7, C12, #1
2545 SYS_ALIAS(3, 7, 12, 1);
2547 return TokError("DC CVAP requires ARMv8.2a");
2550 return TokError("invalid operand for DC instruction");
2552 } else if (Mnemonic == "at") {
2553 if (!Op.compare_lower("s1e1r")) {
2554 // SYS #0, C7, C8, #0
2555 SYS_ALIAS(0, 7, 8, 0);
2556 } else if (!Op.compare_lower("s1e2r")) {
2557 // SYS #4, C7, C8, #0
2558 SYS_ALIAS(4, 7, 8, 0);
2559 } else if (!Op.compare_lower("s1e3r")) {
2560 // SYS #6, C7, C8, #0
2561 SYS_ALIAS(6, 7, 8, 0);
2562 } else if (!Op.compare_lower("s1e1w")) {
2563 // SYS #0, C7, C8, #1
2564 SYS_ALIAS(0, 7, 8, 1);
2565 } else if (!Op.compare_lower("s1e2w")) {
2566 // SYS #4, C7, C8, #1
2567 SYS_ALIAS(4, 7, 8, 1);
2568 } else if (!Op.compare_lower("s1e3w")) {
2569 // SYS #6, C7, C8, #1
2570 SYS_ALIAS(6, 7, 8, 1);
2571 } else if (!Op.compare_lower("s1e0r")) {
2572 // SYS #0, C7, C8, #2
2573 SYS_ALIAS(0, 7, 8, 2);
2574 } else if (!Op.compare_lower("s1e0w")) {
2575 // SYS #0, C7, C8, #3
2576 SYS_ALIAS(0, 7, 8, 3);
2577 } else if (!Op.compare_lower("s12e1r")) {
2578 // SYS #4, C7, C8, #4
2579 SYS_ALIAS(4, 7, 8, 4);
2580 } else if (!Op.compare_lower("s12e1w")) {
2581 // SYS #4, C7, C8, #5
2582 SYS_ALIAS(4, 7, 8, 5);
2583 } else if (!Op.compare_lower("s12e0r")) {
2584 // SYS #4, C7, C8, #6
2585 SYS_ALIAS(4, 7, 8, 6);
2586 } else if (!Op.compare_lower("s12e0w")) {
2587 // SYS #4, C7, C8, #7
2588 SYS_ALIAS(4, 7, 8, 7);
2589 } else if (!Op.compare_lower("s1e1rp")) {
2590 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2591 // SYS #0, C7, C9, #0
2592 SYS_ALIAS(0, 7, 9, 0);
2594 return TokError("AT S1E1RP requires ARMv8.2a");
2596 } else if (!Op.compare_lower("s1e1wp")) {
2597 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2598 // SYS #0, C7, C9, #1
2599 SYS_ALIAS(0, 7, 9, 1);
2601 return TokError("AT S1E1WP requires ARMv8.2a");
2604 return TokError("invalid operand for AT instruction");
2606 } else if (Mnemonic == "tlbi") {
2607 if (!Op.compare_lower("vmalle1is")) {
2608 // SYS #0, C8, C3, #0
2609 SYS_ALIAS(0, 8, 3, 0);
2610 } else if (!Op.compare_lower("alle2is")) {
2611 // SYS #4, C8, C3, #0
2612 SYS_ALIAS(4, 8, 3, 0);
2613 } else if (!Op.compare_lower("alle3is")) {
2614 // SYS #6, C8, C3, #0
2615 SYS_ALIAS(6, 8, 3, 0);
2616 } else if (!Op.compare_lower("vae1is")) {
2617 // SYS #0, C8, C3, #1
2618 SYS_ALIAS(0, 8, 3, 1);
2619 } else if (!Op.compare_lower("vae2is")) {
2620 // SYS #4, C8, C3, #1
2621 SYS_ALIAS(4, 8, 3, 1);
2622 } else if (!Op.compare_lower("vae3is")) {
2623 // SYS #6, C8, C3, #1
2624 SYS_ALIAS(6, 8, 3, 1);
2625 } else if (!Op.compare_lower("aside1is")) {
2626 // SYS #0, C8, C3, #2
2627 SYS_ALIAS(0, 8, 3, 2);
2628 } else if (!Op.compare_lower("vaae1is")) {
2629 // SYS #0, C8, C3, #3
2630 SYS_ALIAS(0, 8, 3, 3);
2631 } else if (!Op.compare_lower("alle1is")) {
2632 // SYS #4, C8, C3, #4
2633 SYS_ALIAS(4, 8, 3, 4);
2634 } else if (!Op.compare_lower("vale1is")) {
2635 // SYS #0, C8, C3, #5
2636 SYS_ALIAS(0, 8, 3, 5);
2637 } else if (!Op.compare_lower("vaale1is")) {
2638 // SYS #0, C8, C3, #7
2639 SYS_ALIAS(0, 8, 3, 7);
2640 } else if (!Op.compare_lower("vmalle1")) {
2641 // SYS #0, C8, C7, #0
2642 SYS_ALIAS(0, 8, 7, 0);
2643 } else if (!Op.compare_lower("alle2")) {
2644 // SYS #4, C8, C7, #0
2645 SYS_ALIAS(4, 8, 7, 0);
2646 } else if (!Op.compare_lower("vale2is")) {
2647 // SYS #4, C8, C3, #5
2648 SYS_ALIAS(4, 8, 3, 5);
2649 } else if (!Op.compare_lower("vale3is")) {
2650 // SYS #6, C8, C3, #5
2651 SYS_ALIAS(6, 8, 3, 5);
2652 } else if (!Op.compare_lower("alle3")) {
2653 // SYS #6, C8, C7, #0
2654 SYS_ALIAS(6, 8, 7, 0);
2655 } else if (!Op.compare_lower("vae1")) {
2656 // SYS #0, C8, C7, #1
2657 SYS_ALIAS(0, 8, 7, 1);
2658 } else if (!Op.compare_lower("vae2")) {
2659 // SYS #4, C8, C7, #1
2660 SYS_ALIAS(4, 8, 7, 1);
2661 } else if (!Op.compare_lower("vae3")) {
2662 // SYS #6, C8, C7, #1
2663 SYS_ALIAS(6, 8, 7, 1);
2664 } else if (!Op.compare_lower("aside1")) {
2665 // SYS #0, C8, C7, #2
2666 SYS_ALIAS(0, 8, 7, 2);
2667 } else if (!Op.compare_lower("vaae1")) {
2668 // SYS #0, C8, C7, #3
2669 SYS_ALIAS(0, 8, 7, 3);
2670 } else if (!Op.compare_lower("alle1")) {
2671 // SYS #4, C8, C7, #4
2672 SYS_ALIAS(4, 8, 7, 4);
2673 } else if (!Op.compare_lower("vale1")) {
2674 // SYS #0, C8, C7, #5
2675 SYS_ALIAS(0, 8, 7, 5);
2676 } else if (!Op.compare_lower("vale2")) {
2677 // SYS #4, C8, C7, #5
2678 SYS_ALIAS(4, 8, 7, 5);
2679 } else if (!Op.compare_lower("vale3")) {
2680 // SYS #6, C8, C7, #5
2681 SYS_ALIAS(6, 8, 7, 5);
2682 } else if (!Op.compare_lower("vaale1")) {
2683 // SYS #0, C8, C7, #7
2684 SYS_ALIAS(0, 8, 7, 7);
2685 } else if (!Op.compare_lower("ipas2e1")) {
2686 // SYS #4, C8, C4, #1
2687 SYS_ALIAS(4, 8, 4, 1);
2688 } else if (!Op.compare_lower("ipas2le1")) {
2689 // SYS #4, C8, C4, #5
2690 SYS_ALIAS(4, 8, 4, 5);
2691 } else if (!Op.compare_lower("ipas2e1is")) {
2692 // SYS #4, C8, C0, #1
2693 SYS_ALIAS(4, 8, 0, 1);
2694 } else if (!Op.compare_lower("ipas2le1is")) {
2695 // SYS #4, C8, C0, #5
2696 SYS_ALIAS(4, 8, 0, 5);
2697 } else if (!Op.compare_lower("vmalls12e1")) {
2698 // SYS #4, C8, C7, #6
2699 SYS_ALIAS(4, 8, 7, 6);
2700 } else if (!Op.compare_lower("vmalls12e1is")) {
2701 // SYS #4, C8, C3, #6
2702 SYS_ALIAS(4, 8, 3, 6);
2704 return TokError("invalid operand for TLBI instruction");
2710 Parser.Lex(); // Eat operand.
// "all"-variant ops (e.g. TLBI ALLE1) operate on everything and take no
// register; every other alias requires a GPR operand.
2712 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2713 bool HasRegister = false;
2715 // Check for the optional register operand.
2716 if (getLexer().is(AsmToken::Comma)) {
2717 Parser.Lex(); // Eat comma.
2719 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2720 return TokError("expected register operand");
2725 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2726 Parser.eatToEndOfStatement();
2727 return TokError("unexpected token in argument list");
2730 if (ExpectRegister && !HasRegister) {
2731 return TokError("specified " + Mnemonic + " op requires a register");
2733 else if (!ExpectRegister && HasRegister) {
2734 return TokError("specified " + Mnemonic + " op does not use a register");
2737 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier
/// instruction: either a "#imm" in [0, 15] or a named barrier option
/// (mapped via AArch64DB::DBarrierMapper). ISB only accepts the name "sy".
2741 AArch64AsmParser::OperandMatchResultTy
2742 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2743 MCAsmParser &Parser = getParser();
2744 const AsmToken &Tok = Parser.getTok();
2746 // Can be either a #imm style literal or an option name
2747 bool Hash = Tok.is(AsmToken::Hash);
2748 if (Hash || Tok.is(AsmToken::Integer)) {
2749 // Immediate operand.
2751 Parser.Lex(); // Eat the '#'
2752 const MCExpr *ImmVal;
2753 SMLoc ExprLoc = getLoc();
2754 if (getParser().parseExpression(ImmVal))
2755 return MatchOperand_ParseFail;
2756 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2758 Error(ExprLoc, "immediate value expected for barrier operand");
2759 return MatchOperand_ParseFail;
// Barrier option field is 4 bits wide.
2761 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2762 Error(ExprLoc, "barrier operand out of range");
2763 return MatchOperand_ParseFail;
2766 auto Mapper = AArch64DB::DBarrierMapper();
2768 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2769 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2770 ExprLoc, getContext()));
2771 return MatchOperand_Success;
2774 if (Tok.isNot(AsmToken::Identifier)) {
2775 TokError("invalid operand for instruction");
2776 return MatchOperand_ParseFail;
2780 auto Mapper = AArch64DB::DBarrierMapper();
2782 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2784 TokError("invalid barrier option name");
2785 return MatchOperand_ParseFail;
2788 // The only valid named option for ISB is 'sy'
2789 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2790 TokError("'sy' or #imm operand expected");
2791 return MatchOperand_ParseFail;
2794 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2795 getLoc(), getContext()));
2796 Parser.Lex(); // Consume the option
2798 return MatchOperand_Success;
/// tryParseSysReg - Parse a system-register operand (for MRS/MSR and the
/// MSR-immediate/PSTATE forms). The identifier is looked up in all three
/// namespaces; which encodings are valid is decided later during matching.
2801 AArch64AsmParser::OperandMatchResultTy
2802 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2803 MCAsmParser &Parser = getParser();
2804 const AsmToken &Tok = Parser.getTok();
2806 if (Tok.isNot(AsmToken::Identifier))
2807 return MatchOperand_NoMatch;
2810 auto MRSMapper = AArch64SysReg::MRSMapper();
2811 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2812 getSTI().getFeatureBits(), IsKnown);
2813 assert(IsKnown == (MRSReg != -1U) &&
2814 "register should be -1 if and only if it's unknown");
2816 auto MSRMapper = AArch64SysReg::MSRMapper();
2817 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2818 getSTI().getFeatureBits(), IsKnown);
2819 assert(IsKnown == (MSRReg != -1U) &&
2820 "register should be -1 if and only if it's unknown");
2822 auto PStateMapper = AArch64PState::PStateMapper();
2823 uint32_t PStateField =
2824 PStateMapper.fromString(Tok.getString(),
2825 getSTI().getFeatureBits(), IsKnown);
2826 assert(IsKnown == (PStateField != -1U) &&
2827 "register should be -1 if and only if it's unknown");
2829 Operands.push_back(AArch64Operand::CreateSysReg(
2830 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2831 Parser.Lex(); // Eat identifier
2833 return MatchOperand_Success;
2836 /// tryParseVectorRegister - Parse a vector register operand.
// Matches "Vn.kind" (e.g. v0.4s), pushing the register, an optional kind
// token, and — when followed by "[imm]" — a vector-index operand.
2837 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2838 MCAsmParser &Parser = getParser();
2839 if (Parser.getTok().isNot(AsmToken::Identifier))
2843 // Check for a vector register specifier first.
2845 int64_t Reg = tryMatchVectorRegister(Kind, false);
2849 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2850 // If there was an explicit qualifier, that goes on as a literal text
2854 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2856 // If there is an index specifier following the register, parse that too.
2857 if (Parser.getTok().is(AsmToken::LBrac)) {
2858 SMLoc SIdx = getLoc();
2859 Parser.Lex(); // Eat left bracket token.
2861 const MCExpr *ImmVal;
2862 if (getParser().parseExpression(ImmVal))
2864 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2866 TokError("immediate value expected for vector index");
2871 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2872 Error(E, "']' expected");
2876 Parser.Lex(); // Eat right bracket token.
2878 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2885 /// parseRegister - Parse a non-vector register operand.
// Tries vector-register syntax first, then a scalar register, and finally
// handles the literal "[1]" suffix a few instructions spell in their asm
// string (e.g. FMOVXDhighr) by pushing "[", "1", "]" as plain tokens.
2886 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2887 MCAsmParser &Parser = getParser();
2889 // Try for a vector register.
2890 if (!tryParseVectorRegister(Operands))
2893 // Try for a scalar register.
2894 int64_t Reg = tryParseRegister();
2898 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2900 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2901 // as a string token in the instruction itself.
2902 if (getLexer().getKind() == AsmToken::LBrac) {
2903 SMLoc LBracS = getLoc();
2905 const AsmToken &Tok = Parser.getTok();
2906 if (Tok.is(AsmToken::Integer)) {
2907 SMLoc IntS = getLoc();
2908 int64_t Val = Tok.getIntVal();
2911 if (getLexer().getKind() == AsmToken::RBrac) {
2912 SMLoc RBracS = getLoc();
2915 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2917 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2919 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate that may carry a leading ELF
/// relocation specifier of the form ":spec:expr" (e.g. :lo12:sym). When a
/// specifier is present the resulting expression is wrapped in an
/// AArch64MCExpr with the matching VariantKind.
2929 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2930 MCAsmParser &Parser = getParser();
2931 bool HasELFModifier = false;
2932 AArch64MCExpr::VariantKind RefKind;
2934 if (Parser.getTok().is(AsmToken::Colon)) {
2935 Parser.Lex(); // Eat ':'
2936 HasELFModifier = true;
2938 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2939 Error(Parser.getTok().getLoc(),
2940 "expect relocation specifier in operand after ':'");
2944 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2945 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2946 .Case("lo12", AArch64MCExpr::VK_LO12)
2947 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2948 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2949 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2950 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2951 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2952 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2953 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2954 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2955 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2956 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2957 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2958 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2959 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2960 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2961 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2962 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2963 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2964 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2965 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2966 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2967 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2968 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2969 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2970 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2971 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2972 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2973 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2974 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2975 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2976 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2977 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2978 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2979 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2980 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2981 .Default(AArch64MCExpr::VK_INVALID);
2983 if (RefKind == AArch64MCExpr::VK_INVALID) {
2984 Error(Parser.getTok().getLoc(),
2985 "expect relocation specifier in operand after ':'");
2989 Parser.Lex(); // Eat identifier
2991 if (Parser.getTok().isNot(AsmToken::Colon)) {
2992 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2995 Parser.Lex(); // Eat ':'
2998 if (getParser().parseExpression(ImmVal))
3002 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3007 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Accepts "{ v0.4s - v3.4s }" (range form) or "{ v0.4s, v1.4s, ... }"
// (explicit list); all elements must share the same ".kind" suffix and be
// sequential modulo 32. An optional trailing "[imm]" lane index is parsed
// into a VectorIndex operand.
3008 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
3009 MCAsmParser &Parser = getParser();
3010 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3012 Parser.Lex(); // Eat left bracket token.
3014 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3017 int64_t PrevReg = FirstReg;
3020 if (Parser.getTok().is(AsmToken::Minus)) {
3021 Parser.Lex(); // Eat the minus.
3023 SMLoc Loc = getLoc();
3025 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3028 // Any Kind suffixes must match on all regs in the list.
3029 if (Kind != NextKind)
3030 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap from 31 back to 0 within a list.
3032 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3034 if (Space == 0 || Space > 3) {
3035 return Error(Loc, "invalid number of vectors");
3041 while (Parser.getTok().is(AsmToken::Comma)) {
3042 Parser.Lex(); // Eat the comma token.
3044 SMLoc Loc = getLoc();
3046 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3049 // Any Kind suffixes must match on all regs in the list.
3050 if (Kind != NextKind)
3051 return Error(Loc, "mismatched register size suffix");
3053 // Registers must be incremental (with wraparound at 31)
3054 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3055 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3056 return Error(Loc, "registers must be sequential");
3063 if (Parser.getTok().isNot(AsmToken::RCurly))
3064 return Error(getLoc(), "'}' expected");
3065 Parser.Lex(); // Eat the '}' token.
3068 return Error(S, "invalid number of vectors");
3070 unsigned NumElements = 0;
3071 char ElementKind = 0;
3073 parseValidVectorKind(Kind, NumElements, ElementKind);
3075 Operands.push_back(AArch64Operand::CreateVectorList(
3076 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3078 // If there is an index specifier following the list, parse that too.
3079 if (Parser.getTok().is(AsmToken::LBrac)) {
3080 SMLoc SIdx = getLoc();
3081 Parser.Lex(); // Eat left bracket token.
3083 const MCExpr *ImmVal;
3084 if (getParser().parseExpression(ImmVal))
3086 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3088 TokError("immediate value expected for vector index");
3093 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3094 Error(E, "']' expected");
3098 Parser.Lex(); // Eat right bracket token.
3100 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed by
/// ", #0" (only an explicit zero index is legal). Used for operands like the
/// transfer-register of some system instructions.
3106 AArch64AsmParser::OperandMatchResultTy
3107 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3108 MCAsmParser &Parser = getParser();
3109 const AsmToken &Tok = Parser.getTok();
3110 if (!Tok.is(AsmToken::Identifier))
3111 return MatchOperand_NoMatch;
3113 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3115 MCContext &Ctx = getContext();
3116 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3117 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3118 return MatchOperand_NoMatch;
3121 Parser.Lex(); // Eat register
3123 if (Parser.getTok().isNot(AsmToken::Comma)) {
3125 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3126 return MatchOperand_Success;
3128 Parser.Lex(); // Eat comma.
3130 if (Parser.getTok().is(AsmToken::Hash))
3131 Parser.Lex(); // Eat hash
3133 if (Parser.getTok().isNot(AsmToken::Integer)) {
3134 Error(getLoc(), "index must be absent or #0");
3135 return MatchOperand_ParseFail;
3138 const MCExpr *ImmVal;
3139 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3140 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3141 Error(getLoc(), "index must be absent or #0");
3142 return MatchOperand_ParseFail;
3146 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3147 return MatchOperand_Success;
3150 /// parseOperand - Parse a arm instruction operand. For now this parses the
3151 /// operand regardless of the mnemonic.
// Dispatches on the first token: custom per-operand parsers first, then
// symbolic immediates, '[' memory operands, '{' vector lists, identifiers
// (condition codes / registers / labels), #imm and fp literals, and the
// "ldr rX, =value" pseudo.
3152 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3153 bool invertCondCode) {
3154 MCAsmParser &Parser = getParser();
3155 // Check if the current operand has a custom associated parser, if so, try to
3156 // custom parse the operand, or fallback to the general approach.
3157 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3158 if (ResTy == MatchOperand_Success)
3160 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3161 // there was a match, but an error occurred, in which case, just return that
3162 // the operand parsing failed.
3163 if (ResTy == MatchOperand_ParseFail)
3166 // Nothing custom, so do general case parsing.
3168 switch (getLexer().getKind()) {
3172 if (parseSymbolicImmVal(Expr))
3173 return Error(S, "invalid operand");
3175 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3176 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3179 case AsmToken::LBrac: {
3180 SMLoc Loc = Parser.getTok().getLoc();
3181 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3183 Parser.Lex(); // Eat '['
3185 // There's no comma after a '[', so we can parse the next operand
3187 return parseOperand(Operands, false, false);
3189 case AsmToken::LCurly:
3190 return parseVectorList(Operands);
3191 case AsmToken::Identifier: {
3192 // If we're expecting a Condition Code operand, then just parse that.
3194 return parseCondCode(Operands, invertCondCode);
3196 // If it's a register name, parse it.
3197 if (!parseRegister(Operands))
3200 // This could be an optional "shift" or "extend" operand.
3201 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3202 // We can only continue if no tokens were eaten.
3203 if (GotShift != MatchOperand_NoMatch)
3206 // This was not a register so parse other operands that start with an
3207 // identifier (like labels) as expressions and create them as immediates.
3208 const MCExpr *IdVal;
3210 if (getParser().parseExpression(IdVal))
3213 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3214 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3217 case AsmToken::Integer:
3218 case AsmToken::Real:
3219 case AsmToken::Hash: {
3220 // #42 -> immediate.
3222 if (getLexer().is(AsmToken::Hash))
3225 // Parse a negative sign
3226 bool isNegative = false;
3227 if (Parser.getTok().is(AsmToken::Minus)) {
3229 // We need to consume this token only when we have a Real, otherwise
3230 // we let parseSymbolicImmVal take care of it
3231 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3235 // The only Real that should come through here is a literal #0.0 for
3236 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3237 // so convert the value.
3238 const AsmToken &Tok = Parser.getTok();
3239 if (Tok.is(AsmToken::Real)) {
3240 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3241 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3242 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3243 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3244 Mnemonic != "fcmlt")
3245 return TokError("unexpected floating point literal");
3246 else if (IntVal != 0 || isNegative)
3247 return TokError("expected floating-point constant #0.0");
3248 Parser.Lex(); // Eat the token.
// Push "#0" and ".0" as literal tokens to match the instruction string.
3251 AArch64Operand::CreateToken("#0", false, S, getContext()));
3253 AArch64Operand::CreateToken(".0", false, S, getContext()));
3257 const MCExpr *ImmVal;
3258 if (parseSymbolicImmVal(ImmVal))
3261 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3262 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3265 case AsmToken::Equal: {
3266 SMLoc Loc = Parser.getTok().getLoc();
3267 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3268 return Error(Loc, "unexpected token in operand");
3269 Parser.Lex(); // Eat '='
3270 const MCExpr *SubExprVal;
3271 if (getParser().parseExpression(SubExprVal))
3274 if (Operands.size() < 2 ||
3275 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3276 return Error(Loc, "Only valid when first operand is register");
3279 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3280 Operands[1]->getReg());
3282 MCContext& Ctx = getContext();
3283 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3284 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3285 if (isa<MCConstantExpr>(SubExprVal)) {
3286 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Shift out trailing zero halfwords so the value fits a MOVZ+LSL encoding;
// X regs allow shifts up to 48, W regs only 16.
3287 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3288 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3292 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3293 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3294 Operands.push_back(AArch64Operand::CreateImm(
3295 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3297 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3298 ShiftAmt, true, S, E, Ctx));
3301 APInt Simm = APInt(64, Imm << ShiftAmt);
3302 // check if the immediate is an unsigned or signed 32-bit int for W regs
3303 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3304 return Error(Loc, "Immediate too large for register");
3306 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3307 const MCExpr *CPLoc =
3308 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3309 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3315 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3317 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3318 StringRef Name, SMLoc NameLoc,
3319 OperandVector &Operands) {
3320 MCAsmParser &Parser = getParser();
3321 Name = StringSwitch<StringRef>(Name.lower())
3322 .Case("beq", "b.eq")
3323 .Case("bne", "b.ne")
3324 .Case("bhs", "b.hs")
3325 .Case("bcs", "b.cs")
3326 .Case("blo", "b.lo")
3327 .Case("bcc", "b.cc")
3328 .Case("bmi", "b.mi")
3329 .Case("bpl", "b.pl")
3330 .Case("bvs", "b.vs")
3331 .Case("bvc", "b.vc")
3332 .Case("bhi", "b.hi")
3333 .Case("bls", "b.ls")
3334 .Case("bge", "b.ge")
3335 .Case("blt", "b.lt")
3336 .Case("bgt", "b.gt")
3337 .Case("ble", "b.le")
3338 .Case("bal", "b.al")
3339 .Case("bnv", "b.nv")
3342 // First check for the AArch64-specific .req directive.
3343 if (Parser.getTok().is(AsmToken::Identifier) &&
3344 Parser.getTok().getIdentifier() == ".req") {
3345 parseDirectiveReq(Name, NameLoc);
3346 // We always return 'error' for this, as we're done with this
3347 // statement and don't need to match the 'instruction."
3351 // Create the leading tokens for the mnemonic, split by '.' characters.
3352 size_t Start = 0, Next = Name.find('.');
3353 StringRef Head = Name.slice(Start, Next);
3355 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3356 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3357 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3358 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3359 Parser.eatToEndOfStatement();
3364 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3367 // Handle condition codes for a branch mnemonic
3368 if (Head == "b" && Next != StringRef::npos) {
3370 Next = Name.find('.', Start + 1);
3371 Head = Name.slice(Start + 1, Next);
3373 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3374 (Head.data() - Name.data()));
3375 AArch64CC::CondCode CC = parseCondCodeString(Head);
3376 if (CC == AArch64CC::Invalid)
3377 return Error(SuffixLoc, "invalid condition code");
3379 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3381 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3384 // Add the remaining tokens in the mnemonic.
3385 while (Next != StringRef::npos) {
3387 Next = Name.find('.', Start + 1);
3388 Head = Name.slice(Start, Next);
3389 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3390 (Head.data() - Name.data()) + 1);
3392 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3395 // Conditional compare instructions have a Condition Code operand, which needs
3396 // to be parsed and an immediate operand created.
3397 bool condCodeFourthOperand =
3398 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3399 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3400 Head == "csinc" || Head == "csinv" || Head == "csneg");
3402 // These instructions are aliases to some of the conditional select
3403 // instructions. However, the condition code is inverted in the aliased
3406 // FIXME: Is this the correct way to handle these? Or should the parser
3407 // generate the aliased instructions directly?
3408 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3409 bool condCodeThirdOperand =
3410 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3412 // Read the remaining operands.
3413 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3414 // Read the first operand.
3415 if (parseOperand(Operands, false, false)) {
3416 Parser.eatToEndOfStatement();
3421 while (getLexer().is(AsmToken::Comma)) {
3422 Parser.Lex(); // Eat the comma.
3424 // Parse and remember the operand.
3425 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3426 (N == 3 && condCodeThirdOperand) ||
3427 (N == 2 && condCodeSecondOperand),
3428 condCodeSecondOperand || condCodeThirdOperand)) {
3429 Parser.eatToEndOfStatement();
3433 // After successfully parsing some operands there are two special cases to
3434 // consider (i.e. notional operands not separated by commas). Both are due
3435 // to memory specifiers:
3436 // + An RBrac will end an address for load/store/prefetch
3437 // + An '!' will indicate a pre-indexed operation.
3439 // It's someone else's responsibility to make sure these tokens are sane
3440 // in the given context!
3441 if (Parser.getTok().is(AsmToken::RBrac)) {
3442 SMLoc Loc = Parser.getTok().getLoc();
3443 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3448 if (Parser.getTok().is(AsmToken::Exclaim)) {
3449 SMLoc Loc = Parser.getTok().getLoc();
3450 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3459 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3460 SMLoc Loc = Parser.getTok().getLoc();
3461 Parser.eatToEndOfStatement();
3462 return Error(Loc, "unexpected token in argument list");
3465 Parser.Lex(); // Consume the EndOfStatement
3469 // FIXME: This entire function is a giant hack to provide us with decent
3470 // operand range validation/diagnostics until TableGen/MC can be extended
3471 // to support autogeneration of this kind of validation.
// Post-match semantic validation. Two passes over the opcode:
//  1. Writeback-hazard checks for LDP/STP/LDR/STR pre/post-indexed forms and
//     Rt==Rt2 checks for pair loads — these encodings are architecturally
//     UNPREDICTABLE, so reject them with a targeted diagnostic.
//  2. Immediate/expression checks for ADD/SUB-immediate relocatable operands.
// Loc holds the start SMLoc of each parsed operand (mnemonic excluded), so
// Loc[0] is the first operand, etc. Returns true if a diagnostic was emitted.
3472 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3473                                            SmallVectorImpl<SMLoc> &Loc) {
3474   const MCRegisterInfo *RI = getContext().getRegisterInfo();
3475   // Check for indexed addressing modes w/ the base register being the
3476   // same as a destination/source register or pair load where
3477   // the Rt == Rt2. All of those are undefined behaviour.
3478   switch (Inst.getOpcode()) {
3479   case AArch64::LDPSWpre:
3480   case AArch64::LDPWpost:
3481   case AArch64::LDPWpre:
3482   case AArch64::LDPXpost:
3483   case AArch64::LDPXpre: {
// Operand 0 of the pre/post-indexed forms is skipped here (presumably the
// writeback base def — TODO confirm against the tablegen definitions);
// Rt/Rt2/Rn are at MCInst operand indices 1..3.
3484     unsigned Rt = Inst.getOperand(1).getReg();
3485     unsigned Rt2 = Inst.getOperand(2).getReg();
3486     unsigned Rn = Inst.getOperand(3).getReg();
// isSubRegisterEq also matches the register itself, so this catches both
// Rn == Rt and Rn overlapping a sub-register of Rt.
3487     if (RI->isSubRegisterEq(Rn, Rt))
3488       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3489                            "is also a destination");
3490     if (RI->isSubRegisterEq(Rn, Rt2))
3491       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3492                            "is also a destination");
3495   case AArch64::LDPDi:
3496   case AArch64::LDPQi:
3497   case AArch64::LDPSi:
3498   case AArch64::LDPSWi:
3499   case AArch64::LDPWi:
3500   case AArch64::LDPXi: {
// Non-writeback pair loads: only the Rt == Rt2 hazard applies.
3501     unsigned Rt = Inst.getOperand(0).getReg();
3502     unsigned Rt2 = Inst.getOperand(1).getReg();
3504       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3507   case AArch64::LDPDpost:
3508   case AArch64::LDPDpre:
3509   case AArch64::LDPQpost:
3510   case AArch64::LDPQpre:
3511   case AArch64::LDPSpost:
3512   case AArch64::LDPSpre:
3513   case AArch64::LDPSWpost: {
// FP/SIMD pair loads with writeback: base is a GPR and Rt/Rt2 are FP regs,
// so only the Rt == Rt2 check is needed here.
3514     unsigned Rt = Inst.getOperand(1).getReg();
3515     unsigned Rt2 = Inst.getOperand(2).getReg();
3517       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3520   case AArch64::STPDpost:
3521   case AArch64::STPDpre:
3522   case AArch64::STPQpost:
3523   case AArch64::STPQpre:
3524   case AArch64::STPSpost:
3525   case AArch64::STPSpre:
3526   case AArch64::STPWpost:
3527   case AArch64::STPWpre:
3528   case AArch64::STPXpost:
3529   case AArch64::STPXpre: {
3530     unsigned Rt = Inst.getOperand(1).getReg();
3531     unsigned Rt2 = Inst.getOperand(2).getReg();
3532     unsigned Rn = Inst.getOperand(3).getReg();
3533     if (RI->isSubRegisterEq(Rn, Rt))
3534       return Error(Loc[0], "unpredictable STP instruction, writeback base "
3535                            "is also a source");
3536     if (RI->isSubRegisterEq(Rn, Rt2))
3537       return Error(Loc[1], "unpredictable STP instruction, writeback base "
3538                            "is also a source");
3541   case AArch64::LDRBBpre:
3542   case AArch64::LDRBpre:
3543   case AArch64::LDRHHpre:
3544   case AArch64::LDRHpre:
3545   case AArch64::LDRSBWpre:
3546   case AArch64::LDRSBXpre:
3547   case AArch64::LDRSHWpre:
3548   case AArch64::LDRSHXpre:
3549   case AArch64::LDRSWpre:
3550   case AArch64::LDRWpre:
3551   case AArch64::LDRXpre:
3552   case AArch64::LDRBBpost:
3553   case AArch64::LDRBpost:
3554   case AArch64::LDRHHpost:
3555   case AArch64::LDRHpost:
3556   case AArch64::LDRSBWpost:
3557   case AArch64::LDRSBXpost:
3558   case AArch64::LDRSHWpost:
3559   case AArch64::LDRSHXpost:
3560   case AArch64::LDRSWpost:
3561   case AArch64::LDRWpost:
3562   case AArch64::LDRXpost: {
// Single-register indexed loads: writeback base may not overlap Rt.
3563     unsigned Rt = Inst.getOperand(1).getReg();
3564     unsigned Rn = Inst.getOperand(2).getReg();
3565     if (RI->isSubRegisterEq(Rn, Rt))
3566       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3567                            "is also a source");
3570   case AArch64::STRBBpost:
3571   case AArch64::STRBpost:
3572   case AArch64::STRHHpost:
3573   case AArch64::STRHpost:
3574   case AArch64::STRWpost:
3575   case AArch64::STRXpost:
3576   case AArch64::STRBBpre:
3577   case AArch64::STRBpre:
3578   case AArch64::STRHHpre:
3579   case AArch64::STRHpre:
3580   case AArch64::STRWpre:
3581   case AArch64::STRXpre: {
3582     unsigned Rt = Inst.getOperand(1).getReg();
3583     unsigned Rn = Inst.getOperand(2).getReg();
3584     if (RI->isSubRegisterEq(Rn, Rt))
3585       return Error(Loc[0], "unpredictable STR instruction, writeback base "
3586                            "is also a source");
3591   // Now check immediate ranges. Separate from the above as there is overlap
3592   // in the instructions being checked and this keeps the nested conditionals
3594   switch (Inst.getOpcode()) {
3595   case AArch64::ADDSWri:
3596   case AArch64::ADDSXri:
3597   case AArch64::ADDWri:
3598   case AArch64::ADDXri:
3599   case AArch64::SUBSWri:
3600   case AArch64::SUBSXri:
3601   case AArch64::SUBWri:
3602   case AArch64::SUBXri: {
3603     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3604     // some slight duplication here.
3605     if (Inst.getOperand(2).isExpr()) {
3606       const MCExpr *Expr = Inst.getOperand(2).getExpr();
3607       AArch64MCExpr::VariantKind ELFRefKind;
3608       MCSymbolRefExpr::VariantKind DarwinRefKind;
3610       if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3611         return Error(Loc[2], "invalid immediate expression");
3614       // Only allow these with ADDXri.
// Darwin @pageoff / @tlvppageoff relocations are 64-bit only.
3615       if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3616            DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3617           Inst.getOpcode() == AArch64::ADDXri)
3620       // Only allow these with ADDXri/ADDWri
// ELF :lo12:-style relocations are valid on both register widths.
3621       if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3622            ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3623            ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3624            ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3625            ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3626            ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3627            ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3628            ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3629           (Inst.getOpcode() == AArch64::ADDXri ||
3630            Inst.getOpcode() == AArch64::ADDWri))
3633       // Don't allow expressions in the immediate field otherwise
3634       return Error(Loc[2], "invalid immediate expression");
// Translate a matcher failure code into a human-readable diagnostic at Loc.
// Always returns true (the MCTargetAsmParser "error occurred" convention),
// so callers can simply `return showMatchError(...)`.
3643 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3645   case Match_MissingFeature:
3647                  "instruction requires a CPU feature not currently enabled");
3648   case Match_InvalidOperand:
3649     return Error(Loc, "invalid operand for instruction");
3650   case Match_InvalidSuffix:
3651     return Error(Loc, "invalid type suffix for instruction");
3652   case Match_InvalidCondCode:
3653     return Error(Loc, "expected AArch64 condition code");
3654   case Match_AddSubRegExtendSmall:
3656       "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3657   case Match_AddSubRegExtendLarge:
3659       "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3660   case Match_AddSubSecondSource:
3662       "expected compatible register, symbol or integer in range [0, 4095]");
3663   case Match_LogicalSecondSource:
3664     return Error(Loc, "expected compatible register or logical immediate");
3665   case Match_InvalidMovImm32Shift:
3666     return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3667   case Match_InvalidMovImm64Shift:
3668     return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3669   case Match_AddSubRegShift32:
3671       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3672   case Match_AddSubRegShift64:
3674       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3675   case Match_InvalidFPImm:
3677         "expected compatible register or floating-point constant");
3678   case Match_InvalidMemoryIndexedSImm9:
3679     return Error(Loc, "index must be an integer in range [-256, 255].");
3680   case Match_InvalidMemoryIndexed4SImm7:
3681     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3682   case Match_InvalidMemoryIndexed8SImm7:
3683     return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3684   case Match_InvalidMemoryIndexed16SImm7:
3685     return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3686   case Match_InvalidMemoryWExtend8:
3688                  "expected 'uxtw' or 'sxtw' with optional shift of #0");
3689   case Match_InvalidMemoryWExtend16:
3691                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3692   case Match_InvalidMemoryWExtend32:
3694                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3695   case Match_InvalidMemoryWExtend64:
3697                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3698   case Match_InvalidMemoryWExtend128:
3700                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3701   case Match_InvalidMemoryXExtend8:
3703                  "expected 'lsl' or 'sxtx' with optional shift of #0");
3704   case Match_InvalidMemoryXExtend16:
3706                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3707   case Match_InvalidMemoryXExtend32:
3709                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3710   case Match_InvalidMemoryXExtend64:
3712                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3713   case Match_InvalidMemoryXExtend128:
3715                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3716   case Match_InvalidMemoryIndexed1:
3717     return Error(Loc, "index must be an integer in range [0, 4095].");
3718   case Match_InvalidMemoryIndexed2:
3719     return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3720   case Match_InvalidMemoryIndexed4:
3721     return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3722   case Match_InvalidMemoryIndexed8:
3723     return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3724   case Match_InvalidMemoryIndexed16:
3725     return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3726   case Match_InvalidImm0_1:
3727     return Error(Loc, "immediate must be an integer in range [0, 1].");
3728   case Match_InvalidImm0_7:
3729     return Error(Loc, "immediate must be an integer in range [0, 7].");
3730   case Match_InvalidImm0_15:
3731     return Error(Loc, "immediate must be an integer in range [0, 15].");
3732   case Match_InvalidImm0_31:
3733     return Error(Loc, "immediate must be an integer in range [0, 31].");
3734   case Match_InvalidImm0_63:
3735     return Error(Loc, "immediate must be an integer in range [0, 63].");
3736   case Match_InvalidImm0_127:
3737     return Error(Loc, "immediate must be an integer in range [0, 127].");
3738   case Match_InvalidImm0_65535:
3739     return Error(Loc, "immediate must be an integer in range [0, 65535].");
3740   case Match_InvalidImm1_8:
3741     return Error(Loc, "immediate must be an integer in range [1, 8].");
3742   case Match_InvalidImm1_16:
3743     return Error(Loc, "immediate must be an integer in range [1, 16].");
3744   case Match_InvalidImm1_32:
3745     return Error(Loc, "immediate must be an integer in range [1, 32].");
3746   case Match_InvalidImm1_64:
3747     return Error(Loc, "immediate must be an integer in range [1, 64].");
3748   case Match_InvalidIndex1:
3749     return Error(Loc, "expected lane specifier '[1]'");
3750   case Match_InvalidIndexB:
3751     return Error(Loc, "vector lane must be an integer in range [0, 15].");
3752   case Match_InvalidIndexH:
3753     return Error(Loc, "vector lane must be an integer in range [0, 7].");
3754   case Match_InvalidIndexS:
3755     return Error(Loc, "vector lane must be an integer in range [0, 3].");
3756   case Match_InvalidIndexD:
3757     return Error(Loc, "vector lane must be an integer in range [0, 1].");
3758   case Match_InvalidLabel:
3759     return Error(Loc, "expected label or encodable integer pc offset");
3761     return Error(Loc, "expected readable system register");
3763     return Error(Loc, "expected writable system register or pstate");
3764   case Match_MnemonicFail:
3765     return Error(Loc, "unrecognized instruction mnemonic");
// Any new Match_* code produced by TableGen must be given a message above.
3767   llvm_unreachable("unexpected error code!");
3771 static const char *getSubtargetFeatureName(uint64_t Val);
// Top-level match-and-emit hook called once the operand list is parsed.
// Structure visible here:
//  1. A series of hand-written alias rewrites that TableGen's InstAlias
//     machinery cannot express (lsl->ubfm, bfc->bfm, bfi/sbfiz/ubfiz->*bfm,
//     bfxil/sbfx/ubfx->*bfm, sxtw/uxtw/sxt[bh]/uxt[bh] register-width fixups,
//     fmov Rd, #0.0 -> fmov Rd, [wx]zr). These mutate Operands in place.
//  2. Two match attempts: first against the short-form NEON table (variant 1),
//     then the long-form table (variant 0), preferring the short-form error
//     when both fail on the suffix token.
//  3. Diagnostics routing through showMatchError / validateInstruction.
// Returns true on error.
3773 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3774                                                OperandVector &Operands,
3776                                                uint64_t &ErrorInfo,
3777                                                bool MatchingInlineAsm) {
3778   assert(!Operands.empty() && "Unexpect empty operand list!");
3779   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3780   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3782   StringRef Tok = Op.getToken();
3783   unsigned NumOperands = Operands.size();
// "lsl Rd, Rn, #imm" is encoded as UBFM with immr = (width - imm) mod width
// and imms = width - 1 - imm; compute both and rewrite the operand list.
3785   if (NumOperands == 4 && Tok == "lsl") {
3786     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3787     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3788     if (Op2.isReg() && Op3.isImm()) {
3789       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3791         uint64_t Op3Val = Op3CE->getValue();
3792         uint64_t NewOp3Val = 0;
3793         uint64_t NewOp4Val = 0;
// Register width (W vs X) selects the 32- or 64-bit modular arithmetic.
3794         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3796           NewOp3Val = (32 - Op3Val) & 0x1f;
3797           NewOp4Val = 31 - Op3Val;
3799           NewOp3Val = (64 - Op3Val) & 0x3f;
3800           NewOp4Val = 63 - Op3Val;
3803         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3804         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3806         Operands[0] = AArch64Operand::CreateToken(
3807             "ubfm", false, Op.getStartLoc(), getContext());
3808         Operands.push_back(AArch64Operand::CreateImm(
3809             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3810         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3811                                                 Op3.getEndLoc(), getContext());
3814   } else if (NumOperands == 4 && Tok == "bfc") {
3815     // FIXME: Horrible hack to handle BFC->BFM alias.
3816     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
// NOTE(review): LSBOp/WidthOp are deliberately copies, not references —
// Operands[2]/[3] are overwritten below while both are still read for their
// source locations. Do not "fix" these to references without reordering.
3817     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3818     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3820     if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3821       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3822       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3824       if (LSBCE && WidthCE) {
3825         uint64_t LSB = LSBCE->getValue();
3826         uint64_t Width = WidthCE->getValue();
3828         uint64_t RegWidth = 0;
3829         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3835         if (LSB >= RegWidth)
3836           return Error(LSBOp.getStartLoc(),
3837                        "expected integer in range [0, 31]");
3838         if (Width < 1 || Width > RegWidth)
3839           return Error(WidthOp.getStartLoc(),
3840                        "expected integer in range [1, 32]");
3844           ImmR = (32 - LSB) & 0x1f;
3846           ImmR = (64 - LSB) & 0x3f;
3848         uint64_t ImmS = Width - 1;
3850         if (ImmR != 0 && ImmS >= ImmR)
3851           return Error(WidthOp.getStartLoc(),
3852                        "requested insert overflows register");
// BFC Rd, #lsb, #width  ==  BFM Rd, ZR, #immr, #imms with the zero register
// of matching width as the (unwritten) source.
3854         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3855         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3856         Operands[0] = AArch64Operand::CreateToken(
3857               "bfm", false, Op.getStartLoc(), getContext());
3858         Operands[2] = AArch64Operand::CreateReg(
3859             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3860             SMLoc(), getContext());
3861         Operands[3] = AArch64Operand::CreateImm(
3862             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3863         Operands.emplace_back(
3864             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3865                                       WidthOp.getEndLoc(), getContext()));
3868   } else if (NumOperands == 5) {
3869     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3870     // UBFIZ -> UBFM aliases.
3871     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3872       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3873       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3874       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3876       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3877         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3878         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3880         if (Op3CE && Op4CE) {
3881           uint64_t Op3Val = Op3CE->getValue();
3882           uint64_t Op4Val = Op4CE->getValue();
3884           uint64_t RegWidth = 0;
3885           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb/width against the destination register width before
// converting to the immr/imms encoding.
3891           if (Op3Val >= RegWidth)
3892             return Error(Op3.getStartLoc(),
3893                          "expected integer in range [0, 31]");
3894           if (Op4Val < 1 || Op4Val > RegWidth)
3895             return Error(Op4.getStartLoc(),
3896                          "expected integer in range [1, 32]");
3898           uint64_t NewOp3Val = 0;
3900             NewOp3Val = (32 - Op3Val) & 0x1f;
3902             NewOp3Val = (64 - Op3Val) & 0x3f;
3904           uint64_t NewOp4Val = Op4Val - 1;
3906           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3907             return Error(Op4.getStartLoc(),
3908                          "requested insert overflows register");
3910           const MCExpr *NewOp3 =
3911               MCConstantExpr::create(NewOp3Val, getContext());
3912           const MCExpr *NewOp4 =
3913               MCConstantExpr::create(NewOp4Val, getContext());
3914           Operands[3] = AArch64Operand::CreateImm(
3915               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3916           Operands[4] = AArch64Operand::CreateImm(
3917               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3919             Operands[0] = AArch64Operand::CreateToken(
3920                 "bfm", false, Op.getStartLoc(), getContext());
3921           else if (Tok == "sbfiz")
3922             Operands[0] = AArch64Operand::CreateToken(
3923                 "sbfm", false, Op.getStartLoc(), getContext());
3924           else if (Tok == "ubfiz")
3925             Operands[0] = AArch64Operand::CreateToken(
3926                 "ubfm", false, Op.getStartLoc(), getContext());
3928             llvm_unreachable("No valid mnemonic for alias?");
3932     // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3933     // UBFX -> UBFM aliases.
3934   } else if (NumOperands == 5 &&
3935              (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3936     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3937     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3938     AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3940     if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3941       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3942       const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3944       if (Op3CE && Op4CE) {
3945         uint64_t Op3Val = Op3CE->getValue();
3946         uint64_t Op4Val = Op4CE->getValue();
3948         uint64_t RegWidth = 0;
3949         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3955         if (Op3Val >= RegWidth)
3956           return Error(Op3.getStartLoc(),
3957                        "expected integer in range [0, 31]");
3958         if (Op4Val < 1 || Op4Val > RegWidth)
3959           return Error(Op4.getStartLoc(),
3960                        "expected integer in range [1, 32]");
// For the extract forms, imms = lsb + width - 1; it must not run past the
// register, and the < Op3Val comparison also catches unsigned wraparound.
3962         uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3964         if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3965           return Error(Op4.getStartLoc(),
3966                        "requested extract overflows register");
3968         const MCExpr *NewOp4 =
3969             MCConstantExpr::create(NewOp4Val, getContext());
3970         Operands[4] = AArch64Operand::CreateImm(
3971             NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3973           Operands[0] = AArch64Operand::CreateToken(
3974               "bfm", false, Op.getStartLoc(), getContext());
3975         else if (Tok == "sbfx")
3976           Operands[0] = AArch64Operand::CreateToken(
3977               "sbfm", false, Op.getStartLoc(), getContext());
3978         else if (Tok == "ubfx")
3979           Operands[0] = AArch64Operand::CreateToken(
3980               "ubfm", false, Op.getStartLoc(), getContext());
3982           llvm_unreachable("No valid mnemonic for alias?");
3987   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3988   //        InstAlias can't quite handle this since the reg classes aren't
3990   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3991     // The source register can be Wn here, but the matcher expects a
3992     // GPR64. Twiddle it here if necessary.
3993     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3995       unsigned Reg = getXRegFromWReg(Op.getReg());
3996       Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3997                                               Op.getEndLoc(), getContext());
4000   // FIXME: Likewise for sxt[bh] with a Xd dst operand
4001   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4002     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4004         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4006       // The source register can be Wn here, but the matcher expects a
4007       // GPR64. Twiddle it here if necessary.
4008       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4010         unsigned Reg = getXRegFromWReg(Op.getReg());
4011         Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4012                                                 Op.getEndLoc(), getContext());
4016   // FIXME: Likewise for uxt[bh] with a Xd dst operand
4017   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4018     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4020         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Unlike the sxt[bh] case above, uxt[bh] narrows the *destination* (operand
// 1) from Xd to Wd, since the upper 32 bits are zero anyway.
4022       // The source register can be Wn here, but the matcher expects a
4023       // GPR32. Twiddle it here if necessary.
4024       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4026         unsigned Reg = getWRegFromXReg(Op.getReg());
4027         Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4028                                                 Op.getEndLoc(), getContext());
4033   // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4034   if (NumOperands == 3 && Tok == "fmov") {
4035     AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
4036     AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 is the sentinel used for an exact #0.0
// immediate — TODO confirm against AArch64Operand's FP-immediate encoding.
4037     if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
4039           !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
4043       Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
4044                                               Op.getEndLoc(), getContext());
4049   // First try to match against the secondary set of tables containing the
4050   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4051   unsigned MatchResult =
4052       MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4054   // If that fails, try against the alternate table containing long-form NEON:
4055   // "fadd v0.2s, v1.2s, v2.2s"
4056   if (MatchResult != Match_Success) {
4057     // But first, save the short-form match result: we can use it in case the
4058     // long-form match also fails.
4059     auto ShortFormNEONErrorInfo = ErrorInfo;
4060     auto ShortFormNEONMatchResult = MatchResult;
4063         MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4065     // Now, both matches failed, and the long-form match failed on the mnemonic
4066     // suffix token operand.  The short-form match failure is probably more
4067     // relevant: use it instead.
4068     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4069         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4070         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4071       MatchResult = ShortFormNEONMatchResult;
4072       ErrorInfo = ShortFormNEONErrorInfo;
4077   switch (MatchResult) {
4078   case Match_Success: {
4079     // Perform range checking and other semantic validations
4080     SmallVector<SMLoc, 8> OperandLocs;
4081     NumOperands = Operands.size();
// Skip index 0: the mnemonic token has no corresponding MCInst operand.
4082     for (unsigned i = 1; i < NumOperands; ++i)
4083       OperandLocs.push_back(Operands[i]->getStartLoc());
4084     if (validateInstruction(Inst, OperandLocs))
4088       Out.EmitInstruction(Inst, getSTI());
4091   case Match_MissingFeature: {
4092     assert(ErrorInfo && "Unknown missing feature!");
4093     // Special case the error message for the very common case where only
4094     // a single subtarget feature is missing (neon, e.g.).
4095     std::string Msg = "instruction requires:";
// ErrorInfo is a feature bitmask here; walk it bit by bit and append the
// printable name of each missing feature.
4097     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4098       if (ErrorInfo & Mask) {
4100         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4104     return Error(IDLoc, Msg);
4106   case Match_MnemonicFail:
4107     return showMatchError(IDLoc, MatchResult);
4108   case Match_InvalidOperand: {
4109     SMLoc ErrorLoc = IDLoc;
// ErrorInfo here is the index of the offending operand (~0ULL if unknown).
4111     if (ErrorInfo != ~0ULL) {
4112       if (ErrorInfo >= Operands.size())
4113         return Error(IDLoc, "too few operands for instruction");
4115       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4116       if (ErrorLoc == SMLoc())
4119     // If the match failed on a suffix token operand, tweak the diagnostic
4121     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4122         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4123       MatchResult = Match_InvalidSuffix;
4125     return showMatchError(ErrorLoc, MatchResult);
4127   case Match_InvalidMemoryIndexed1:
4128   case Match_InvalidMemoryIndexed2:
4129   case Match_InvalidMemoryIndexed4:
4130   case Match_InvalidMemoryIndexed8:
4131   case Match_InvalidMemoryIndexed16:
4132   case Match_InvalidCondCode:
4133   case Match_AddSubRegExtendSmall:
4134   case Match_AddSubRegExtendLarge:
4135   case Match_AddSubSecondSource:
4136   case Match_LogicalSecondSource:
4137   case Match_AddSubRegShift32:
4138   case Match_AddSubRegShift64:
4139   case Match_InvalidMovImm32Shift:
4140   case Match_InvalidMovImm64Shift:
4141   case Match_InvalidFPImm:
4142   case Match_InvalidMemoryWExtend8:
4143   case Match_InvalidMemoryWExtend16:
4144   case Match_InvalidMemoryWExtend32:
4145   case Match_InvalidMemoryWExtend64:
4146   case Match_InvalidMemoryWExtend128:
4147   case Match_InvalidMemoryXExtend8:
4148   case Match_InvalidMemoryXExtend16:
4149   case Match_InvalidMemoryXExtend32:
4150   case Match_InvalidMemoryXExtend64:
4151   case Match_InvalidMemoryXExtend128:
4152   case Match_InvalidMemoryIndexed4SImm7:
4153   case Match_InvalidMemoryIndexed8SImm7:
4154   case Match_InvalidMemoryIndexed16SImm7:
4155   case Match_InvalidMemoryIndexedSImm9:
4156   case Match_InvalidImm0_1:
4157   case Match_InvalidImm0_7:
4158   case Match_InvalidImm0_15:
4159   case Match_InvalidImm0_31:
4160   case Match_InvalidImm0_63:
4161   case Match_InvalidImm0_127:
4162   case Match_InvalidImm0_65535:
4163   case Match_InvalidImm1_8:
4164   case Match_InvalidImm1_16:
4165   case Match_InvalidImm1_32:
4166   case Match_InvalidImm1_64:
4167   case Match_InvalidIndex1:
4168   case Match_InvalidIndexB:
4169   case Match_InvalidIndexH:
4170   case Match_InvalidIndexS:
4171   case Match_InvalidIndexD:
4172   case Match_InvalidLabel:
4175     if (ErrorInfo >= Operands.size())
4176       return Error(IDLoc, "too few operands for instruction");
4177     // Any time we get here, there's nothing fancy to do. Just get the
4178     // operand SMLoc and display the diagnostic.
4179     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4180     if (ErrorLoc == SMLoc())
4182     return showMatchError(ErrorLoc, MatchResult);
4186   llvm_unreachable("Implement any new match types added!");
4189 /// ParseDirective parses the arm specific directives
// Dispatcher for AArch64-specific assembler directives. Returns the result
// of the chosen handler; directives not handled here fall through to the
// generic directive parser. ".inst" is only accepted for ELF-style targets
// (neither MachO nor COFF), per the guard below.
4190 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4191   const MCObjectFileInfo::Environment Format =
4192       getContext().getObjectFileInfo()->getObjectFileType();
4193   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4194   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4196   StringRef IDVal = DirectiveID.getIdentifier();
4197   SMLoc Loc = DirectiveID.getLoc();
// .hword/.word/.xword emit 2/4/8-byte data values respectively.
4198   if (IDVal == ".hword")
4199     return parseDirectiveWord(2, Loc);
4200   if (IDVal == ".word")
4201     return parseDirectiveWord(4, Loc);
4202   if (IDVal == ".xword")
4203     return parseDirectiveWord(8, Loc);
4204   if (IDVal == ".tlsdesccall")
4205     return parseDirectiveTLSDescCall(Loc);
// ".pool" is accepted as a synonym for ".ltorg" (constant-pool flush).
4206   if (IDVal == ".ltorg" || IDVal == ".pool")
4207     return parseDirectiveLtorg(Loc);
4208   if (IDVal == ".unreq")
4209     return parseDirectiveUnreq(Loc);
4211   if (!IsMachO && !IsCOFF) {
4212     if (IDVal == ".inst")
4213       return parseDirectiveInst(Loc);
4216   return parseDirectiveLOH(IDVal, Loc);
4219 /// parseDirectiveWord
4220 ///  ::= .word [ expression (, expression)* ]
// Emit a comma-separated list of expressions, each as a Size-byte value
// (Size is 2/4/8 for .hword/.word/.xword). Returns true on error.
4221 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4222   MCAsmParser &Parser = getParser();
// A bare directive with no operands is legal and emits nothing.
4223   if (getLexer().isNot(AsmToken::EndOfStatement)) {
4225       const MCExpr *Value;
4226       if (getParser().parseExpression(Value))
4229       getParser().getStreamer().EmitValue(Value, Size, L);
4231       if (getLexer().is(AsmToken::EndOfStatement))
4234       // FIXME: Improve diagnostic. Only comma is legal between expressions.
4235       if (getLexer().isNot(AsmToken::Comma))
4236         return Error(L, "unexpected token in directive");
4245 /// parseDirectiveInst
4246 /// ::= .inst opcode [, ...]
// Emits one or more raw 32-bit instruction words. Each operand must fold to
// a constant expression; the value is handed to the target streamer's
// emitInst, which encodes it as an instruction word rather than plain data.
// NOTE(review): several lines are elided from this listing (originals
// 4252-4257, 4260-4268, 4270-4285), including the declaration of 'Expr'
// and the surrounding loop/return structure.
4247 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4248 MCAsmParser &Parser = getParser();
// .inst with no operands is an error; recover by skipping to end of line.
4249 if (getLexer().is(AsmToken::EndOfStatement)) {
4250 Parser.eatToEndOfStatement();
4251 Error(Loc, "expected expression following directive");
4258 if (getParser().parseExpression(Expr)) {
4259 Error(Loc, "expected expression");
// Only compile-time constants are valid .inst operands.
4263 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4265 Error(Loc, "expected constant expression");
4269 getTargetStreamer().emitInst(Value->getValue());
// End of statement terminates the operand list; otherwise require a comma.
4271 if (getLexer().is(AsmToken::EndOfStatement))
4274 if (getLexer().isNot(AsmToken::Comma)) {
4275 Error(Loc, "unexpected token in directive");
4279 Parser.Lex(); // Eat comma.
4286 // parseDirectiveTLSDescCall:
4287 // ::= .tlsdesccall symbol
// Emits the pseudo-instruction AArch64::TLSDESCCALL carrying the named
// symbol wrapped in a VK_TLSDESC target expression, marking the blr in a
// TLS-descriptor call sequence for relocation/relaxation purposes.
// NOTE(review): the declarations of 'Name' and 'Inst' (original lines 4289,
// 4297) and the trailing return are elided from this listing.
4288 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4290 if (getParser().parseIdentifier(Name))
4291 return Error(L, "expected symbol after directive");
// Build sym -> :tlsdesc:sym as the single expression operand.
4293 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4294 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4295 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4298 Inst.setOpcode(AArch64::TLSDESCCALL);
4299 Inst.addOperand(MCOperand::createExpr(Expr));
4301 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4305 /// ::= .loh <lohName | lohId> label1, ..., labelN
4306 /// The number of arguments depends on the loh identifier.
// Parses a MachO linker-optimization-hint directive. The hint kind may be
// given by name or by numeric id; the id determines how many label arguments
// follow, which are collected and forwarded to EmitLOHDirective.
// NOTE(review): this listing is elided (originals 4309-4310, 4320, 4325-4326,
// 4329-4331, 4334-4336, 4339, 4343-4345, 4348-4349, 4352-4355 missing),
// including the declaration of 'Kind' and several Lex()/brace lines.
4307 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
// Not the .loh directive at all -> let the caller report it as unknown.
4308 if (IDVal != MCLOHDirectiveName())
4311 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4312 if (getParser().getTok().isNot(AsmToken::Integer))
4313 return TokError("expected an identifier or a number in directive");
4314 // We successfully get a numeric value for the identifier.
4315 // Check if it is valid.
4316 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): 'Id <= -1U' compares int64_t against unsigned; -1U promotes
// to 4294967295, so this is NOT a negative-value check. Verify the intended
// range test (likely 'Id < 0 || !isValidMCLOHType(Id)').
4317 if (Id <= -1U && !isValidMCLOHType(Id))
4318 return TokError("invalid numeric identifier in directive");
4319 Kind = (MCLOHType)Id;
4321 StringRef Name = getTok().getIdentifier();
4322 // We successfully parse an identifier.
4323 // Check if it is a recognized one.
4324 int Id = MCLOHNameToId(Name);
4327 return TokError("invalid identifier in directive");
4328 Kind = (MCLOHType)Id;
4330 // Consume the identifier.
4332 // Get the number of arguments of this LOH.
4333 int NbArgs = MCLOHIdToNbArgs(Kind);
// A recognized kind always has a known arity; -1 would mean table breakage.
4335 assert(NbArgs != -1 && "Invalid number of arguments");
4337 SmallVector<MCSymbol *, 3> Args;
// Collect exactly NbArgs comma-separated label operands.
4338 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4340 if (getParser().parseIdentifier(Name))
4341 return TokError("expected identifier in directive");
4342 Args.push_back(getContext().getOrCreateSymbol(Name));
// The last argument is not followed by a comma.
4344 if (Idx + 1 == NbArgs)
4346 if (getLexer().isNot(AsmToken::Comma))
4347 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
// Nothing may trail the final label on the line.
4350 if (getLexer().isNot(AsmToken::EndOfStatement))
4351 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4353 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4357 /// parseDirectiveLtorg
4358 /// ::= .ltorg | .pool
// Flushes the current constant (literal) pool at this point in the output;
// both spellings dispatch here from ParseDirective.
// NOTE(review): the trailing 'return'/closing brace (originals 4361-4363)
// are elided from this listing.
4359 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4360 getTargetStreamer().emitCurrentConstantPool();
4364 /// parseDirectiveReq
4365 /// ::= name .req registername
// Records a register alias: after this, 'name' can be used wherever the
// register is expected. The alias is stored in RegisterReqs as a
// (IsVector, RegNum) pair keyed by name.
// NOTE(review): several lines are elided from this listing (originals 4372,
// 4374, 4378-4382, 4386-4388, 4393-4395, 4397, 4401-4403), including the
// 'Kind' declaration and the branch that sets IsVector for vector registers.
4366 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4367 MCAsmParser &Parser = getParser();
4368 Parser.Lex(); // Eat the '.req' token.
4369 SMLoc SRegLoc = getLoc();
// First try a scalar GPR; on failure, retry as a vector register below.
4370 unsigned RegNum = tryParseRegister();
4371 bool IsVector = false;
4373 if (RegNum == static_cast<unsigned>(-1)) {
4375 RegNum = tryMatchVectorRegister(Kind, false);
// A vector register with an explicit arrangement (e.g. v0.8b) is not a
// valid .req target -- only the bare register name is.
4376 if (!Kind.empty()) {
4377 Error(SRegLoc, "vector register without type specifier expected");
// Neither scalar nor vector parse succeeded: diagnose and recover.
4383 if (RegNum == static_cast<unsigned>(-1)) {
4384 Parser.eatToEndOfStatement();
4385 Error(SRegLoc, "register name or alias expected");
4389 // Shouldn't be anything else.
4390 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4391 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4392 Parser.eatToEndOfStatement();
4396 Parser.Lex(); // Consume the EndOfStatement
// StringMap::insert keeps an existing entry; if the name was already
// aliased to a *different* register, warn and keep the first definition.
4398 auto pair = std::make_pair(IsVector, RegNum);
4399 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4400 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4405 /// parseDirectiveUneq
4406 /// ::= .unreq registername
// Removes a register alias previously created with .req. Erasing a name that
// was never defined is a silent no-op (StringMap::erase on a missing key).
// NOTE(review): the recovery 'return'/brace lines (originals 4412-4413,
// 4416-4418) are elided from this listing.
4407 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4408 MCAsmParser &Parser = getParser();
4409 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4410 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4411 Parser.eatToEndOfStatement();
// Aliases are stored lower-cased, hence the .lower() before erasing --
// presumably matching how lookups normalize the name; verify against the
// alias-lookup site elsewhere in this file.
4414 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4415 Parser.Lex(); // Eat the identifier.
// Decomposes an operand expression into (symbol variant kind, addend).
// Accepts: a bare symbol reference, or symbol +/- constant, optionally
// wrapped in an AArch64MCExpr carrying an ELF reference kind (e.g. :lo12:).
// On success the out-parameters describe the reference; mixing ELF and
// Darwin variant spellings on the same expression is rejected (see the
// final return). NOTE(review): this listing is elided (originals 4419,
// 4423, 4426-4427, 4431-4432, 4434, 4437-4439, 4441-4443, 4445-4446, 4448,
// 4451-4452, 4456-4458, 4461-4462, 4467 missing), including the leading
// return type and several early returns.
4420 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4421 AArch64MCExpr::VariantKind &ELFRefKind,
4422 MCSymbolRefExpr::VariantKind &DarwinRefKind,
// Initialize outputs to "no variant" so failure paths leave them sane.
4424 ELFRefKind = AArch64MCExpr::VK_INVALID;
4425 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off a target-specific wrapper, remembering its ELF reference kind.
4428 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4429 ELFRefKind = AE->getKind();
4430 Expr = AE->getSubExpr();
4433 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4435 // It's a simple symbol reference with no addend.
4436 DarwinRefKind = SE->getKind();
// Otherwise only a binary symbol-plus-constant form is recognized.
4440 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4444 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4447 DarwinRefKind = SE->getKind();
// Only + and - combine a symbol with an addend.
4449 if (BE->getOpcode() != MCBinaryExpr::Add &&
4450 BE->getOpcode() != MCBinaryExpr::Sub)
4453 // See if the addend is is a constant, otherwise there's more going
4454 // on here than we can deal with.
4455 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4459 Addend = AddendExpr->getValue();
// Normalize 'sym - c' by negating the addend (negation line elided here).
4460 if (BE->getOpcode() == MCBinaryExpr::Sub)
4463 // It's some symbol reference + a constant addend, but really
4464 // shouldn't use both Darwin and ELF syntax.
4465 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4466 DarwinRefKind == MCSymbolRefExpr::VK_None;
4469 /// Force static initialization.
// Registers this parser with the TargetRegistry for all three AArch64
// target triples (little-endian, big-endian, and the legacy 'arm64' name).
// extern "C" so the registry's dynamic lookup can find it by symbol name.
// NOTE(review): the closing brace (original 4474) is elided from this listing.
4470 extern "C" void LLVMInitializeAArch64AsmParser() {
4471 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4472 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4473 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4476 #define GET_REGISTER_MATCHER
4477 #define GET_SUBTARGET_FEATURE_NAME
4478 #define GET_MATCHER_IMPLEMENTATION
4479 #include "AArch64GenAsmMatcher.inc"
4481 // Define this matcher function after the auto-generated include so we
4482 // have the match class enum definitions.
// Target hook used by the generated matcher for operand classes it cannot
// validate itself: here, token-operand classes representing fixed literal
// immediates in InstAlias syntax (e.g. '#0'). Returns Match_Success only
// when the operand is a constant immediate equal to the expected value.
// NOTE(review): the switch mapping the match-class kind to 'ExpectedVal'
// (original lines 4490-4533) is elided from this listing, as are the
// second parameter of the signature and the Op.isImm()/token checks.
4483 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4485 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4486 // If the kind is a token for a literal immediate, check if our asm
4487 // operand matches. This is for InstAliases which have a fixed-value
4488 // immediate in the syntax.
4489 int64_t ExpectedVal;
4492 return Match_InvalidOperand;
4534 return Match_InvalidOperand;
// Only a fully-constant immediate can match a literal token class.
4535 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4537 return Match_InvalidOperand;
4538 if (CE->getValue() == ExpectedVal)
4539 return Match_Success;
4540 return Match_InvalidOperand;
// Parses a consecutive even/odd GPR pair operand ("x0, x1" or "w2, w3") as
// used by CASP-style instructions. Both registers must be the same width,
// the first must have an even encoding, and the second must encode as
// first+1. The pair is folded into the matching XSeqPairs/WSeqPairs
// super-register. NOTE(review): this listing is elided (originals 4546-4548,
// 4552-4553, 4557, 4562, 4569-4570, 4573, 4578-4580, 4584-4588, 4592-4593,
// 4600-4603, 4606, 4609-4610, 4612-4613 missing), including the SMLoc
// declarations and the Lex() that consumes the comma.
4544 AArch64AsmParser::OperandMatchResultTy
4545 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
// A register name token must come first.
4549 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4550 Error(S, "expected register");
4551 return MatchOperand_ParseFail;
4554 int FirstReg = tryParseRegister();
4555 if (FirstReg == -1) {
4556 return MatchOperand_ParseFail;
4558 const MCRegisterClass &WRegClass =
4559 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4560 const MCRegisterClass &XRegClass =
4561 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
// Width of the first register fixes the width required of the second.
4563 bool isXReg = XRegClass.contains(FirstReg),
4564 isWReg = WRegClass.contains(FirstReg);
4565 if (!isXReg && !isWReg) {
4566 Error(S, "expected first even register of a "
4567 "consecutive same-size even/odd register pair");
4568 return MatchOperand_ParseFail;
4571 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4572 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
// The first register of the pair must have an even hardware encoding.
4574 if (FirstEncoding & 0x1) {
4575 Error(S, "expected first even register of a "
4576 "consecutive same-size even/odd register pair");
4577 return MatchOperand_ParseFail;
4581 if (getParser().getTok().isNot(AsmToken::Comma)) {
4582 Error(M, "expected comma");
4583 return MatchOperand_ParseFail;
4589 int SecondReg = tryParseRegister();
4590 if (SecondReg ==-1) {
4591 return MatchOperand_ParseFail;
// The second register must be the odd sibling (encoding first+1) and the
// same width as the first.
4594 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4595 (isXReg && !XRegClass.contains(SecondReg)) ||
4596 (isWReg && !WRegClass.contains(SecondReg))) {
4597 Error(E,"expected second odd register of a "
4598 "consecutive same-size even/odd register pair");
4599 return MatchOperand_ParseFail;
// Map the even register up to the 128-bit sequential-pair super-register
// via the sube64/sube32 subregister index for X/W pairs respectively.
4604 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4605 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4607 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4608 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4611 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4614 return MatchOperand_Success;