1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
109 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
112 enum AArch64MatchResultTy {
113 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
114 #define GET_OPERAND_DIAGNOSTIC_TYPES
115 #include "AArch64GenAsmMatcher.inc"
117 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
118 const MCInstrInfo &MII, const MCTargetOptions &Options)
119 : MCTargetAsmParser(Options, STI) {
120 MCAsmParserExtension::Initialize(Parser);
// Create a target streamer on demand if the streamer does not already have
// one. The raw `new` is intentional -- presumably the MCStreamer takes
// ownership of its target streamer (NOTE(review): confirm ownership in the
// MCStreamer API; otherwise this would leak).
121 MCStreamer &S = getParser().getStreamer();
122 if (S.getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(S);
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 class AArch64Operand : public MCParsedAsmOperand {
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
217 uint32_t PStateField;
230 struct ShiftExtendOp {
231 AArch64_AM::ShiftExtendType Type;
233 bool HasExplicitAmount;
243 struct VectorListOp VectorList;
244 struct VectorIndexOp VectorIndex;
246 struct ShiftedImmOp ShiftedImm;
247 struct CondCodeOp CondCode;
248 struct FPImmOp FPImm;
249 struct BarrierOp Barrier;
250 struct SysRegOp SysReg;
251 struct SysCRImmOp SysCRImm;
252 struct PrefetchOp Prefetch;
253 struct ShiftExtendOp ShiftExtend;
256 // Keep the MCContext around as the MCExprs may need manipulated during
257 // the add<>Operands() calls.
261 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
263 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
265 StartLoc = o.StartLoc;
275 ShiftedImm = o.ShiftedImm;
278 CondCode = o.CondCode;
290 VectorList = o.VectorList;
293 VectorIndex = o.VectorIndex;
299 SysCRImm = o.SysCRImm;
302 Prefetch = o.Prefetch;
305 ShiftExtend = o.ShiftExtend;
310 /// getStartLoc - Get the location of the first token of this operand.
311 SMLoc getStartLoc() const override { return StartLoc; }
312 /// getEndLoc - Get the location of the last token of this operand.
313 SMLoc getEndLoc() const override { return EndLoc; }
315 StringRef getToken() const {
316 assert(Kind == k_Token && "Invalid access!");
317 return StringRef(Tok.Data, Tok.Length);
320 bool isTokenSuffix() const {
321 assert(Kind == k_Token && "Invalid access!");
325 const MCExpr *getImm() const {
326 assert(Kind == k_Immediate && "Invalid access!");
330 const MCExpr *getShiftedImmVal() const {
331 assert(Kind == k_ShiftedImm && "Invalid access!");
332 return ShiftedImm.Val;
335 unsigned getShiftedImmShift() const {
336 assert(Kind == k_ShiftedImm && "Invalid access!");
337 return ShiftedImm.ShiftAmount;
340 AArch64CC::CondCode getCondCode() const {
341 assert(Kind == k_CondCode && "Invalid access!");
342 return CondCode.Code;
345 unsigned getFPImm() const {
346 assert(Kind == k_FPImm && "Invalid access!");
350 unsigned getBarrier() const {
351 assert(Kind == k_Barrier && "Invalid access!");
355 StringRef getBarrierName() const {
356 assert(Kind == k_Barrier && "Invalid access!");
357 return StringRef(Barrier.Data, Barrier.Length);
360 unsigned getReg() const override {
361 assert(Kind == k_Register && "Invalid access!");
365 unsigned getVectorListStart() const {
366 assert(Kind == k_VectorList && "Invalid access!");
367 return VectorList.RegNum;
370 unsigned getVectorListCount() const {
371 assert(Kind == k_VectorList && "Invalid access!");
372 return VectorList.Count;
375 unsigned getVectorIndex() const {
376 assert(Kind == k_VectorIndex && "Invalid access!");
377 return VectorIndex.Val;
380 StringRef getSysReg() const {
381 assert(Kind == k_SysReg && "Invalid access!");
382 return StringRef(SysReg.Data, SysReg.Length);
385 unsigned getSysCR() const {
386 assert(Kind == k_SysCR && "Invalid access!");
390 unsigned getPrefetch() const {
391 assert(Kind == k_Prefetch && "Invalid access!");
395 StringRef getPrefetchName() const {
396 assert(Kind == k_Prefetch && "Invalid access!");
397 return StringRef(Prefetch.Data, Prefetch.Length);
400 AArch64_AM::ShiftExtendType getShiftExtendType() const {
401 assert(Kind == k_ShiftExtend && "Invalid access!");
402 return ShiftExtend.Type;
405 unsigned getShiftExtendAmount() const {
406 assert(Kind == k_ShiftExtend && "Invalid access!");
407 return ShiftExtend.Amount;
410 bool hasShiftExtendAmount() const {
411 assert(Kind == k_ShiftExtend && "Invalid access!");
412 return ShiftExtend.HasExplicitAmount;
415 bool isImm() const override { return Kind == k_Immediate; }
416 bool isMem() const override { return false; }
417 bool isSImm9() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val < 256);
426 bool isSImm7s4() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
435 bool isSImm7s8() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
444 bool isSImm7s16() const {
447 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
450 int64_t Val = MCE->getValue();
451 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
454 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
455 AArch64MCExpr::VariantKind ELFRefKind;
456 MCSymbolRefExpr::VariantKind DarwinRefKind;
458 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
460 // If we don't understand the expression, assume the best and
461 // let the fixup and relocation code deal with it.
465 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
466 ELFRefKind == AArch64MCExpr::VK_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
474 // Note that we don't range-check the addend. It's adjusted modulo page
475 // size when converted, so there is no "out of range" condition when using
477 return Addend >= 0 && (Addend % Scale) == 0;
478 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
479 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
480 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
487 template <int Scale> bool isUImm12Offset() const {
491 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493 return isSymbolicUImm12Offset(getImm(), Scale);
495 int64_t Val = MCE->getValue();
496 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
499 bool isImm0_1() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val >= 0 && Val < 2);
508 bool isImm0_7() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val >= 0 && Val < 8);
517 bool isImm1_8() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val > 0 && Val < 9);
526 bool isImm0_15() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val >= 0 && Val < 16);
535 bool isImm1_16() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val > 0 && Val < 17);
544 bool isImm0_31() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 0 && Val < 32);
553 bool isImm1_31() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 1 && Val < 32);
562 bool isImm1_32() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 1 && Val < 33);
571 bool isImm0_63() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 0 && Val < 64);
580 bool isImm1_63() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 1 && Val < 64);
589 bool isImm1_64() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 1 && Val < 65);
598 bool isImm0_127() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 128);
607 bool isImm0_255() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 0 && Val < 256);
616 bool isImm0_65535() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
623 return (Val >= 0 && Val < 65536);
625 bool isImm32_63() const {
628 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
631 int64_t Val = MCE->getValue();
632 return (Val >= 32 && Val < 64);
634 bool isLogicalImm32() const {
637 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
640 int64_t Val = MCE->getValue();
// Accept only constants whose top 32 bits are all-zero or all-one, i.e. a
// zero- or sign-extended 32-bit quantity. (NOTE(review): right-shifting a
// negative int64_t is arithmetic on all mainstream compilers, which this
// comparison against ~0LL relies on.)
641 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
644 return AArch64_AM::isLogicalImmediate(Val, 32);
646 bool isLogicalImm64() const {
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
654 bool isLogicalImm32Not() const {
657 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
660 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
661 return AArch64_AM::isLogicalImmediate(Val, 32);
663 bool isLogicalImm64Not() const {
666 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
669 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
671 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
672 bool isAddSubImm() const {
673 if (!isShiftedImm() && !isImm())
678 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
679 if (isShiftedImm()) {
680 unsigned Shift = ShiftedImm.ShiftAmount;
681 Expr = ShiftedImm.Val;
682 if (Shift != 0 && Shift != 12)
688 AArch64MCExpr::VariantKind ELFRefKind;
689 MCSymbolRefExpr::VariantKind DarwinRefKind;
691 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
692 DarwinRefKind, Addend)) {
693 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
694 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
695 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
696 || ELFRefKind == AArch64MCExpr::VK_LO12
697 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
698 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
699 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
700 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
701 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
702 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
703 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
706 // Otherwise it should be a real immediate in range:
707 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
708 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
710 bool isAddSubImmNeg() const {
711 if (!isShiftedImm() && !isImm())
716 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
717 if (isShiftedImm()) {
718 unsigned Shift = ShiftedImm.ShiftAmount;
719 Expr = ShiftedImm.Val;
720 if (Shift != 0 && Shift != 12)
725 // Otherwise it should be a real negative immediate in range:
// NOTE(review): -CE->getValue() would overflow (UB) for INT64_MIN;
// presumably earlier parsing keeps the constant in a sane range -- confirm.
726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
727 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
729 bool isCondCode() const { return Kind == k_CondCode; }
730 bool isSIMDImmType10() const {
733 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
736 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
738 bool isBranchTarget26() const {
741 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
744 int64_t Val = MCE->getValue();
747 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
749 bool isPCRelLabel19() const {
752 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
755 int64_t Val = MCE->getValue();
758 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
760 bool isBranchTarget14() const {
763 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
766 int64_t Val = MCE->getValue();
769 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
773 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
777 AArch64MCExpr::VariantKind ELFRefKind;
778 MCSymbolRefExpr::VariantKind DarwinRefKind;
780 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
781 DarwinRefKind, Addend)) {
784 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
787 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
788 if (ELFRefKind == AllowedModifiers[i])
795 bool isMovZSymbolG3() const {
796 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
799 bool isMovZSymbolG2() const {
800 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
801 AArch64MCExpr::VK_TPREL_G2,
802 AArch64MCExpr::VK_DTPREL_G2});
805 bool isMovZSymbolG1() const {
806 return isMovWSymbol({
807 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
808 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
809 AArch64MCExpr::VK_DTPREL_G1,
813 bool isMovZSymbolG0() const {
814 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
815 AArch64MCExpr::VK_TPREL_G0,
816 AArch64MCExpr::VK_DTPREL_G0});
819 bool isMovKSymbolG3() const {
820 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
823 bool isMovKSymbolG2() const {
824 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
827 bool isMovKSymbolG1() const {
828 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
829 AArch64MCExpr::VK_TPREL_G1_NC,
830 AArch64MCExpr::VK_DTPREL_G1_NC});
833 bool isMovKSymbolG0() const {
835 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
836 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
839 template<int RegWidth, int Shift>
840 bool isMOVZMovAlias() const {
841 if (!isImm()) return false;
843 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
844 if (!CE) return false;
845 uint64_t Value = CE->getValue();
848 Value &= 0xffffffffULL;
850 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
851 if (Value == 0 && Shift != 0)
854 return (Value & ~(0xffffULL << Shift)) == 0;
857 template<int RegWidth, int Shift>
858 bool isMOVNMovAlias() const {
859 if (!isImm()) return false;
861 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
862 if (!CE) return false;
863 uint64_t Value = CE->getValue();
865 // MOVZ takes precedence over MOVN.
866 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
867 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
872 Value &= 0xffffffffULL;
874 return (Value & ~(0xffffULL << Shift)) == 0;
877 bool isFPImm() const { return Kind == k_FPImm; }
878 bool isBarrier() const { return Kind == k_Barrier; }
879 bool isSysReg() const { return Kind == k_SysReg; }
880 bool isMRSSystemRegister() const {
881 if (!isSysReg()) return false;
883 return SysReg.MRSReg != -1U;
885 bool isMSRSystemRegister() const {
886 if (!isSysReg()) return false;
887 return SysReg.MSRReg != -1U;
889 bool isSystemPStateFieldWithImm0_1() const {
890 if (!isSysReg()) return false;
891 return (SysReg.PStateField == AArch64PState::PAN ||
892 SysReg.PStateField == AArch64PState::UAO);
894 bool isSystemPStateFieldWithImm0_15() const {
895 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
896 return SysReg.PStateField != -1U;
898 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
899 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
900 bool isVectorRegLo() const {
901 return Kind == k_Register && Reg.isVector &&
902 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
905 bool isGPR32as64() const {
906 return Kind == k_Register && !Reg.isVector &&
907 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
909 bool isWSeqPair() const {
910 return Kind == k_Register && !Reg.isVector &&
911 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
914 bool isXSeqPair() const {
915 return Kind == k_Register && !Reg.isVector &&
916 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
920 bool isGPR64sp0() const {
921 return Kind == k_Register && !Reg.isVector &&
922 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
925 /// Is this a vector list with the type implicit (presumably attached to the
926 /// instruction itself)?
927 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
928 return Kind == k_VectorList && VectorList.Count == NumRegs &&
929 !VectorList.ElementKind;
932 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
933 bool isTypedVectorList() const {
934 if (Kind != k_VectorList)
936 if (VectorList.Count != NumRegs)
938 if (VectorList.ElementKind != ElementKind)
940 return VectorList.NumElements == NumElements;
943 bool isVectorIndex1() const {
944 return Kind == k_VectorIndex && VectorIndex.Val == 1;
946 bool isVectorIndexB() const {
947 return Kind == k_VectorIndex && VectorIndex.Val < 16;
949 bool isVectorIndexH() const {
950 return Kind == k_VectorIndex && VectorIndex.Val < 8;
952 bool isVectorIndexS() const {
953 return Kind == k_VectorIndex && VectorIndex.Val < 4;
955 bool isVectorIndexD() const {
956 return Kind == k_VectorIndex && VectorIndex.Val < 2;
958 bool isToken() const override { return Kind == k_Token; }
959 bool isTokenEqual(StringRef Str) const {
960 return Kind == k_Token && getToken() == Str;
962 bool isSysCR() const { return Kind == k_SysCR; }
963 bool isPrefetch() const { return Kind == k_Prefetch; }
964 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
965 bool isShifter() const {
966 if (!isShiftExtend())
969 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
970 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
971 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
972 ST == AArch64_AM::MSL);
974 bool isExtend() const {
975 if (!isShiftExtend())
978 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
979 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
980 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
981 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
982 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
983 ET == AArch64_AM::LSL) &&
984 getShiftExtendAmount() <= 4;
987 bool isExtend64() const {
990 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
991 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
992 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
994 bool isExtendLSL64() const {
997 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
998 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
999 ET == AArch64_AM::LSL) &&
1000 getShiftExtendAmount() <= 4;
1003 template<int Width> bool isMemXExtend() const {
1006 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1007 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1008 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1009 getShiftExtendAmount() == 0);
1012 template<int Width> bool isMemWExtend() const {
1015 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1016 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1017 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1018 getShiftExtendAmount() == 0);
1021 template <unsigned width>
1022 bool isArithmeticShifter() const {
1026 // An arithmetic shifter is LSL, LSR, or ASR.
1027 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1028 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1029 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1032 template <unsigned width>
1033 bool isLogicalShifter() const {
1037 // A logical shifter is LSL, LSR, ASR or ROR.
1038 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1039 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1040 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1041 getShiftExtendAmount() < width;
1044 bool isMovImm32Shifter() const {
1048 // A 32-bit MOVi shifter is LSL of 0 or 16.
1049 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1050 if (ST != AArch64_AM::LSL)
1052 uint64_t Val = getShiftExtendAmount();
1053 return (Val == 0 || Val == 16);
1056 bool isMovImm64Shifter() const {
1060 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1061 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1062 if (ST != AArch64_AM::LSL)
1064 uint64_t Val = getShiftExtendAmount();
1065 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1068 bool isLogicalVecShifter() const {
1072 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1073 unsigned Shift = getShiftExtendAmount();
1074 return getShiftExtendType() == AArch64_AM::LSL &&
1075 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1078 bool isLogicalVecHalfWordShifter() const {
1079 if (!isLogicalVecShifter())
1082 // A logical vector half-word shifter is a left shift by 0 or 8.
1083 unsigned Shift = getShiftExtendAmount();
1084 return getShiftExtendType() == AArch64_AM::LSL &&
1085 (Shift == 0 || Shift == 8);
1088 bool isMoveVecShifter() const {
1089 if (!isShiftExtend())
1092 // A move vector shifter (MSL) is a left shift by 8 or 16.
1093 unsigned Shift = getShiftExtendAmount();
1094 return getShiftExtendType() == AArch64_AM::MSL &&
1095 (Shift == 8 || Shift == 16);
1098 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1099 // to LDUR/STUR when the offset is not legal for the former but is for
1100 // the latter. As such, in addition to checking for being a legal unscaled
1101 // address, also check that it is not a legal scaled address. This avoids
1102 // ambiguity in the matcher.
1104 bool isSImm9OffsetFB() const {
1105 return isSImm9() && !isUImm12Offset<Width / 8>();
1108 bool isAdrpLabel() const {
1109 // Validation was handled during parsing, so we just sanity check that
1110 // something didn't go haywire.
1114 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1115 int64_t Val = CE->getValue();
1116 int64_t Min = - (4096 * (1LL << (21 - 1)));
1117 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1118 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1124 bool isAdrLabel() const {
1125 // Validation was handled during parsing, so we just sanity check that
1126 // something didn't go haywire.
1130 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1131 int64_t Val = CE->getValue();
1132 int64_t Min = - (1LL << (21 - 1));
1133 int64_t Max = ((1LL << (21 - 1)) - 1);
1134 return Val >= Min && Val <= Max;
1140 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1141 // Add as immediates when possible. Null MCExpr = 0.
1143 Inst.addOperand(MCOperand::createImm(0));
1144 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1145 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1147 Inst.addOperand(MCOperand::createExpr(Expr));
1150 void addRegOperands(MCInst &Inst, unsigned N) const {
1151 assert(N == 1 && "Invalid number of operands!");
1152 Inst.addOperand(MCOperand::createReg(getReg()));
1155 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1156 assert(N == 1 && "Invalid number of operands!");
1158 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1160 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1161 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1162 RI->getEncodingValue(getReg()));
1164 Inst.addOperand(MCOperand::createReg(Reg));
1167 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1170 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1171 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1174 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1177 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1178 Inst.addOperand(MCOperand::createReg(getReg()));
1181 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::createReg(getReg()));
1186 template <unsigned NumRegs>
1187 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1188 assert(N == 1 && "Invalid number of operands!");
1189 static const unsigned FirstRegs[] = { AArch64::D0,
1192 AArch64::D0_D1_D2_D3 };
1193 unsigned FirstReg = FirstRegs[NumRegs - 1];
1196 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1199 template <unsigned NumRegs>
1200 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1201 assert(N == 1 && "Invalid number of operands!");
1202 static const unsigned FirstRegs[] = { AArch64::Q0,
1205 AArch64::Q0_Q1_Q2_Q3 };
1206 unsigned FirstReg = FirstRegs[NumRegs - 1];
1209 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1212 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1213 assert(N == 1 && "Invalid number of operands!");
1214 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1217 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1218 assert(N == 1 && "Invalid number of operands!");
1219 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1222 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1227 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1228 assert(N == 1 && "Invalid number of operands!");
1229 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1232 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1233 assert(N == 1 && "Invalid number of operands!");
1234 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1237 void addImmOperands(MCInst &Inst, unsigned N) const {
1238 assert(N == 1 && "Invalid number of operands!");
1239 // If this is a pageoff symrefexpr with an addend, adjust the addend
1240 // to be only the page-offset portion. Otherwise, just add the expr
1242 addExpr(Inst, getImm());
1245 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1246 assert(N == 2 && "Invalid number of operands!");
1247 if (isShiftedImm()) {
1248 addExpr(Inst, getShiftedImmVal());
1249 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1251 addExpr(Inst, getImm());
1252 Inst.addOperand(MCOperand::createImm(0));
1256 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1257 assert(N == 2 && "Invalid number of operands!");
// Emit the negated constant and its shift amount as the two operands.
// Presumably gated by isAddSubImmNeg(), which only accepts negative
// constants, so Val comes out positive here (NOTE(review): negation is UB
// for INT64_MIN -- confirm the matcher excludes it).
1259 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1260 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1261 int64_t Val = -CE->getValue();
1262 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1264 Inst.addOperand(MCOperand::createImm(Val));
1265 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1268 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1269 assert(N == 1 && "Invalid number of operands!");
1270 Inst.addOperand(MCOperand::createImm(getCondCode()));
/// ADRP target: a constant is emitted as a page number (value >> 12); a
/// symbolic expression is passed through for the fixup machinery.
void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // (the 'if (!MCE)' guard separating the two paths is outside this excerpt)
  addExpr(Inst, getImm());
  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
/// ADR targets need no page arithmetic; reuse the generic immediate path.
void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
  addImmOperands(Inst, N);
/// Unsigned 12-bit scaled offset: a constant is divided by the access size
/// before encoding; a symbolic expression is passed through unscaled.
/// NOTE(review): 'Scale' is not declared in this excerpt -- presumably a
/// template parameter on this method; confirm.
void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // (the 'if (!MCE)' guard separating the two paths is outside this excerpt)
  Inst.addOperand(MCOperand::createExpr(getImm()));
  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1298 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1299 assert(N == 1 && "Invalid number of operands!");
1300 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1301 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1304 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1305 assert(N == 1 && "Invalid number of operands!");
1306 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1307 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1310 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1313 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1316 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1319 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1322 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1323 assert(N == 1 && "Invalid number of operands!");
1324 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1325 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1328 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1329 assert(N == 1 && "Invalid number of operands!");
1330 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1331 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1334 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1335 assert(N == 1 && "Invalid number of operands!");
1336 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1337 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1340 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1343 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1346 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1347 assert(N == 1 && "Invalid number of operands!");
1348 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1349 assert(MCE && "Invalid constant immediate operand!");
1350 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1353 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1354 assert(N == 1 && "Invalid number of operands!");
1355 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1356 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1359 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1360 assert(N == 1 && "Invalid number of operands!");
1361 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1362 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1365 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1366 assert(N == 1 && "Invalid number of operands!");
1367 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1368 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1371 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1374 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1377 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1378 assert(N == 1 && "Invalid number of operands!");
1379 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1380 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1383 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1384 assert(N == 1 && "Invalid number of operands!");
1385 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1386 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1389 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1390 assert(N == 1 && "Invalid number of operands!");
1391 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1392 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1395 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1396 assert(N == 1 && "Invalid number of operands!");
1397 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1398 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1401 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1402 assert(N == 1 && "Invalid number of operands!");
1403 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1404 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1407 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1408 assert(N == 1 && "Invalid number of operands!");
1409 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1410 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
/// 32-bit logical immediate: re-encode the raw constant (truncated to 32
/// bits) into the N:immr:imms hardware form.
void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
  // (the declaration of 'encoding' falls outside this excerpt)
      AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
  Inst.addOperand(MCOperand::createImm(encoding));
1421 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1422 assert(N == 1 && "Invalid number of operands!");
1423 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1424 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1425 Inst.addOperand(MCOperand::createImm(encoding));
1428 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1429 assert(N == 1 && "Invalid number of operands!");
1430 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1431 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1432 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1433 Inst.addOperand(MCOperand::createImm(encoding));
/// Inverted 64-bit logical immediate: encode the bitwise complement.
void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
  // (the declaration of 'encoding' falls outside this excerpt)
      AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
  Inst.addOperand(MCOperand::createImm(encoding));
1444 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1445 assert(N == 1 && "Invalid number of operands!");
1446 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1447 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1448 Inst.addOperand(MCOperand::createImm(encoding));
void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
  // Branch operands don't encode the low bits, so shift them off
  // here. If it's a label, however, just put it on directly as there's
  // not enough information now to do anything.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // (the 'if (!MCE)' guard around this label path is outside this excerpt)
  addExpr(Inst, getImm());
  // Constant path: drop the two always-zero low bits.
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
  // Branch operands don't encode the low bits, so shift them off
  // here. If it's a label, however, just put it on directly as there's
  // not enough information now to do anything.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // (the 'if (!MCE)' guard around this label path is outside this excerpt)
  addExpr(Inst, getImm());
  // Constant path: drop the two always-zero low bits.
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
  // Branch operands don't encode the low bits, so shift them off
  // here. If it's a label, however, just put it on directly as there's
  // not enough information now to do anything.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // (the 'if (!MCE)' guard around this label path is outside this excerpt)
  addExpr(Inst, getImm());
  // Constant path: drop the two always-zero low bits.
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1493 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && "Invalid number of operands!");
1495 Inst.addOperand(MCOperand::createImm(getFPImm()));
1498 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1499 assert(N == 1 && "Invalid number of operands!");
1500 Inst.addOperand(MCOperand::createImm(getBarrier()));
1503 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1504 assert(N == 1 && "Invalid number of operands!");
1506 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1509 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1510 assert(N == 1 && "Invalid number of operands!");
1512 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1515 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1516 assert(N == 1 && "Invalid number of operands!");
1518 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1521 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1522 assert(N == 1 && "Invalid number of operands!");
1524 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1527 void addSysCROperands(MCInst &Inst, unsigned N) const {
1528 assert(N == 1 && "Invalid number of operands!");
1529 Inst.addOperand(MCOperand::createImm(getSysCR()));
1532 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1533 assert(N == 1 && "Invalid number of operands!");
1534 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1537 void addShifterOperands(MCInst &Inst, unsigned N) const {
1538 assert(N == 1 && "Invalid number of operands!");
1540 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1541 Inst.addOperand(MCOperand::createImm(Imm));
1544 void addExtendOperands(MCInst &Inst, unsigned N) const {
1545 assert(N == 1 && "Invalid number of operands!");
1546 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1547 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1548 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1549 Inst.addOperand(MCOperand::createImm(Imm));
1552 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1553 assert(N == 1 && "Invalid number of operands!");
1554 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1555 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1556 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1557 Inst.addOperand(MCOperand::createImm(Imm));
1560 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1561 assert(N == 2 && "Invalid number of operands!");
1562 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1563 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1564 Inst.addOperand(MCOperand::createImm(IsSigned));
1565 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1568 // For 8-bit load/store instructions with a register offset, both the
1569 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1570 // they're disambiguated by whether the shift was explicit or implicit rather
1572 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1573 assert(N == 2 && "Invalid number of operands!");
1574 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1575 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1576 Inst.addOperand(MCOperand::createImm(IsSigned));
1577 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
/// MOVZ form of the "mov" alias: emit the 16-bit chunk selected by Shift.
/// NOTE(review): 'Shift' is not declared in this excerpt -- presumably a
/// template parameter on this method; confirm.
void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
  uint64_t Value = CE->getValue();
  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
/// MOVN form of the "mov" alias: emit the complemented 16-bit chunk
/// selected by Shift.
/// NOTE(review): 'Shift' is not declared in this excerpt -- presumably a
/// template parameter on this method; confirm.
void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
  uint64_t Value = CE->getValue();
  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1598 void print(raw_ostream &OS) const override;
/// Build a k_Token operand that points into the source buffer (no copy is
/// made; the buffer must outlive the operand).
static std::unique_ptr<AArch64Operand>
CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
  Op->Tok.Data = Str.data();
  Op->Tok.Length = Str.size();
  Op->Tok.IsSuffix = IsSuffix;
/// Build a k_Register operand; isVector distinguishes V-register names
/// from general-purpose/other registers.
static std::unique_ptr<AArch64Operand>
CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
  Op->Reg.RegNum = RegNum;
  Op->Reg.isVector = isVector;
/// Build a k_VectorList operand: Count registers starting at RegNum, with
/// the parsed arrangement (NumElements x ElementKind).
static std::unique_ptr<AArch64Operand>
CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
  Op->VectorList.RegNum = RegNum;
  Op->VectorList.Count = Count;
  Op->VectorList.NumElements = NumElements;
  Op->VectorList.ElementKind = ElementKind;
/// Build a k_VectorIndex operand holding the lane number.
static std::unique_ptr<AArch64Operand>
CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
  Op->VectorIndex.Val = Idx;
/// Build a k_Immediate operand wrapping an arbitrary MCExpr.
/// (The assignment of Val and the location/return lines fall outside this
/// excerpt.)
static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                 SMLoc E, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1652 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1653 unsigned ShiftAmount,
1656 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1657 Op->ShiftedImm .Val = Val;
1658 Op->ShiftedImm.ShiftAmount = ShiftAmount;
/// Build a k_CondCode operand holding the parsed condition code.
static std::unique_ptr<AArch64Operand>
CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
  Op->CondCode.Code = Code;
/// Build a k_FPImm operand from the 8-bit encoded FP immediate.
static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
  Op->FPImm.Val = Val;
/// Build a k_Barrier operand: the option value plus its source spelling
/// (stored as a pointer into the source buffer, no copy).
static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
  Op->Barrier.Val = Val;
  Op->Barrier.Data = Str.data();
  Op->Barrier.Length = Str.size();
/// Build a k_SysReg operand carrying the register's spelling plus its MRS,
/// MSR and pstate-field encodings. (The MRSReg/MSRReg parameter lines of
/// the signature fall outside this excerpt.)
static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                    uint32_t PStateField,
  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
  Op->SysReg.Data = Str.data();
  Op->SysReg.Length = Str.size();
  Op->SysReg.MRSReg = MRSReg;
  Op->SysReg.MSRReg = MSRReg;
  Op->SysReg.PStateField = PStateField;
/// Build a k_SysCR operand holding the cN number of a system instruction.
static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
  Op->SysCRImm.Val = Val;
/// Build a k_Prefetch operand from its 5-bit prfop value and spelling.
static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
  Op->Prefetch.Val = Val;
  // NOTE(review): the spelling is stored through the Barrier union member
  // of a k_Prefetch operand; this relies on BarrierOp and PrefetchOp
  // sharing layout -- confirm against the struct definitions above, or
  // store through Prefetch.Data/Length if those fields exist.
  Op->Barrier.Data = Str.data();
  Op->Barrier.Length = Str.size();
/// Build a k_ShiftExtend operand; HasExplicitAmount records whether the
/// amount was written in the assembly (used to disambiguate 8-bit
/// load/store variants).
static std::unique_ptr<AArch64Operand>
CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
  Op->ShiftExtend.Type = ShOp;
  Op->ShiftExtend.Amount = Val;
  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1746 } // end anonymous namespace.
/// Dump this operand to \p OS in a human-readable form for debugging.
/// (Several case labels of the enclosing switch fall outside this excerpt.)
void AArch64Operand::print(raw_ostream &OS) const {
  // k_FPImm: show both the raw 8-bit encoding and the decoded float value.
  OS << "<fpimm " << getFPImm() << "("
     << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
  // k_Barrier: prefer the symbolic name when one exists.
  StringRef Name = getBarrierName();
  OS << "<barrier " << Name << ">";
  OS << "<barrier invalid #" << getBarrier() << ">";
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
  OS << "<condcode " << getCondCode() << ">";
  OS << "<register " << getReg() << ">";
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
  OS << "<vectorindex " << getVectorIndex() << ">";
  OS << "<sysreg: " << getSysReg() << '>';
  OS << "'" << getToken() << "'";
  OS << "c" << getSysCR();
  // k_Prefetch: prefer the symbolic name when one exists.
  StringRef Name = getPrefetchName();
  OS << "<prfop " << Name << ">";
  OS << "<prfop invalid #" << getPrefetch() << ">";
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
1817 /// @name Auto-generated Match Functions
1820 static unsigned MatchRegisterName(StringRef Name);
/// Map a vector register name "v0".."v31" (case-insensitive) onto the
/// corresponding 128-bit Q register number. (The terminating .Default of
/// the StringSwitch falls outside this excerpt.)
static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
/// True when the given suffix names a valid vector arrangement, e.g. ".8b"
/// or a width-neutral kind like ".b". (Most Case entries of the
/// StringSwitch fall outside this excerpt.)
static bool isValidVectorKind(StringRef Name) {
  return StringSwitch<bool>(Name.lower())
  // Accept the width neutral ones, too, for verbose syntax. If those
  // aren't used in the right places, the token operand won't match so
  // all will work out.
/// Split an already-validated vector kind like ".4h" into its lane count
/// and element-kind character; width-neutral kinds (e.g. ".b") yield no
/// lane count.
static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
                                 char &ElementKind) {
  assert(isValidVectorKind(Name));
  // Last character is always the element kind (b/h/s/d).
  ElementKind = Name.lower()[Name.size() - 1];
  // Width-neutral kind (just ".<kind>"): nothing more to parse.
  if (Name.size() == 2)
  // Parse the lane count
  Name = Name.drop_front();
  while (isdigit(Name.front())) {
    NumElements = 10 * NumElements + (Name.front() - '0');
    Name = Name.drop_front();
/// MCTargetAsmParser hook: parse a register, reporting its location range.
/// Returns true (failure) when no register could be parsed.
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  // tryParseRegister() signals failure with -1.
  return (RegNo == (unsigned)-1);
// Matches a register name or register alias previously defined by '.req'
// Returns the register number, falling back to any matching .req alias of
// the same register class when the direct match fails.
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
  unsigned RegNum = isVector ? matchVectorRegName(Name)
                             : MatchRegisterName(Name);
  // Check for aliases registered via .req. Canonicalize to lower case.
  // That's more consistent since register names are case insensitive, and
  // it's how the original entry was passed in from MC/MCParser/AsmParser.
  auto Entry = RegisterReqs.find(Name.lower());
  if (Entry == RegisterReqs.end())
  // set RegNum if the match is the right kind of register
  if (isVector == Entry->getValue().first)
    RegNum = Entry->getValue().second;
/// tryParseRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.
int AArch64AsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  // Register names are case-insensitive; match on the lowered spelling.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
  // Also handle a few aliases of registers.
  RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("fp", AArch64::FP)
      .Case("lr", AArch64::LR)
      .Case("x31", AArch64::XZR)
      .Case("w31", AArch64::WZR)
  Parser.Lex(); // Eat identifier token.
/// tryMatchVectorRegister - Try to parse a vector register name with optional
/// kind specifier. If it is a register specifier, eat the token and return it.
int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    TokError("vector register expected");
  StringRef Name = Parser.getTok().getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.' -- split the token there.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, true);
  // Validate any ".<kind>" suffix that followed the register name.
  if (Next != StringRef::npos) {
    Kind = Name.slice(Next, StringRef::npos);
    if (!isValidVectorKind(Kind)) {
      TokError("invalid vector kind qualifier");
  Parser.Lex(); // Eat the register token.
  TokError("vector register expected");
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
/// Accepts "cN"/"CN" with 0 <= N <= 15. (The declarations of 'S' and
/// 'CRNum' fall outside this excerpt.)
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  // Everything after the leading 'c' must be a decimal number <= 15.
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  Parser.Lex(); // Eat identifier token.
    AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return MatchOperand_Success;
/// tryParsePrefetch - Try to parse a prefetch operand.
/// Accepts either a named prfop (e.g. "pldl1keep") or a raw immediate in
/// [0,31], with an optional leading '#'.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    TokError("immediate value expected for prefetch operand");
    return MatchOperand_ParseFail;
  unsigned prfop = MCE->getValue();
  TokError("prefetch operand out of range, [0,31] expected");
  return MatchOperand_ParseFail;
  // Look up the symbolic spelling for this prfop value, if any.
  auto Mapper = AArch64PRFM::PRFMMapper();
      Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
  return MatchOperand_Success;
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  // Named form: translate the spelling into its prfop value.
  auto Mapper = AArch64PRFM::PRFMMapper();
      Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
  TokError("pre-fetch hint expected");
  return MatchOperand_ParseFail;
  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
  return MatchOperand_Success;
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction: either a plain symbol (implicit @page), a @page/@gotpage/
/// @tlvppage-qualified symbol, or an immediate.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // The '#' prefix is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction; any expression is accepted, with an optional '#' prefix.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // The '#' prefix is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  if (getParser().parseExpression(Expr))
    return MatchOperand_ParseFail;
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts a real literal, an integer (treated as a float unless it is a
/// hex pre-encoded value), with optional '#' and '-' prefixes.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // The '#' prefix is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    RealVal.changeSign();
    // Convert to the 8-bit FP immediate encoding; -1 means unrepresentable.
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isPosZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  if (Tok.is(AsmToken::Integer)) {
    // Hex literals are taken as the already-encoded 8-bit value.
    if (!isNegative && Tok.getString().startswith("0x")) {
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  return MatchOperand_NoMatch;
  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand:
/// "#imm" optionally followed by ", lsl #N" with a non-negative N.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift: canonicalize page-aligned constants >0xfff into
    // their (value >> 12, lsl #12) form.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    int64_t Val = MCE->getValue();
    if (Val > 0xfff && (Val & 0xfff) == 0) {
      Imm = MCConstantExpr::create(Val >> 12, getContext());
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
    return MatchOperand_Success;
  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  int64_t ShiftAmount = Parser.getTok().getIntVal();
  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  Parser.Lex(); // Eat the number
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
/// parseCondCodeString - Parse a Condition Code string.
/// Case-insensitive; "cs"/"hs" and "cc"/"lo" are synonyms. Returns
/// AArch64CC::Invalid for anything unrecognized.
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                               .Case("eq", AArch64CC::EQ)
                               .Case("ne", AArch64CC::NE)
                               .Case("cs", AArch64CC::HS)
                               .Case("hs", AArch64CC::HS)
                               .Case("cc", AArch64CC::LO)
                               .Case("lo", AArch64CC::LO)
                               .Case("mi", AArch64CC::MI)
                               .Case("pl", AArch64CC::PL)
                               .Case("vs", AArch64CC::VS)
                               .Case("vc", AArch64CC::VC)
                               .Case("hi", AArch64CC::HI)
                               .Case("ls", AArch64CC::LS)
                               .Case("ge", AArch64CC::GE)
                               .Case("lt", AArch64CC::LT)
                               .Case("gt", AArch64CC::GT)
                               .Case("le", AArch64CC::LE)
                               .Case("al", AArch64CC::AL)
                               .Case("nv", AArch64CC::NV)
                               .Default(AArch64CC::Invalid);
2300 /// parseCondCode - Parse a Condition Code operand.
// Consumes the identifier token on success and appends a CondCode operand.
// Returns true (error) for unknown codes, or for AL/NV when the caller asked
// for the inverted code, since those two have no usable inversion here.
2301 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2302                                      bool invertCondCode) {
2303   MCAsmParser &Parser = getParser();
2305   const AsmToken &Tok = Parser.getTok();
2306   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2308   StringRef Cond = Tok.getString();
2309   AArch64CC::CondCode CC = parseCondCodeString(Cond);
2310   if (CC == AArch64CC::Invalid)
2311     return TokError("invalid condition code");
2312   Parser.Lex(); // Eat identifier token.
2314   if (invertCondCode) {
2315     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2316       return TokError("condition codes AL and NV are invalid for this instruction");
2317     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2321       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2325 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2326 /// them if present.
// Recognizes both shift (lsl/lsr/asr/ror/msl) and extend (uxt*/sxt*) specifiers.
// Shifts require an explicit immediate; extends may omit it (implicit #0).
2327 AArch64AsmParser::OperandMatchResultTy
2328 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2329   MCAsmParser &Parser = getParser();
2330   const AsmToken &Tok = Parser.getTok();
2331   std::string LowerID = Tok.getString().lower();
2332   AArch64_AM::ShiftExtendType ShOp =
2333       StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2334           .Case("lsl", AArch64_AM::LSL)
2335           .Case("lsr", AArch64_AM::LSR)
2336           .Case("asr", AArch64_AM::ASR)
2337           .Case("ror", AArch64_AM::ROR)
2338           .Case("msl", AArch64_AM::MSL)
2339           .Case("uxtb", AArch64_AM::UXTB)
2340           .Case("uxth", AArch64_AM::UXTH)
2341           .Case("uxtw", AArch64_AM::UXTW)
2342           .Case("uxtx", AArch64_AM::UXTX)
2343           .Case("sxtb", AArch64_AM::SXTB)
2344           .Case("sxth", AArch64_AM::SXTH)
2345           .Case("sxtw", AArch64_AM::SXTW)
2346           .Case("sxtx", AArch64_AM::SXTX)
2347           .Default(AArch64_AM::InvalidShiftExtend);
2349   if (ShOp == AArch64_AM::InvalidShiftExtend)
2350     return MatchOperand_NoMatch;
2352   SMLoc S = Tok.getLoc();
2355   bool Hash = getLexer().is(AsmToken::Hash);
2356   if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2357     if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2358         ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2359         ShOp == AArch64_AM::MSL) {
2360       // We expect a number here.
2361       TokError("expected #imm after shift specifier");
2362       return MatchOperand_ParseFail;
2365     // "extend" type operations don't need an immediate, #0 is implicit.
2366     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2368         AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2369     return MatchOperand_Success;
2373     Parser.Lex(); // Eat the '#'.
2375   // Make sure we do actually have a number or a parenthesized expression.
2376   SMLoc E = Parser.getTok().getLoc();
2377   if (!Parser.getTok().is(AsmToken::Integer) &&
2378       !Parser.getTok().is(AsmToken::LParen)) {
2379     Error(E, "expected integer shift amount");
2380     return MatchOperand_ParseFail;
2383   const MCExpr *ImmVal;
2384   if (getParser().parseExpression(ImmVal))
2385     return MatchOperand_ParseFail;
2387   const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2389     Error(E, "expected constant '#imm' after shift specifier");
2390     return MatchOperand_ParseFail;
2393   E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2394   Operands.push_back(AArch64Operand::CreateShiftExtend(
2395       ShOp, MCE->getValue(), true, S, E, getContext()));
2396   return MatchOperand_Success;
2399 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2400 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
// On success this builds the operand list for a raw SYS instruction:
// "sys" token, then #op1, Cn, Cm, #op2 (via SYS_ALIAS), then an optional
// register operand. Returns true on error (TokError has been emitted).
2401 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2402                                    OperandVector &Operands) {
2403   if (Name.find('.') != StringRef::npos)
2404     return TokError("invalid operand");
2408       AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2410   MCAsmParser &Parser = getParser();
2411   const AsmToken &Tok = Parser.getTok();
2412   StringRef Op = Tok.getString();
2413   SMLoc S = Tok.getLoc();
2415   const MCExpr *Expr = nullptr;
// Expands to the four SYS operands (#op1, Cn, Cm, #op2) for one alias.
2417 #define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
2419     Expr = MCConstantExpr::create(op1, getContext());                          \
2420     Operands.push_back(                                                        \
2421         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
2422     Operands.push_back(                                                        \
2423         AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
2424     Operands.push_back(                                                        \
2425         AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
2426     Expr = MCConstantExpr::create(op2, getContext());                          \
2427     Operands.push_back(                                                        \
2428         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
2431   if (Mnemonic == "ic") {
2432     if (!Op.compare_lower("ialluis")) {
2433       // SYS #0, C7, C1, #0
2434       SYS_ALIAS(0, 7, 1, 0);
2435     } else if (!Op.compare_lower("iallu")) {
2436       // SYS #0, C7, C5, #0
2437       SYS_ALIAS(0, 7, 5, 0);
2438     } else if (!Op.compare_lower("ivau")) {
2439       // SYS #3, C7, C5, #1
2440       SYS_ALIAS(3, 7, 5, 1);
2442       return TokError("invalid operand for IC instruction");
2444   } else if (Mnemonic == "dc") {
2445     if (!Op.compare_lower("zva")) {
2446       // SYS #3, C7, C4, #1
2447       SYS_ALIAS(3, 7, 4, 1);
2448     } else if (!Op.compare_lower("ivac")) {
2449       // SYS #0, C7, C6, #1
2450       SYS_ALIAS(0, 7, 6, 1);
2451     } else if (!Op.compare_lower("isw")) {
2452       // SYS #0, C7, C6, #2
2453       SYS_ALIAS(0, 7, 6, 2);
2454     } else if (!Op.compare_lower("cvac")) {
2455       // SYS #3, C7, C10, #1
2456       SYS_ALIAS(3, 7, 10, 1);
2457     } else if (!Op.compare_lower("csw")) {
2458       // SYS #0, C7, C10, #2
2459       SYS_ALIAS(0, 7, 10, 2);
2460     } else if (!Op.compare_lower("cvau")) {
2461       // SYS #3, C7, C11, #1
2462       SYS_ALIAS(3, 7, 11, 1);
2463     } else if (!Op.compare_lower("civac")) {
2464       // SYS #3, C7, C14, #1
2465       SYS_ALIAS(3, 7, 14, 1);
2466     } else if (!Op.compare_lower("cisw")) {
2467       // SYS #0, C7, C14, #2
2468       SYS_ALIAS(0, 7, 14, 2);
2469     } else if (!Op.compare_lower("cvap")) {
// DC CVAP (persistent-memory clean) only exists from ARMv8.2a onwards.
2470       if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2471         // SYS #3, C7, C12, #1
2472         SYS_ALIAS(3, 7, 12, 1);
2474         return TokError("DC CVAP requires ARMv8.2a");
2477       return TokError("invalid operand for DC instruction");
2479   } else if (Mnemonic == "at") {
2480     if (!Op.compare_lower("s1e1r")) {
2481       // SYS #0, C7, C8, #0
2482       SYS_ALIAS(0, 7, 8, 0);
2483     } else if (!Op.compare_lower("s1e2r")) {
2484       // SYS #4, C7, C8, #0
2485       SYS_ALIAS(4, 7, 8, 0);
2486     } else if (!Op.compare_lower("s1e3r")) {
2487       // SYS #6, C7, C8, #0
2488       SYS_ALIAS(6, 7, 8, 0);
2489     } else if (!Op.compare_lower("s1e1w")) {
2490       // SYS #0, C7, C8, #1
2491       SYS_ALIAS(0, 7, 8, 1);
2492     } else if (!Op.compare_lower("s1e2w")) {
2493       // SYS #4, C7, C8, #1
2494       SYS_ALIAS(4, 7, 8, 1);
2495     } else if (!Op.compare_lower("s1e3w")) {
2496       // SYS #6, C7, C8, #1
2497       SYS_ALIAS(6, 7, 8, 1);
2498     } else if (!Op.compare_lower("s1e0r")) {
2499       // SYS #0, C7, C8, #2
2500       SYS_ALIAS(0, 7, 8, 2);
2501     } else if (!Op.compare_lower("s1e0w")) {
2502       // SYS #0, C7, C8, #3
2503       SYS_ALIAS(0, 7, 8, 3);
2504     } else if (!Op.compare_lower("s12e1r")) {
2505       // SYS #4, C7, C8, #4
2506       SYS_ALIAS(4, 7, 8, 4);
2507     } else if (!Op.compare_lower("s12e1w")) {
2508       // SYS #4, C7, C8, #5
2509       SYS_ALIAS(4, 7, 8, 5);
2510     } else if (!Op.compare_lower("s12e0r")) {
2511       // SYS #4, C7, C8, #6
2512       SYS_ALIAS(4, 7, 8, 6);
2513     } else if (!Op.compare_lower("s12e0w")) {
2514       // SYS #4, C7, C8, #7
2515       SYS_ALIAS(4, 7, 8, 7);
2517       return TokError("invalid operand for AT instruction");
2519   } else if (Mnemonic == "tlbi") {
2520     if (!Op.compare_lower("vmalle1is")) {
2521       // SYS #0, C8, C3, #0
2522       SYS_ALIAS(0, 8, 3, 0);
2523     } else if (!Op.compare_lower("alle2is")) {
2524       // SYS #4, C8, C3, #0
2525       SYS_ALIAS(4, 8, 3, 0);
2526     } else if (!Op.compare_lower("alle3is")) {
2527       // SYS #6, C8, C3, #0
2528       SYS_ALIAS(6, 8, 3, 0);
2529     } else if (!Op.compare_lower("vae1is")) {
2530       // SYS #0, C8, C3, #1
2531       SYS_ALIAS(0, 8, 3, 1);
2532     } else if (!Op.compare_lower("vae2is")) {
2533       // SYS #4, C8, C3, #1
2534       SYS_ALIAS(4, 8, 3, 1);
2535     } else if (!Op.compare_lower("vae3is")) {
2536       // SYS #6, C8, C3, #1
2537       SYS_ALIAS(6, 8, 3, 1);
2538     } else if (!Op.compare_lower("aside1is")) {
2539       // SYS #0, C8, C3, #2
2540       SYS_ALIAS(0, 8, 3, 2);
2541     } else if (!Op.compare_lower("vaae1is")) {
2542       // SYS #0, C8, C3, #3
2543       SYS_ALIAS(0, 8, 3, 3);
2544     } else if (!Op.compare_lower("alle1is")) {
2545       // SYS #4, C8, C3, #4
2546       SYS_ALIAS(4, 8, 3, 4);
2547     } else if (!Op.compare_lower("vale1is")) {
2548       // SYS #0, C8, C3, #5
2549       SYS_ALIAS(0, 8, 3, 5);
2550     } else if (!Op.compare_lower("vaale1is")) {
2551       // SYS #0, C8, C3, #7
2552       SYS_ALIAS(0, 8, 3, 7);
2553     } else if (!Op.compare_lower("vmalle1")) {
2554       // SYS #0, C8, C7, #0
2555       SYS_ALIAS(0, 8, 7, 0);
2556     } else if (!Op.compare_lower("alle2")) {
2557       // SYS #4, C8, C7, #0
2558       SYS_ALIAS(4, 8, 7, 0);
2559     } else if (!Op.compare_lower("vale2is")) {
2560       // SYS #4, C8, C3, #5
2561       SYS_ALIAS(4, 8, 3, 5);
2562     } else if (!Op.compare_lower("vale3is")) {
2563       // SYS #6, C8, C3, #5
2564       SYS_ALIAS(6, 8, 3, 5);
2565     } else if (!Op.compare_lower("alle3")) {
2566       // SYS #6, C8, C7, #0
2567       SYS_ALIAS(6, 8, 7, 0);
2568     } else if (!Op.compare_lower("vae1")) {
2569       // SYS #0, C8, C7, #1
2570       SYS_ALIAS(0, 8, 7, 1);
2571     } else if (!Op.compare_lower("vae2")) {
2572       // SYS #4, C8, C7, #1
2573       SYS_ALIAS(4, 8, 7, 1);
2574     } else if (!Op.compare_lower("vae3")) {
2575       // SYS #6, C8, C7, #1
2576       SYS_ALIAS(6, 8, 7, 1);
2577     } else if (!Op.compare_lower("aside1")) {
2578       // SYS #0, C8, C7, #2
2579       SYS_ALIAS(0, 8, 7, 2);
2580     } else if (!Op.compare_lower("vaae1")) {
2581       // SYS #0, C8, C7, #3
2582       SYS_ALIAS(0, 8, 7, 3);
2583     } else if (!Op.compare_lower("alle1")) {
2584       // SYS #4, C8, C7, #4
2585       SYS_ALIAS(4, 8, 7, 4);
2586     } else if (!Op.compare_lower("vale1")) {
2587       // SYS #0, C8, C7, #5
2588       SYS_ALIAS(0, 8, 7, 5);
2589     } else if (!Op.compare_lower("vale2")) {
2590       // SYS #4, C8, C7, #5
2591       SYS_ALIAS(4, 8, 7, 5);
2592     } else if (!Op.compare_lower("vale3")) {
2593       // SYS #6, C8, C7, #5
2594       SYS_ALIAS(6, 8, 7, 5);
2595     } else if (!Op.compare_lower("vaale1")) {
2596       // SYS #0, C8, C7, #7
2597       SYS_ALIAS(0, 8, 7, 7);
2598     } else if (!Op.compare_lower("ipas2e1")) {
2599       // SYS #4, C8, C4, #1
2600       SYS_ALIAS(4, 8, 4, 1);
2601     } else if (!Op.compare_lower("ipas2le1")) {
2602       // SYS #4, C8, C4, #5
2603       SYS_ALIAS(4, 8, 4, 5);
2604     } else if (!Op.compare_lower("ipas2e1is")) {
2605       // SYS #4, C8, C0, #1
2606       SYS_ALIAS(4, 8, 0, 1);
2607     } else if (!Op.compare_lower("ipas2le1is")) {
2608       // SYS #4, C8, C0, #5
2609       SYS_ALIAS(4, 8, 0, 5);
2610     } else if (!Op.compare_lower("vmalls12e1")) {
2611       // SYS #4, C8, C7, #6
2612       SYS_ALIAS(4, 8, 7, 6);
2613     } else if (!Op.compare_lower("vmalls12e1is")) {
2614       // SYS #4, C8, C3, #6
2615       SYS_ALIAS(4, 8, 3, 6);
2617       return TokError("invalid operand for TLBI instruction");
2623   Parser.Lex(); // Eat operand.
// Heuristic: "...all..." ops operate on everything and take no register;
// everything else requires an Xt register operand.
2625   bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2626   bool HasRegister = false;
2628   // Check for the optional register operand.
2629   if (getLexer().is(AsmToken::Comma)) {
2630     Parser.Lex(); // Eat comma.
2632     if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2633       return TokError("expected register operand");
2638   if (getLexer().isNot(AsmToken::EndOfStatement)) {
2639     Parser.eatToEndOfStatement();
2640     return TokError("unexpected token in argument list");
2643   if (ExpectRegister && !HasRegister) {
2644     return TokError("specified " + Mnemonic + " op requires a register");
2646   else if (!ExpectRegister && HasRegister) {
2647     return TokError("specified " + Mnemonic + " op does not use a register");
2650   Parser.Lex(); // Consume the EndOfStatement
// Parse a barrier operand for DSB/DMB/ISB: either an immediate in [0, 15]
// (with optional '#') or a named option (e.g. "sy"); ISB accepts only "sy"
// as a name.
2654 AArch64AsmParser::OperandMatchResultTy
2655 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2656   MCAsmParser &Parser = getParser();
2657   const AsmToken &Tok = Parser.getTok();
2659   // Can be either a #imm style literal or an option name
2660   bool Hash = Tok.is(AsmToken::Hash);
2661   if (Hash || Tok.is(AsmToken::Integer)) {
2662     // Immediate operand.
2664       Parser.Lex(); // Eat the '#'
2665     const MCExpr *ImmVal;
2666     SMLoc ExprLoc = getLoc();
2667     if (getParser().parseExpression(ImmVal))
2668       return MatchOperand_ParseFail;
2669     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2671       Error(ExprLoc, "immediate value expected for barrier operand");
2672       return MatchOperand_ParseFail;
2674     if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2675       Error(ExprLoc, "barrier operand out of range");
2676       return MatchOperand_ParseFail;
// Recover the canonical name (if any) for this barrier value so the
// operand can be printed symbolically.
2679     auto Mapper = AArch64DB::DBarrierMapper();
2681         Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2682     Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2683                                                       ExprLoc, getContext()));
2684     return MatchOperand_Success;
2687   if (Tok.isNot(AsmToken::Identifier)) {
2688     TokError("invalid operand for instruction");
2689     return MatchOperand_ParseFail;
2693   auto Mapper = AArch64DB::DBarrierMapper();
2695       Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2697     TokError("invalid barrier option name");
2698     return MatchOperand_ParseFail;
2701   // The only valid named option for ISB is 'sy'
2702   if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2703     TokError("'sy' or #imm operand expected");
2704     return MatchOperand_ParseFail;
2707   Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2708                                                     getLoc(), getContext()));
2709   Parser.Lex(); // Consume the option
2711   return MatchOperand_Success;
// Parse a system-register name for MRS/MSR. The identifier is looked up in
// all three name spaces (MRS-readable, MSR-writable, PState fields) and the
// resulting encodings (-1U when unknown) are stored on one SysReg operand;
// instruction matching later decides which encoding applies.
2714 AArch64AsmParser::OperandMatchResultTy
2715 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2716   MCAsmParser &Parser = getParser();
2717   const AsmToken &Tok = Parser.getTok();
2719   if (Tok.isNot(AsmToken::Identifier))
2720     return MatchOperand_NoMatch;
2723   auto MRSMapper = AArch64SysReg::MRSMapper();
2724   uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2725                                          getSTI().getFeatureBits(), IsKnown);
2726   assert(IsKnown == (MRSReg != -1U) &&
2727          "register should be -1 if and only if it's unknown");
2729   auto MSRMapper = AArch64SysReg::MSRMapper();
2730   uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2731                                          getSTI().getFeatureBits(), IsKnown);
2732   assert(IsKnown == (MSRReg != -1U) &&
2733          "register should be -1 if and only if it's unknown");
2735   auto PStateMapper = AArch64PState::PStateMapper();
2736   uint32_t PStateField =
2737       PStateMapper.fromString(Tok.getString(),
2738                               getSTI().getFeatureBits(), IsKnown);
2739   assert(IsKnown == (PStateField != -1U) &&
2740          "register should be -1 if and only if it's unknown");
2742   Operands.push_back(AArch64Operand::CreateSysReg(
2743       Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2744   Parser.Lex(); // Eat identifier
2746   return MatchOperand_Success;
2749 /// tryParseVectorRegister - Parse a vector register operand.
// Also consumes an optional layout qualifier (pushed as a literal token) and
// an optional "[imm]" lane index (pushed as a VectorIndex operand).
// Returns true on failure, per the parser convention.
2750 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2751   MCAsmParser &Parser = getParser();
2752   if (Parser.getTok().isNot(AsmToken::Identifier))
2756   // Check for a vector register specifier first.
2758   int64_t Reg = tryMatchVectorRegister(Kind, false);
2762       AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2763   // If there was an explicit qualifier, that goes on as a literal text
2767         AArch64Operand::CreateToken(Kind, false, S, getContext()));
2769   // If there is an index specifier following the register, parse that too.
2770   if (Parser.getTok().is(AsmToken::LBrac)) {
2771     SMLoc SIdx = getLoc();
2772     Parser.Lex(); // Eat left bracket token.
2774     const MCExpr *ImmVal;
2775     if (getParser().parseExpression(ImmVal))
2777     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2779       TokError("immediate value expected for vector index");
2784     if (Parser.getTok().isNot(AsmToken::RBrac)) {
2785       Error(E, "']' expected");
2789     Parser.Lex(); // Eat right bracket token.
2791     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2798 /// parseRegister - Parse a non-vector register operand.
// Tries a vector register first (which handles its own qualifiers/index),
// then falls back to a scalar register. Returns true on failure.
2799 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2800   MCAsmParser &Parser = getParser();
2802   // Try for a vector register.
2803   if (!tryParseVectorRegister(Operands))
2806   // Try for a scalar register.
2807   int64_t Reg = tryParseRegister();
2811       AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2813   // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2814   // as a string token in the instruction itself.
2815   if (getLexer().getKind() == AsmToken::LBrac) {
2816     SMLoc LBracS = getLoc();
2818     const AsmToken &Tok = Parser.getTok();
2819     if (Tok.is(AsmToken::Integer)) {
2820       SMLoc IntS = getLoc();
2821       int64_t Val = Tok.getIntVal();
2824         if (getLexer().getKind() == AsmToken::RBrac) {
2825           SMLoc RBracS = getLoc();
// Emit "[", "1", "]" as plain tokens so the matcher can treat the lane
// suffix as part of the instruction syntax rather than a real operand.
2828               AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2830               AArch64Operand::CreateToken("1", false, IntS, getContext()));
2832               AArch64Operand::CreateToken("]", false, RBracS, getContext()));
// Parse an immediate that may carry an ELF relocation specifier of the form
// ":spec:expr" (e.g. ":lo12:sym"). The parsed expression is wrapped in an
// AArch64MCExpr recording the variant kind. Returns true on failure.
2842 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2843   MCAsmParser &Parser = getParser();
2844   bool HasELFModifier = false;
2845   AArch64MCExpr::VariantKind RefKind;
2847   if (Parser.getTok().is(AsmToken::Colon)) {
2848     Parser.Lex(); // Eat ':"
2849     HasELFModifier = true;
2851     if (Parser.getTok().isNot(AsmToken::Identifier)) {
2852       Error(Parser.getTok().getLoc(),
2853             "expect relocation specifier in operand after ':'");
2857     std::string LowerCase = Parser.getTok().getIdentifier().lower();
2858     RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2859                   .Case("lo12", AArch64MCExpr::VK_LO12)
2860                   .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2861                   .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2862                   .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2863                   .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2864                   .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2865                   .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2866                   .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2867                   .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2868                   .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2869                   .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2870                   .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2871                   .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2872                   .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2873                   .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2874                   .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2875                   .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2876                   .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2877                   .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2878                   .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2879                   .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2880                   .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2881                   .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2882                   .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2883                   .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2884                   .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2885                   .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2886                   .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2887                   .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2888                   .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2889                   .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2890                   .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2891                   .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2892                   .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2893                   .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2894                   .Default(AArch64MCExpr::VK_INVALID);
2896     if (RefKind == AArch64MCExpr::VK_INVALID) {
2897       Error(Parser.getTok().getLoc(),
2898             "expect relocation specifier in operand after ':'");
2902     Parser.Lex(); // Eat identifier
2904     if (Parser.getTok().isNot(AsmToken::Colon)) {
2905       Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2908     Parser.Lex(); // Eat ':'
2911   if (getParser().parseExpression(ImmVal))
// Wrap only when a modifier was present; plain expressions pass through.
2915     ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2920 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Accepts "{ Vn.T - Vm.T }" range syntax or "{ Vn.T, Vn+1.T, ... }" comma
// syntax; all elements must share the same layout suffix and register numbers
// must be consecutive modulo 32. An optional "[imm]" lane index may follow.
// Returns true on failure.
2921 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2922   MCAsmParser &Parser = getParser();
2923   assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2925   Parser.Lex(); // Eat left bracket token.
2927   int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2930   int64_t PrevReg = FirstReg;
2933   if (Parser.getTok().is(AsmToken::Minus)) {
2934     Parser.Lex(); // Eat the minus.
2936     SMLoc Loc = getLoc();
2938     int64_t Reg = tryMatchVectorRegister(NextKind, true);
2941     // Any Kind suffices must match on all regs in the list.
2942     if (Kind != NextKind)
2943       return Error(Loc, "mismatched register size suffix");
// Distance with wraparound at V31; a range covers 2-4 registers total.
2945     unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2947     if (Space == 0 || Space > 3) {
2948       return Error(Loc, "invalid number of vectors");
2954     while (Parser.getTok().is(AsmToken::Comma)) {
2955       Parser.Lex(); // Eat the comma token.
2957       SMLoc Loc = getLoc();
2959       int64_t Reg = tryMatchVectorRegister(NextKind, true);
2962       // Any Kind suffices must match on all regs in the list.
2963       if (Kind != NextKind)
2964         return Error(Loc, "mismatched register size suffix");
2966       // Registers must be incremental (with wraparound at 31)
2967       if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2968           (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2969         return Error(Loc, "registers must be sequential");
2976   if (Parser.getTok().isNot(AsmToken::RCurly))
2977     return Error(getLoc(), "'}' expected");
2978   Parser.Lex(); // Eat the '}' token.
2981     return Error(S, "invalid number of vectors");
2983   unsigned NumElements = 0;
2984   char ElementKind = 0;
2986     parseValidVectorKind(Kind, NumElements, ElementKind);
2988   Operands.push_back(AArch64Operand::CreateVectorList(
2989       FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2991   // If there is an index specifier following the list, parse that too.
2992   if (Parser.getTok().is(AsmToken::LBrac)) {
2993     SMLoc SIdx = getLoc();
2994     Parser.Lex(); // Eat left bracket token.
2996     const MCExpr *ImmVal;
2997     if (getParser().parseExpression(ImmVal))
2999     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3001       TokError("immediate value expected for vector index");
3006     if (Parser.getTok().isNot(AsmToken::RBrac)) {
3007       Error(E, "']' expected");
3011     Parser.Lex(); // Eat right bracket token.
3013     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
// Parse a GPR64sp register optionally followed by ", #0" (as required by
// e.g. the LDRAA/STGP-style "Xn, #0" forms). Any index other than an
// absent one or a literal 0 is rejected.
3019 AArch64AsmParser::OperandMatchResultTy
3020 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3021   MCAsmParser &Parser = getParser();
3022   const AsmToken &Tok = Parser.getTok();
3023   if (!Tok.is(AsmToken::Identifier))
3024     return MatchOperand_NoMatch;
3026   unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3028   MCContext &Ctx = getContext();
3029   const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3030   if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3031     return MatchOperand_NoMatch;
3034   Parser.Lex(); // Eat register
3036   if (Parser.getTok().isNot(AsmToken::Comma)) {
3038         AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3039     return MatchOperand_Success;
3041   Parser.Lex(); // Eat comma.
3043   if (Parser.getTok().is(AsmToken::Hash))
3044     Parser.Lex(); // Eat hash
3046   if (Parser.getTok().isNot(AsmToken::Integer)) {
3047     Error(getLoc(), "index must be absent or #0");
3048     return MatchOperand_ParseFail;
3051   const MCExpr *ImmVal;
3052   if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3053       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3054     Error(getLoc(), "index must be absent or #0");
3055     return MatchOperand_ParseFail;
3059       AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3060   return MatchOperand_Success;
3063 /// parseOperand - Parse a arm instruction operand.  For now this parses the
3064 /// operand regardless of the mnemonic.
// Dispatches on the leading token: custom (tablegen'd) parsers first, then
// '[' memory starts, '{' vector lists, identifiers (cond codes, registers,
// shifts/extends, symbols), immediates/FP literals, and the "ldr Xn, =val"
// pseudo. Returns true on failure.
3065 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3066                                   bool invertCondCode) {
3067   MCAsmParser &Parser = getParser();
3068   // Check if the current operand has a custom associated parser, if so, try to
3069   // custom parse the operand, or fallback to the general approach.
3070   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3071   if (ResTy == MatchOperand_Success)
3073   // If there wasn't a custom match, try the generic matcher below. Otherwise,
3074   // there was a match, but an error occurred, in which case, just return that
3075   // the operand parsing failed.
3076   if (ResTy == MatchOperand_ParseFail)
3079   // Nothing custom, so do general case parsing.
3081   switch (getLexer().getKind()) {
3085     if (parseSymbolicImmVal(Expr))
3086       return Error(S, "invalid operand");
3088     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3089     Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3092   case AsmToken::LBrac: {
3093     SMLoc Loc = Parser.getTok().getLoc();
3094     Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3096     Parser.Lex(); // Eat '['
3098     // There's no comma after a '[', so we can parse the next operand
3100     return parseOperand(Operands, false, false);
3102   case AsmToken::LCurly:
3103     return parseVectorList(Operands);
3104   case AsmToken::Identifier: {
3105     // If we're expecting a Condition Code operand, then just parse that.
3107       return parseCondCode(Operands, invertCondCode);
3109     // If it's a register name, parse it.
3110     if (!parseRegister(Operands))
3113     // This could be an optional "shift" or "extend" operand.
3114     OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3115     // We can only continue if no tokens were eaten.
3116     if (GotShift != MatchOperand_NoMatch)
3119     // This was not a register so parse other operands that start with an
3120     // identifier (like labels) as expressions and create them as immediates.
3121     const MCExpr *IdVal;
3123     if (getParser().parseExpression(IdVal))
3126     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3127     Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3130   case AsmToken::Integer:
3131   case AsmToken::Real:
3132   case AsmToken::Hash: {
3133     // #42 -> immediate.
3135     if (getLexer().is(AsmToken::Hash))
3138     // Parse a negative sign
3139     bool isNegative = false;
3140     if (Parser.getTok().is(AsmToken::Minus)) {
3142       // We need to consume this token only when we have a Real, otherwise
3143       // we let parseSymbolicImmVal take care of it
3144       if (Parser.getLexer().peekTok().is(AsmToken::Real))
3148     // The only Real that should come through here is a literal #0.0 for
3149     // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3150     // so convert the value.
3151     const AsmToken &Tok = Parser.getTok();
3152     if (Tok.is(AsmToken::Real)) {
3153       APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3154       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3155       if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3156           Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3157           Mnemonic != "fcmlt")
3158         return TokError("unexpected floating point literal");
3159       else if (IntVal != 0 || isNegative)
3160         return TokError("expected floating-point constant #0.0");
3161       Parser.Lex(); // Eat the token.
// Represent #0.0 as the two raw tokens "#0" and ".0" that the matcher
// expects for the fcmp-family zero-compare forms.
3164           AArch64Operand::CreateToken("#0", false, S, getContext()));
3166           AArch64Operand::CreateToken(".0", false, S, getContext()));
3170     const MCExpr *ImmVal;
3171     if (parseSymbolicImmVal(ImmVal))
3174     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3175     Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3178   case AsmToken::Equal: {
3179     SMLoc Loc = Parser.getTok().getLoc();
3180     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3181       return Error(Loc, "unexpected token in operand");
3182     Parser.Lex(); // Eat '='
3183     const MCExpr *SubExprVal;
3184     if (getParser().parseExpression(SubExprVal))
3187     if (Operands.size() < 2 ||
3188         !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3189       return Error(Loc, "Only valid when first operand is register");
3192         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3193             Operands[1]->getReg());
3195     MCContext& Ctx = getContext();
3196     E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3197     // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3198     if (isa<MCConstantExpr>(SubExprVal)) {
3199       uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Normalize the constant into a 16-bit payload plus an LSL shift so it
// can be encoded as MOVZ; X regs allow shifts up to 48, W regs up to 16.
3200       uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3201       while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3205       if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3206           Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3207           Operands.push_back(AArch64Operand::CreateImm(
3208                      MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3210             Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3211                        ShiftAmt, true, S, E, Ctx));
3214       APInt Simm = APInt(64, Imm << ShiftAmt);
3215       // check if the immediate is an unsigned or signed 32-bit int for W regs
3216       if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3217         return Error(Loc, "Immediate too large for register");
3219     // If it is a label or an imm that cannot fit in a movz, put it into CP.
3220     const MCExpr *CPLoc =
3221         getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3222     Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3228 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3230 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3231 StringRef Name, SMLoc NameLoc,
3232 OperandVector &Operands) {
3233 MCAsmParser &Parser = getParser();
3234 Name = StringSwitch<StringRef>(Name.lower())
3235 .Case("beq", "b.eq")
3236 .Case("bne", "b.ne")
3237 .Case("bhs", "b.hs")
3238 .Case("bcs", "b.cs")
3239 .Case("blo", "b.lo")
3240 .Case("bcc", "b.cc")
3241 .Case("bmi", "b.mi")
3242 .Case("bpl", "b.pl")
3243 .Case("bvs", "b.vs")
3244 .Case("bvc", "b.vc")
3245 .Case("bhi", "b.hi")
3246 .Case("bls", "b.ls")
3247 .Case("bge", "b.ge")
3248 .Case("blt", "b.lt")
3249 .Case("bgt", "b.gt")
3250 .Case("ble", "b.le")
3251 .Case("bal", "b.al")
3252 .Case("bnv", "b.nv")
3255 // First check for the AArch64-specific .req directive.
3256 if (Parser.getTok().is(AsmToken::Identifier) &&
3257 Parser.getTok().getIdentifier() == ".req") {
3258 parseDirectiveReq(Name, NameLoc);
3259 // We always return 'error' for this, as we're done with this
3260 // statement and don't need to match the 'instruction."
3264 // Create the leading tokens for the mnemonic, split by '.' characters.
3265 size_t Start = 0, Next = Name.find('.');
3266 StringRef Head = Name.slice(Start, Next);
3268 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3269 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3270 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3271 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3272 Parser.eatToEndOfStatement();
3277 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3280 // Handle condition codes for a branch mnemonic
3281 if (Head == "b" && Next != StringRef::npos) {
3283 Next = Name.find('.', Start + 1);
3284 Head = Name.slice(Start + 1, Next);
3286 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3287 (Head.data() - Name.data()));
3288 AArch64CC::CondCode CC = parseCondCodeString(Head);
3289 if (CC == AArch64CC::Invalid)
3290 return Error(SuffixLoc, "invalid condition code");
3292 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3294 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3297 // Add the remaining tokens in the mnemonic.
3298 while (Next != StringRef::npos) {
3300 Next = Name.find('.', Start + 1);
3301 Head = Name.slice(Start, Next);
3302 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3303 (Head.data() - Name.data()) + 1);
3305 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3308 // Conditional compare instructions have a Condition Code operand, which needs
3309 // to be parsed and an immediate operand created.
3310 bool condCodeFourthOperand =
3311 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3312 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3313 Head == "csinc" || Head == "csinv" || Head == "csneg");
3315 // These instructions are aliases to some of the conditional select
3316 // instructions. However, the condition code is inverted in the aliased
3319 // FIXME: Is this the correct way to handle these? Or should the parser
3320 // generate the aliased instructions directly?
3321 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3322 bool condCodeThirdOperand =
3323 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3325 // Read the remaining operands.
3326 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3327 // Read the first operand.
3328 if (parseOperand(Operands, false, false)) {
3329 Parser.eatToEndOfStatement();
3334 while (getLexer().is(AsmToken::Comma)) {
3335 Parser.Lex(); // Eat the comma.
3337 // Parse and remember the operand.
3338 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3339 (N == 3 && condCodeThirdOperand) ||
3340 (N == 2 && condCodeSecondOperand),
3341 condCodeSecondOperand || condCodeThirdOperand)) {
3342 Parser.eatToEndOfStatement();
3346 // After successfully parsing some operands there are two special cases to
3347 // consider (i.e. notional operands not separated by commas). Both are due
3348 // to memory specifiers:
3349 // + An RBrac will end an address for load/store/prefetch
3350 // + An '!' will indicate a pre-indexed operation.
3352 // It's someone else's responsibility to make sure these tokens are sane
3353 // in the given context!
3354 if (Parser.getTok().is(AsmToken::RBrac)) {
3355 SMLoc Loc = Parser.getTok().getLoc();
3356 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3361 if (Parser.getTok().is(AsmToken::Exclaim)) {
3362 SMLoc Loc = Parser.getTok().getLoc();
3363 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3372 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3373 SMLoc Loc = Parser.getTok().getLoc();
3374 Parser.eatToEndOfStatement();
3375 return Error(Loc, "unexpected token in argument list");
3378 Parser.Lex(); // Consume the EndOfStatement
3382 // FIXME: This entire function is a giant hack to provide us with decent
3383 // operand range validation/diagnostics until TableGen/MC can be extended
3384 // to support autogeneration of this kind of validation.
// Post-match semantic validator: returns true (after emitting a diagnostic
// via Error()) when the matched MCInst is architecturally unpredictable
// (writeback base overlapping a transfer register, Rt==Rt2 in a pair load)
// or carries an immediate expression the encoder cannot accept. Loc[i] is
// the start location of operand i+1, captured by the caller for diagnostics.
// NOTE(review): this listing is non-contiguous (original line numbers jump),
// so several guards, breaks and closing braces are elided from view below.
3385 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3386 SmallVectorImpl<SMLoc> &Loc) {
3387 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3388 // Check for indexed addressing modes w/ the base register being the
3389 // same as a destination/source register or pair load where
3390 // the Rt == Rt2. All of those are undefined behaviour.
3391 switch (Inst.getOpcode()) {
// Pre/post-indexed LDP: the writeback base Rn (operand 3) must not overlap
// either destination Rt/Rt2. isSubRegisterEq also catches W/X aliasing.
3392 case AArch64::LDPSWpre:
3393 case AArch64::LDPWpost:
3394 case AArch64::LDPWpre:
3395 case AArch64::LDPXpost:
3396 case AArch64::LDPXpre: {
3397 unsigned Rt = Inst.getOperand(1).getReg();
3398 unsigned Rt2 = Inst.getOperand(2).getReg();
3399 unsigned Rn = Inst.getOperand(3).getReg();
3400 if (RI->isSubRegisterEq(Rn, Rt))
3401 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3402 "is also a destination");
3403 if (RI->isSubRegisterEq(Rn, Rt2))
3404 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3405 "is also a destination");
// Non-writeback LDP: the two destinations must differ.
// NOTE(review): the `if (Rt == Rt2)` guard line is elided in this listing.
3408 case AArch64::LDPDi:
3409 case AArch64::LDPQi:
3410 case AArch64::LDPSi:
3411 case AArch64::LDPSWi:
3412 case AArch64::LDPWi:
3413 case AArch64::LDPXi: {
3414 unsigned Rt = Inst.getOperand(0).getReg();
3415 unsigned Rt2 = Inst.getOperand(1).getReg();
3417 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// FP/SIMD pre/post-indexed LDP: only the Rt==Rt2 check applies here (an FP
// destination can never alias the integer writeback base).
3420 case AArch64::LDPDpost:
3421 case AArch64::LDPDpre:
3422 case AArch64::LDPQpost:
3423 case AArch64::LDPQpre:
3424 case AArch64::LDPSpost:
3425 case AArch64::LDPSpre:
3426 case AArch64::LDPSWpost: {
3427 unsigned Rt = Inst.getOperand(1).getReg();
3428 unsigned Rt2 = Inst.getOperand(2).getReg();
3430 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
3433 case AArch64::STPDpost:
3434 case AArch64::STPDpre:
3435 case AArch64::STPQpost:
3436 case AArch64::STPQpre:
3437 case AArch64::STPSpost:
3438 case AArch64::STPSpre:
3439 case AArch64::STPWpost:
3440 case AArch64::STPWpre:
3441 case AArch64::STPXpost:
3442 case AArch64::STPXpre: {
3443 unsigned Rt = Inst.getOperand(1).getReg();
3444 unsigned Rt2 = Inst.getOperand(2).getReg();
3445 unsigned Rn = Inst.getOperand(3).getReg();
3446 if (RI->isSubRegisterEq(Rn, Rt))
3447 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3448 "is also a source");
3449 if (RI->isSubRegisterEq(Rn, Rt2))
3450 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3451 "is also a source");
// Single-register pre/post-indexed LDR: writeback base must not overlap Rt.
3454 case AArch64::LDRBBpre:
3455 case AArch64::LDRBpre:
3456 case AArch64::LDRHHpre:
3457 case AArch64::LDRHpre:
3458 case AArch64::LDRSBWpre:
3459 case AArch64::LDRSBXpre:
3460 case AArch64::LDRSHWpre:
3461 case AArch64::LDRSHXpre:
3462 case AArch64::LDRSWpre:
3463 case AArch64::LDRWpre:
3464 case AArch64::LDRXpre:
3465 case AArch64::LDRBBpost:
3466 case AArch64::LDRBpost:
3467 case AArch64::LDRHHpost:
3468 case AArch64::LDRHpost:
3469 case AArch64::LDRSBWpost:
3470 case AArch64::LDRSBXpost:
3471 case AArch64::LDRSHWpost:
3472 case AArch64::LDRSHXpost:
3473 case AArch64::LDRSWpost:
3474 case AArch64::LDRWpost:
3475 case AArch64::LDRXpost: {
3476 unsigned Rt = Inst.getOperand(1).getReg();
3477 unsigned Rn = Inst.getOperand(2).getReg();
3478 if (RI->isSubRegisterEq(Rn, Rt))
3479 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3480 "is also a source");
// Single-register pre/post-indexed STR: same overlap check as above.
3483 case AArch64::STRBBpost:
3484 case AArch64::STRBpost:
3485 case AArch64::STRHHpost:
3486 case AArch64::STRHpost:
3487 case AArch64::STRWpost:
3488 case AArch64::STRXpost:
3489 case AArch64::STRBBpre:
3490 case AArch64::STRBpre:
3491 case AArch64::STRHHpre:
3492 case AArch64::STRHpre:
3493 case AArch64::STRWpre:
3494 case AArch64::STRXpre: {
3495 unsigned Rt = Inst.getOperand(1).getReg();
3496 unsigned Rn = Inst.getOperand(2).getReg();
3497 if (RI->isSubRegisterEq(Rn, Rt))
3498 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3499 "is also a source");
3504 // Now check immediate ranges. Separate from the above as there is overlap
3505 // in the instructions being checked and this keeps the nested conditionals
3507 switch (Inst.getOpcode()) {
3508 case AArch64::ADDSWri:
3509 case AArch64::ADDSXri:
3510 case AArch64::ADDWri:
3511 case AArch64::ADDXri:
3512 case AArch64::SUBSWri:
3513 case AArch64::SUBSXri:
3514 case AArch64::SUBWri:
3515 case AArch64::SUBXri: {
3516 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3517 // some slight duplication here.
// Symbolic immediates (relocations) are only legal on specific opcodes; a
// classified-but-unsupported expression falls through to the error below.
3518 if (Inst.getOperand(2).isExpr()) {
3519 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3520 AArch64MCExpr::VariantKind ELFRefKind;
3521 MCSymbolRefExpr::VariantKind DarwinRefKind;
3523 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3524 return Error(Loc[2], "invalid immediate expression");
3527 // Only allow these with ADDXri.
3528 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3529 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3530 Inst.getOpcode() == AArch64::ADDXri)
3533 // Only allow these with ADDXri/ADDWri
3534 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3535 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3536 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3537 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3538 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3539 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3540 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3541 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3542 (Inst.getOpcode() == AArch64::ADDXri ||
3543 Inst.getOpcode() == AArch64::ADDWri))
3546 // Don't allow expressions in the immediate field otherwise
3547 return Error(Loc[2], "invalid immediate expression");
// Translates a matcher failure code (Match_* / predicate diagnostics) into a
// human-readable message anchored at Loc. Always returns true, following the
// Error() convention so callers can `return showMatchError(...)`.
// NOTE(review): this listing is non-contiguous — the `switch (ErrCode)`
// opener and several `return Error(Loc,` continuation lines are elided.
3556 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3558 case Match_MissingFeature:
3560 "instruction requires a CPU feature not currently enabled");
3561 case Match_InvalidOperand:
3562 return Error(Loc, "invalid operand for instruction");
3563 case Match_InvalidSuffix:
3564 return Error(Loc, "invalid type suffix for instruction");
3565 case Match_InvalidCondCode:
3566 return Error(Loc, "expected AArch64 condition code");
3567 case Match_AddSubRegExtendSmall:
3569 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3570 case Match_AddSubRegExtendLarge:
3572 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3573 case Match_AddSubSecondSource:
3575 "expected compatible register, symbol or integer in range [0, 4095]");
3576 case Match_LogicalSecondSource:
3577 return Error(Loc, "expected compatible register or logical immediate");
3578 case Match_InvalidMovImm32Shift:
3579 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3580 case Match_InvalidMovImm64Shift:
3581 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3582 case Match_AddSubRegShift32:
3584 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3585 case Match_AddSubRegShift64:
3587 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3588 case Match_InvalidFPImm:
3590 "expected compatible register or floating-point constant");
3591 case Match_InvalidMemoryIndexedSImm9:
3592 return Error(Loc, "index must be an integer in range [-256, 255].");
3593 case Match_InvalidMemoryIndexed4SImm7:
3594 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3595 case Match_InvalidMemoryIndexed8SImm7:
3596 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3597 case Match_InvalidMemoryIndexed16SImm7:
3598 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3599 case Match_InvalidMemoryWExtend8:
3601 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3602 case Match_InvalidMemoryWExtend16:
3604 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3605 case Match_InvalidMemoryWExtend32:
3607 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3608 case Match_InvalidMemoryWExtend64:
3610 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3611 case Match_InvalidMemoryWExtend128:
3613 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3614 case Match_InvalidMemoryXExtend8:
3616 "expected 'lsl' or 'sxtx' with optional shift of #0");
3617 case Match_InvalidMemoryXExtend16:
3619 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3620 case Match_InvalidMemoryXExtend32:
3622 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3623 case Match_InvalidMemoryXExtend64:
3625 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3626 case Match_InvalidMemoryXExtend128:
3628 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3629 case Match_InvalidMemoryIndexed1:
3630 return Error(Loc, "index must be an integer in range [0, 4095].");
3631 case Match_InvalidMemoryIndexed2:
3632 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3633 case Match_InvalidMemoryIndexed4:
3634 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3635 case Match_InvalidMemoryIndexed8:
3636 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3637 case Match_InvalidMemoryIndexed16:
3638 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3639 case Match_InvalidImm0_1:
3640 return Error(Loc, "immediate must be an integer in range [0, 1].");
3641 case Match_InvalidImm0_7:
3642 return Error(Loc, "immediate must be an integer in range [0, 7].");
3643 case Match_InvalidImm0_15:
3644 return Error(Loc, "immediate must be an integer in range [0, 15].");
3645 case Match_InvalidImm0_31:
3646 return Error(Loc, "immediate must be an integer in range [0, 31].");
3647 case Match_InvalidImm0_63:
3648 return Error(Loc, "immediate must be an integer in range [0, 63].");
3649 case Match_InvalidImm0_127:
3650 return Error(Loc, "immediate must be an integer in range [0, 127].");
3651 case Match_InvalidImm0_65535:
3652 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3653 case Match_InvalidImm1_8:
3654 return Error(Loc, "immediate must be an integer in range [1, 8].");
3655 case Match_InvalidImm1_16:
3656 return Error(Loc, "immediate must be an integer in range [1, 16].");
3657 case Match_InvalidImm1_32:
3658 return Error(Loc, "immediate must be an integer in range [1, 32].");
3659 case Match_InvalidImm1_64:
3660 return Error(Loc, "immediate must be an integer in range [1, 64].");
3661 case Match_InvalidIndex1:
3662 return Error(Loc, "expected lane specifier '[1]'");
3663 case Match_InvalidIndexB:
3664 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3665 case Match_InvalidIndexH:
3666 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3667 case Match_InvalidIndexS:
3668 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3669 case Match_InvalidIndexD:
3670 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3671 case Match_InvalidLabel:
3672 return Error(Loc, "expected label or encodable integer pc offset");
3674 return Error(Loc, "expected readable system register");
3676 return Error(Loc, "expected writable system register or pstate");
3677 case Match_MnemonicFail:
3678 return Error(Loc, "unrecognized instruction mnemonic");
// Unknown codes are a programming error, not a user error.
3680 llvm_unreachable("unexpected error code!");
// Forward declaration of the helper that names a single subtarget-feature
// bit; used in MatchAndEmitInstruction below when diagnosing a missing
// feature. NOTE(review): presumably defined by the TableGen-generated
// matcher include later in this file — confirm.
3684 static const char *getSubtargetFeatureName(uint64_t Val);
// MCTargetAsmParser override: the top-level match-and-emit entry point.
// First applies a series of hand-written alias rewrites directly on
// Operands (lsl->ubfm, bfc->bfm, bfi/sbfiz/ubfiz->*bfm, bfxil/sbfx/ubfx->
// *bfm, sxt*/uxt* W/X register fixups, fmov Rd,#0.0 -> zero register), then
// runs the generated matcher twice — short-form NEON table first, then the
// long-form table — validates the result with validateInstruction(), and
// reports failures through showMatchError().
// NOTE(review): this listing is non-contiguous; several declarations (e.g.
// the MCInst being matched, the MCStreamer &Out parameter), guards and
// closing braces are elided from view below.
3686 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3687 OperandVector &Operands,
3689 uint64_t &ErrorInfo,
3690 bool MatchingInlineAsm) {
3691 assert(!Operands.empty() && "Unexpect empty operand list!");
3692 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3693 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3695 StringRef Tok = Op.getToken();
3696 unsigned NumOperands = Operands.size();
// "lsl Rd, Rn, #imm" is an alias of "ubfm Rd, Rn, #(-imm MOD W), #(W-1-imm)"
// where W is the register width; rewrite the operands in place.
3698 if (NumOperands == 4 && Tok == "lsl") {
3699 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3700 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3701 if (Op2.isReg() && Op3.isImm()) {
3702 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3704 uint64_t Op3Val = Op3CE->getValue();
3705 uint64_t NewOp3Val = 0;
3706 uint64_t NewOp4Val = 0;
3707 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3709 NewOp3Val = (32 - Op3Val) & 0x1f;
3710 NewOp4Val = 31 - Op3Val;
3712 NewOp3Val = (64 - Op3Val) & 0x3f;
3713 NewOp4Val = 63 - Op3Val;
3716 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3717 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3719 Operands[0] = AArch64Operand::CreateToken(
3720 "ubfm", false, Op.getStartLoc(), getContext());
3721 Operands.push_back(AArch64Operand::CreateImm(
3722 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3723 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3724 Op3.getEndLoc(), getContext());
3727 } else if (NumOperands == 4 && Tok == "bfc") {
3728 // FIXME: Horrible hack to handle BFC->BFM alias.
3729 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
// NOTE(review): LSBOp/WidthOp are copied by value (unlike Op1's reference);
// possibly unintended, though only read below.
3730 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3731 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3733 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3734 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3735 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3737 if (LSBCE && WidthCE) {
3738 uint64_t LSB = LSBCE->getValue();
3739 uint64_t Width = WidthCE->getValue();
3741 uint64_t RegWidth = 0;
3742 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb/width against the destination register's width before
// computing the BFM immr/imms encoding.
3748 if (LSB >= RegWidth)
3749 return Error(LSBOp.getStartLoc(),
3750 "expected integer in range [0, 31]");
3751 if (Width < 1 || Width > RegWidth)
3752 return Error(WidthOp.getStartLoc(),
3753 "expected integer in range [1, 32]");
3757 ImmR = (32 - LSB) & 0x1f;
3759 ImmR = (64 - LSB) & 0x3f;
3761 uint64_t ImmS = Width - 1;
3763 if (ImmR != 0 && ImmS >= ImmR)
3764 return Error(WidthOp.getStartLoc(),
3765 "requested insert overflows register");
// BFC Rd, #lsb, #width == BFM Rd, ZR, #immr, #imms — synthesize the zero
// register as the source operand.
3767 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3768 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3769 Operands[0] = AArch64Operand::CreateToken(
3770 "bfm", false, Op.getStartLoc(), getContext());
3771 Operands[2] = AArch64Operand::CreateReg(
3772 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3773 SMLoc(), getContext());
3774 Operands[3] = AArch64Operand::CreateImm(
3775 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3776 Operands.emplace_back(
3777 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3778 WidthOp.getEndLoc(), getContext()));
3781 } else if (NumOperands == 5) {
3782 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3783 // UBFIZ -> UBFM aliases.
3784 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3785 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3786 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3787 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3789 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3790 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3791 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3793 if (Op3CE && Op4CE) {
3794 uint64_t Op3Val = Op3CE->getValue();
3795 uint64_t Op4Val = Op4CE->getValue();
3797 uint64_t RegWidth = 0;
3798 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3804 if (Op3Val >= RegWidth)
3805 return Error(Op3.getStartLoc(),
3806 "expected integer in range [0, 31]");
3807 if (Op4Val < 1 || Op4Val > RegWidth)
3808 return Error(Op4.getStartLoc(),
3809 "expected integer in range [1, 32]");
// Convert lsb/width into the *bfm immr/imms pair.
3811 uint64_t NewOp3Val = 0;
3813 NewOp3Val = (32 - Op3Val) & 0x1f;
3815 NewOp3Val = (64 - Op3Val) & 0x3f;
3817 uint64_t NewOp4Val = Op4Val - 1;
3819 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3820 return Error(Op4.getStartLoc(),
3821 "requested insert overflows register");
3823 const MCExpr *NewOp3 =
3824 MCConstantExpr::create(NewOp3Val, getContext());
3825 const MCExpr *NewOp4 =
3826 MCConstantExpr::create(NewOp4Val, getContext());
3827 Operands[3] = AArch64Operand::CreateImm(
3828 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3829 Operands[4] = AArch64Operand::CreateImm(
3830 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// Pick the matching *bfm mnemonic for the alias being rewritten.
3832 Operands[0] = AArch64Operand::CreateToken(
3833 "bfm", false, Op.getStartLoc(), getContext());
3834 else if (Tok == "sbfiz")
3835 Operands[0] = AArch64Operand::CreateToken(
3836 "sbfm", false, Op.getStartLoc(), getContext());
3837 else if (Tok == "ubfiz")
3838 Operands[0] = AArch64Operand::CreateToken(
3839 "ubfm", false, Op.getStartLoc(), getContext());
3841 llvm_unreachable("No valid mnemonic for alias?");
3845 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3846 // UBFX -> UBFM aliases.
3847 } else if (NumOperands == 5 &&
3848 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3849 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3850 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3851 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3853 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3854 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3855 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3857 if (Op3CE && Op4CE) {
3858 uint64_t Op3Val = Op3CE->getValue();
3859 uint64_t Op4Val = Op4CE->getValue();
3861 uint64_t RegWidth = 0;
3862 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3868 if (Op3Val >= RegWidth)
3869 return Error(Op3.getStartLoc(),
3870 "expected integer in range [0, 31]");
3871 if (Op4Val < 1 || Op4Val > RegWidth)
3872 return Error(Op4.getStartLoc(),
3873 "expected integer in range [1, 32]");
// For extract forms, imms = lsb + width - 1; immr stays lsb (unchanged).
3875 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3877 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3878 return Error(Op4.getStartLoc(),
3879 "requested extract overflows register");
3881 const MCExpr *NewOp4 =
3882 MCConstantExpr::create(NewOp4Val, getContext());
3883 Operands[4] = AArch64Operand::CreateImm(
3884 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3886 Operands[0] = AArch64Operand::CreateToken(
3887 "bfm", false, Op.getStartLoc(), getContext());
3888 else if (Tok == "sbfx")
3889 Operands[0] = AArch64Operand::CreateToken(
3890 "sbfm", false, Op.getStartLoc(), getContext());
3891 else if (Tok == "ubfx")
3892 Operands[0] = AArch64Operand::CreateToken(
3893 "ubfm", false, Op.getStartLoc(), getContext());
3895 llvm_unreachable("No valid mnemonic for alias?");
3900 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3901 // InstAlias can't quite handle this since the reg classes aren't
3903 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3904 // The source register can be Wn here, but the matcher expects a
3905 // GPR64. Twiddle it here if necessary.
3906 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3908 unsigned Reg = getXRegFromWReg(Op.getReg());
3909 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3910 Op.getEndLoc(), getContext());
3913 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3914 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3915 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3917 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3919 // The source register can be Wn here, but the matcher expects a
3920 // GPR64. Twiddle it here if necessary.
3921 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3923 unsigned Reg = getXRegFromWReg(Op.getReg());
3924 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3925 Op.getEndLoc(), getContext());
3929 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3930 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3931 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3933 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3935 // The source register can be Wn here, but the matcher expects a
3936 // GPR32. Twiddle it here if necessary.
3937 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3939 unsigned Reg = getWRegFromXReg(Op.getReg());
3940 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3941 Op.getEndLoc(), getContext());
3946 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3947 if (NumOperands == 3 && Tok == "fmov") {
3948 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3949 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 is the sentinel used for the #0.0 literal.
3950 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3952 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3956 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3957 Op.getEndLoc(), getContext());
3962 // First try to match against the secondary set of tables containing the
3963 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3964 unsigned MatchResult =
3965 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3967 // If that fails, try against the alternate table containing long-form NEON:
3968 // "fadd v0.2s, v1.2s, v2.2s"
3969 if (MatchResult != Match_Success) {
3970 // But first, save the short-form match result: we can use it in case the
3971 // long-form match also fails.
3972 auto ShortFormNEONErrorInfo = ErrorInfo;
3973 auto ShortFormNEONMatchResult = MatchResult;
3976 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3978 // Now, both matches failed, and the long-form match failed on the mnemonic
3979 // suffix token operand. The short-form match failure is probably more
3980 // relevant: use it instead.
3981 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3982 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3983 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3984 MatchResult = ShortFormNEONMatchResult;
3985 ErrorInfo = ShortFormNEONErrorInfo;
3990 switch (MatchResult) {
3991 case Match_Success: {
3992 // Perform range checking and other semantic validations
3993 SmallVector<SMLoc, 8> OperandLocs;
3994 NumOperands = Operands.size();
3995 for (unsigned i = 1; i < NumOperands; ++i)
3996 OperandLocs.push_back(Operands[i]->getStartLoc());
3997 if (validateInstruction(Inst, OperandLocs))
4001 Out.EmitInstruction(Inst, getSTI());
4004 case Match_MissingFeature: {
4005 assert(ErrorInfo && "Unknown missing feature!");
4006 // Special case the error message for the very common case where only
4007 // a single subtarget feature is missing (neon, e.g.).
4008 std::string Msg = "instruction requires:";
// Walk each set bit of ErrorInfo and append the feature's name.
4010 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4011 if (ErrorInfo & Mask) {
4013 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4017 return Error(IDLoc, Msg);
4019 case Match_MnemonicFail:
4020 return showMatchError(IDLoc, MatchResult);
4021 case Match_InvalidOperand: {
4022 SMLoc ErrorLoc = IDLoc;
4024 if (ErrorInfo != ~0ULL) {
4025 if (ErrorInfo >= Operands.size())
4026 return Error(IDLoc, "too few operands for instruction");
4028 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4029 if (ErrorLoc == SMLoc())
4032 // If the match failed on a suffix token operand, tweak the diagnostic
4034 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4035 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4036 MatchResult = Match_InvalidSuffix;
4038 return showMatchError(ErrorLoc, MatchResult);
// All remaining diagnostic codes share one generic handler below.
4040 case Match_InvalidMemoryIndexed1:
4041 case Match_InvalidMemoryIndexed2:
4042 case Match_InvalidMemoryIndexed4:
4043 case Match_InvalidMemoryIndexed8:
4044 case Match_InvalidMemoryIndexed16:
4045 case Match_InvalidCondCode:
4046 case Match_AddSubRegExtendSmall:
4047 case Match_AddSubRegExtendLarge:
4048 case Match_AddSubSecondSource:
4049 case Match_LogicalSecondSource:
4050 case Match_AddSubRegShift32:
4051 case Match_AddSubRegShift64:
4052 case Match_InvalidMovImm32Shift:
4053 case Match_InvalidMovImm64Shift:
4054 case Match_InvalidFPImm:
4055 case Match_InvalidMemoryWExtend8:
4056 case Match_InvalidMemoryWExtend16:
4057 case Match_InvalidMemoryWExtend32:
4058 case Match_InvalidMemoryWExtend64:
4059 case Match_InvalidMemoryWExtend128:
4060 case Match_InvalidMemoryXExtend8:
4061 case Match_InvalidMemoryXExtend16:
4062 case Match_InvalidMemoryXExtend32:
4063 case Match_InvalidMemoryXExtend64:
4064 case Match_InvalidMemoryXExtend128:
4065 case Match_InvalidMemoryIndexed4SImm7:
4066 case Match_InvalidMemoryIndexed8SImm7:
4067 case Match_InvalidMemoryIndexed16SImm7:
4068 case Match_InvalidMemoryIndexedSImm9:
4069 case Match_InvalidImm0_1:
4070 case Match_InvalidImm0_7:
4071 case Match_InvalidImm0_15:
4072 case Match_InvalidImm0_31:
4073 case Match_InvalidImm0_63:
4074 case Match_InvalidImm0_127:
4075 case Match_InvalidImm0_65535:
4076 case Match_InvalidImm1_8:
4077 case Match_InvalidImm1_16:
4078 case Match_InvalidImm1_32:
4079 case Match_InvalidImm1_64:
4080 case Match_InvalidIndex1:
4081 case Match_InvalidIndexB:
4082 case Match_InvalidIndexH:
4083 case Match_InvalidIndexS:
4084 case Match_InvalidIndexD:
4085 case Match_InvalidLabel:
4088 if (ErrorInfo >= Operands.size())
4089 return Error(IDLoc, "too few operands for instruction");
4090 // Any time we get here, there's nothing fancy to do. Just get the
4091 // operand SMLoc and display the diagnostic.
4092 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4093 if (ErrorLoc == SMLoc())
4095 return showMatchError(ErrorLoc, MatchResult);
// Reaching here means a new Match_* code was added without a handler.
4099 llvm_unreachable("Implement any new match types added!");
4102 /// ParseDirective parses the arm specific directives
// Dispatch table for AArch64-specific assembler directives. Object-format
// checks gate format-specific directives: .inst is only accepted when the
// output is neither Mach-O nor COFF (i.e. ELF); everything else falls
// through to the Mach-O .loh handler.
4103 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4104 const MCObjectFileInfo::Environment Format =
4105 getContext().getObjectFileInfo()->getObjectFileType();
4106 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4107 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4109 StringRef IDVal = DirectiveID.getIdentifier();
4110 SMLoc Loc = DirectiveID.getLoc();
// Data directives: .hword/.word/.xword emit 2/4/8-byte values respectively.
4111 if (IDVal == ".hword")
4112 return parseDirectiveWord(2, Loc);
4113 if (IDVal == ".word")
4114 return parseDirectiveWord(4, Loc);
4115 if (IDVal == ".xword")
4116 return parseDirectiveWord(8, Loc);
4117 if (IDVal == ".tlsdesccall")
4118 return parseDirectiveTLSDescCall(Loc);
4119 if (IDVal == ".ltorg" || IDVal == ".pool")
4120 return parseDirectiveLtorg(Loc);
4121 if (IDVal == ".unreq")
4122 return parseDirectiveUnreq(Loc);
4124 if (!IsMachO && !IsCOFF) {
4125 if (IDVal == ".inst")
4126 return parseDirectiveInst(Loc);
// Not one of ours (or a .loh); parseDirectiveLOH rejects other names itself.
4129 return parseDirectiveLOH(IDVal, Loc);
4132 /// parseDirectiveWord
4133 /// ::= .word [ expression (, expression)* ]
// Emits a comma-separated list of expressions, each as a Size-byte value,
// via the streamer. Returns true on a parse error (Error() convention).
// NOTE(review): this listing is non-contiguous — the loop structure and the
// comma-consuming Lex() are elided from view.
4134 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4135 MCAsmParser &Parser = getParser();
4136 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4138 const MCExpr *Value;
4139 if (getParser().parseExpression(Value))
4142 getParser().getStreamer().EmitValue(Value, Size, L);
4144 if (getLexer().is(AsmToken::EndOfStatement))
4147 // FIXME: Improve diagnostic.
4148 if (getLexer().isNot(AsmToken::Comma))
4149 return Error(L, "unexpected token in directive");
4158 /// parseDirectiveInst
4159 /// ::= .inst opcode [, ...]
// Emits one or more raw 32-bit instruction words. Each operand must fold to
// an MCConstantExpr; non-constant expressions are diagnosed. Diagnostics
// here call Error() without returning it directly — NOTE(review): the
// elided lines presumably return after each Error; confirm control flow.
4160 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4161 MCAsmParser &Parser = getParser();
4162 if (getLexer().is(AsmToken::EndOfStatement)) {
4163 Parser.eatToEndOfStatement();
4164 Error(Loc, "expected expression following directive");
4171 if (getParser().parseExpression(Expr)) {
4172 Error(Loc, "expected expression");
4176 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4178 Error(Loc, "expected constant expression");
// Hand the encoded word to the target streamer (handles endianness etc.).
4182 getTargetStreamer().emitInst(Value->getValue());
4184 if (getLexer().is(AsmToken::EndOfStatement))
4187 if (getLexer().isNot(AsmToken::Comma)) {
4188 Error(Loc, "unexpected token in directive");
4192 Parser.Lex(); // Eat comma.
4199 // parseDirectiveTLSDescCall:
4200 // ::= .tlsdesccall symbol
// Parses the symbol operand, wraps it in a VK_TLSDESC AArch64MCExpr, and
// emits a TLSDESCCALL pseudo-instruction carrying that expression so the
// TLS descriptor relocation is attached at this point in the stream.
4201 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4203 if (getParser().parseIdentifier(Name))
4204 return Error(L, "expected symbol after directive");
4206 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4207 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4208 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4211 Inst.setOpcode(AArch64::TLSDESCCALL);
4212 Inst.addOperand(MCOperand::createExpr(Expr));
4214 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4218 /// ::= .loh <lohName | lohId> label1, ..., labelN
4219 /// The number of arguments depends on the loh identifier.
// Parses a Mach-O linker-optimization-hint directive: the LOH kind is given
// either as a recognized name or a raw numeric id, followed by the exact
// number of label arguments that kind requires.
4220 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4221 if (IDVal != MCLOHDirectiveName())
4224 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4225 if (getParser().getTok().isNot(AsmToken::Integer))
4226 return TokError("expected an identifier or a number in directive");
4227 // We successfully get a numeric value for the identifier.
4228 // Check if it is valid.
4229 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): `Id <= -1U` mixes a signed int64_t with an unsigned literal
// (the comparison promotes to unsigned) — looks suspicious; confirm the
// intended range check against isValidMCLOHType.
4230 if (Id <= -1U && !isValidMCLOHType(Id))
4231 return TokError("invalid numeric identifier in directive")
4232 Kind = (MCLOHType)Id;
4234 StringRef Name = getTok().getIdentifier();
4235 // We successfully parse an identifier.
4236 // Check if it is a recognized one.
4237 int Id = MCLOHNameToId(Name);
4240 return TokError("invalid identifier in directive");
4241 Kind = (MCLOHType)Id;
4243 // Consume the identifier.
4245 // Get the number of arguments of this LOH.
4246 int NbArgs = MCLOHIdToNbArgs(Kind);
4248 assert(NbArgs != -1 && "Invalid number of arguments");
// Collect exactly NbArgs comma-separated label symbols.
4250 SmallVector<MCSymbol *, 3> Args;
4251 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4253 if (getParser().parseIdentifier(Name))
4254 return TokError("expected identifier in directive");
4255 Args.push_back(getContext().getOrCreateSymbol(Name));
4257 if (Idx + 1 == NbArgs)
4259 if (getLexer().isNot(AsmToken::Comma))
4260 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4263 if (getLexer().isNot(AsmToken::EndOfStatement))
4264 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4266 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
/// Flush the pending constant pool at the current location.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
/// parseDirectiveReq
///  ::= name .req registername
/// Record `name` as an alias for a scalar or vector register in RegisterReqs.
/// Redefining an alias to a different register only warns; the first
/// definition wins.  Returns true on error.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  unsigned RegNum = tryParseRegister();
  bool IsVector = false;

  // Not a scalar register name; retry as a vector register.
  // NOTE(review): `Kind` is declared on a line elided from this view.
  if (RegNum == static_cast<unsigned>(-1)) {
    RegNum = tryMatchVectorRegister(Kind, false);
    if (!Kind.empty()) {
      // A layout suffix (e.g. ".4s") is not allowed on a .req alias.
      Error(SRegLoc, "vector register without type specifier expected");

  if (RegNum == static_cast<unsigned>(-1)) {
    Parser.eatToEndOfStatement();
    Error(SRegLoc, "register name or alias expected");

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
    Parser.eatToEndOfStatement();

  Parser.Lex(); // Consume the EndOfStatement

  // insert() keeps any existing entry, so compare to detect a conflicting
  // redefinition and warn instead of overwriting.
  auto pair = std::make_pair(IsVector, RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
/// parseDirectiveUnreq
///  ::= .unreq registername
/// Remove a register alias previously created with .req.  Erasing an unknown
/// alias is a silent no-op (StringMap::erase tolerates missing keys).
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
    Parser.eatToEndOfStatement();
  // Aliases are stored lower-cased, so normalize the lookup key to match.
  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
  Parser.Lex(); // Eat the identifier.
/// Decompose \p Expr into an optional AArch64 (ELF) or Darwin variant kind,
/// a symbol reference, and an optional constant addend.  Accepts either a
/// bare (possibly variant-wrapped) symbol or `symbol +/- constant`.  Returns
/// true when the expression is classifiable; rejects expressions that mix
/// ELF and Darwin modifier syntax.
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;

  // Strip an AArch64-specific modifier wrapper (e.g. :lo12:) first.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();

  // Otherwise expect a binary expression: symbol +/- constant.
  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  DarwinRefKind = SE->getKind();

  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)

  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());

  Addend = AddendExpr->getValue();
  // Fold the subtraction into the sign of the addend.
  if (BE->getOpcode() == MCBinaryExpr::Sub)

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
/// Force static initialization.
/// Registers this asm parser for all three AArch64 target triples
/// (little-endian, big-endian, and Darwin's "arm64").
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
  RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4389 #define GET_REGISTER_MATCHER
4390 #define GET_SUBTARGET_FEATURE_NAME
4391 #define GET_MATCHER_IMPLEMENTATION
4392 #include "AArch64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
/// Target hook for operand classes the generated matcher cannot handle:
/// matches an operand against a fixed-value immediate class by comparing its
/// constant expression to the expected value.  Returns Match_Success or
/// Match_InvalidOperand.
/// NOTE(review): the switch that maps the match-class kind to ExpectedVal is
/// elided from this view — confirm against the full file.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  int64_t ExpectedVal;
    return Match_InvalidOperand;
    return Match_InvalidOperand;
  // Only a known constant immediate can match a fixed-value class.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;
/// Parse a consecutive even/odd GPR pair (e.g. "x0, x1" or "w2, w3") as used
/// by CASP-style instructions, producing a single sequential super-register
/// operand.  Both registers must be the same size, the first must have an
/// even encoding, and the second must encode as first+1.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;

  int FirstReg = tryParseRegister();
  if (FirstReg == -1) {
    return MatchOperand_ParseFail;
  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register fixes the pair's width (X vs W).
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The hardware requires the first register of the pair to be even.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(M, "expected comma");
    return MatchOperand_ParseFail;

  int SecondReg = tryParseRegister();
  if (SecondReg ==-1) {
    return MatchOperand_ParseFail;

  // Second register must be the odd partner: encoding first+1, same width.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
            "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;

  // Fold the two registers into the sequential-pair super-register.
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);

  Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),

  return MatchOperand_Success;