1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
109 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
112 enum AArch64MatchResultTy {
113 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
114 #define GET_OPERAND_DIAGNOSTIC_TYPES
115 #include "AArch64GenAsmMatcher.inc"
117 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
118 const MCInstrInfo &MII, const MCTargetOptions &Options)
119 : MCTargetAsmParser(Options, STI) {
120 MCAsmParserExtension::Initialize(Parser);
121 MCStreamer &S = getParser().getStreamer();
122 if (S.getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(S);
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 class AArch64Operand : public MCParsedAsmOperand {
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
217 uint32_t PStateField;
230 struct ShiftExtendOp {
231 AArch64_AM::ShiftExtendType Type;
233 bool HasExplicitAmount;
243 struct VectorListOp VectorList;
244 struct VectorIndexOp VectorIndex;
246 struct ShiftedImmOp ShiftedImm;
247 struct CondCodeOp CondCode;
248 struct FPImmOp FPImm;
249 struct BarrierOp Barrier;
250 struct SysRegOp SysReg;
251 struct SysCRImmOp SysCRImm;
252 struct PrefetchOp Prefetch;
253 struct ShiftExtendOp ShiftExtend;
256 // Keep the MCContext around as the MCExprs may need manipulated during
257 // the add<>Operands() calls.
261 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
263 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
265 StartLoc = o.StartLoc;
275 ShiftedImm = o.ShiftedImm;
278 CondCode = o.CondCode;
290 VectorList = o.VectorList;
293 VectorIndex = o.VectorIndex;
299 SysCRImm = o.SysCRImm;
302 Prefetch = o.Prefetch;
305 ShiftExtend = o.ShiftExtend;
310 /// getStartLoc - Get the location of the first token of this operand.
311 SMLoc getStartLoc() const override { return StartLoc; }
312 /// getEndLoc - Get the location of the last token of this operand.
313 SMLoc getEndLoc() const override { return EndLoc; }
315 StringRef getToken() const {
316 assert(Kind == k_Token && "Invalid access!");
317 return StringRef(Tok.Data, Tok.Length);
320 bool isTokenSuffix() const {
321 assert(Kind == k_Token && "Invalid access!");
325 const MCExpr *getImm() const {
326 assert(Kind == k_Immediate && "Invalid access!");
330 const MCExpr *getShiftedImmVal() const {
331 assert(Kind == k_ShiftedImm && "Invalid access!");
332 return ShiftedImm.Val;
335 unsigned getShiftedImmShift() const {
336 assert(Kind == k_ShiftedImm && "Invalid access!");
337 return ShiftedImm.ShiftAmount;
340 AArch64CC::CondCode getCondCode() const {
341 assert(Kind == k_CondCode && "Invalid access!");
342 return CondCode.Code;
345 unsigned getFPImm() const {
346 assert(Kind == k_FPImm && "Invalid access!");
350 unsigned getBarrier() const {
351 assert(Kind == k_Barrier && "Invalid access!");
355 StringRef getBarrierName() const {
356 assert(Kind == k_Barrier && "Invalid access!");
357 return StringRef(Barrier.Data, Barrier.Length);
360 unsigned getReg() const override {
361 assert(Kind == k_Register && "Invalid access!");
365 unsigned getVectorListStart() const {
366 assert(Kind == k_VectorList && "Invalid access!");
367 return VectorList.RegNum;
370 unsigned getVectorListCount() const {
371 assert(Kind == k_VectorList && "Invalid access!");
372 return VectorList.Count;
375 unsigned getVectorIndex() const {
376 assert(Kind == k_VectorIndex && "Invalid access!");
377 return VectorIndex.Val;
380 StringRef getSysReg() const {
381 assert(Kind == k_SysReg && "Invalid access!");
382 return StringRef(SysReg.Data, SysReg.Length);
385 unsigned getSysCR() const {
386 assert(Kind == k_SysCR && "Invalid access!");
390 unsigned getPrefetch() const {
391 assert(Kind == k_Prefetch && "Invalid access!");
395 StringRef getPrefetchName() const {
396 assert(Kind == k_Prefetch && "Invalid access!");
397 return StringRef(Prefetch.Data, Prefetch.Length);
400 AArch64_AM::ShiftExtendType getShiftExtendType() const {
401 assert(Kind == k_ShiftExtend && "Invalid access!");
402 return ShiftExtend.Type;
405 unsigned getShiftExtendAmount() const {
406 assert(Kind == k_ShiftExtend && "Invalid access!");
407 return ShiftExtend.Amount;
410 bool hasShiftExtendAmount() const {
411 assert(Kind == k_ShiftExtend && "Invalid access!");
412 return ShiftExtend.HasExplicitAmount;
415 bool isImm() const override { return Kind == k_Immediate; }
416 bool isMem() const override { return false; }
417 bool isSImm9() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val < 256);
426 bool isSImm7s4() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
435 bool isSImm7s8() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
444 bool isSImm7s16() const {
447 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
450 int64_t Val = MCE->getValue();
451 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
454 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
455 AArch64MCExpr::VariantKind ELFRefKind;
456 MCSymbolRefExpr::VariantKind DarwinRefKind;
458 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
460 // If we don't understand the expression, assume the best and
461 // let the fixup and relocation code deal with it.
465 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
466 ELFRefKind == AArch64MCExpr::VK_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
474 // Note that we don't range-check the addend. It's adjusted modulo page
475 // size when converted, so there is no "out of range" condition when using
477 return Addend >= 0 && (Addend % Scale) == 0;
478 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
479 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
480 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
487 template <int Scale> bool isUImm12Offset() const {
491 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493 return isSymbolicUImm12Offset(getImm(), Scale);
495 int64_t Val = MCE->getValue();
496 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
499 bool isImm0_1() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val >= 0 && Val < 2);
508 bool isImm0_7() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val >= 0 && Val < 8);
517 bool isImm1_8() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val > 0 && Val < 9);
526 bool isImm0_15() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val >= 0 && Val < 16);
535 bool isImm1_16() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val > 0 && Val < 17);
544 bool isImm0_31() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 0 && Val < 32);
553 bool isImm1_31() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 1 && Val < 32);
562 bool isImm1_32() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 1 && Val < 33);
571 bool isImm0_63() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 0 && Val < 64);
580 bool isImm1_63() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 1 && Val < 64);
589 bool isImm1_64() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 1 && Val < 65);
598 bool isImm0_127() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 128);
607 bool isImm0_255() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 0 && Val < 256);
616 bool isImm0_65535() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
623 return (Val >= 0 && Val < 65536);
625 bool isImm32_63() const {
628 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
631 int64_t Val = MCE->getValue();
632 return (Val >= 32 && Val < 64);
634 bool isLogicalImm32() const {
637 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
640 int64_t Val = MCE->getValue();
641 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
644 return AArch64_AM::isLogicalImmediate(Val, 32);
646 bool isLogicalImm64() const {
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
654 bool isLogicalImm32Not() const {
657 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
660 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
661 return AArch64_AM::isLogicalImmediate(Val, 32);
663 bool isLogicalImm64Not() const {
666 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
669 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
671 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
672 bool isAddSubImm() const {
673 if (!isShiftedImm() && !isImm())
678 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
679 if (isShiftedImm()) {
680 unsigned Shift = ShiftedImm.ShiftAmount;
681 Expr = ShiftedImm.Val;
682 if (Shift != 0 && Shift != 12)
688 AArch64MCExpr::VariantKind ELFRefKind;
689 MCSymbolRefExpr::VariantKind DarwinRefKind;
691 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
692 DarwinRefKind, Addend)) {
693 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
694 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
695 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
696 || ELFRefKind == AArch64MCExpr::VK_LO12
697 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
698 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
699 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
700 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
701 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
702 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
703 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
706 // Otherwise it should be a real immediate in range:
707 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
708 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
710 bool isAddSubImmNeg() const {
711 if (!isShiftedImm() && !isImm())
716 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
717 if (isShiftedImm()) {
718 unsigned Shift = ShiftedImm.ShiftAmount;
719 Expr = ShiftedImm.Val;
720 if (Shift != 0 && Shift != 12)
725 // Otherwise it should be a real negative immediate in range:
726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
727 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
729 bool isCondCode() const { return Kind == k_CondCode; }
730 bool isSIMDImmType10() const {
733 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
736 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
738 bool isBranchTarget26() const {
741 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
744 int64_t Val = MCE->getValue();
747 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
749 bool isPCRelLabel19() const {
752 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
755 int64_t Val = MCE->getValue();
758 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
760 bool isBranchTarget14() const {
763 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
766 int64_t Val = MCE->getValue();
769 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
773 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
777 AArch64MCExpr::VariantKind ELFRefKind;
778 MCSymbolRefExpr::VariantKind DarwinRefKind;
780 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
781 DarwinRefKind, Addend)) {
784 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
787 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
788 if (ELFRefKind == AllowedModifiers[i])
795 bool isMovZSymbolG3() const {
796 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
799 bool isMovZSymbolG2() const {
800 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
801 AArch64MCExpr::VK_TPREL_G2,
802 AArch64MCExpr::VK_DTPREL_G2});
805 bool isMovZSymbolG1() const {
806 return isMovWSymbol({
807 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
808 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
809 AArch64MCExpr::VK_DTPREL_G1,
813 bool isMovZSymbolG0() const {
814 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
815 AArch64MCExpr::VK_TPREL_G0,
816 AArch64MCExpr::VK_DTPREL_G0});
819 bool isMovKSymbolG3() const {
820 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
823 bool isMovKSymbolG2() const {
824 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
827 bool isMovKSymbolG1() const {
828 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
829 AArch64MCExpr::VK_TPREL_G1_NC,
830 AArch64MCExpr::VK_DTPREL_G1_NC});
833 bool isMovKSymbolG0() const {
835 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
836 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
839 template<int RegWidth, int Shift>
840 bool isMOVZMovAlias() const {
841 if (!isImm()) return false;
843 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
844 if (!CE) return false;
845 uint64_t Value = CE->getValue();
848 Value &= 0xffffffffULL;
850 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
851 if (Value == 0 && Shift != 0)
854 return (Value & ~(0xffffULL << Shift)) == 0;
857 template<int RegWidth, int Shift>
858 bool isMOVNMovAlias() const {
859 if (!isImm()) return false;
861 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
862 if (!CE) return false;
863 uint64_t Value = CE->getValue();
865 // MOVZ takes precedence over MOVN.
866 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
867 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
872 Value &= 0xffffffffULL;
874 return (Value & ~(0xffffULL << Shift)) == 0;
877 bool isFPImm() const { return Kind == k_FPImm; }
878 bool isBarrier() const { return Kind == k_Barrier; }
879 bool isSysReg() const { return Kind == k_SysReg; }
880 bool isMRSSystemRegister() const {
881 if (!isSysReg()) return false;
883 return SysReg.MRSReg != -1U;
885 bool isMSRSystemRegister() const {
886 if (!isSysReg()) return false;
887 return SysReg.MSRReg != -1U;
889 bool isSystemPStateFieldWithImm0_1() const {
890 if (!isSysReg()) return false;
891 return (SysReg.PStateField == AArch64PState::PAN ||
892 SysReg.PStateField == AArch64PState::UAO);
894 bool isSystemPStateFieldWithImm0_15() const {
895 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
896 return SysReg.PStateField != -1U;
898 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
899 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
900 bool isVectorRegLo() const {
901 return Kind == k_Register && Reg.isVector &&
902 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
905 bool isGPR32as64() const {
906 return Kind == k_Register && !Reg.isVector &&
907 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
909 bool isWSeqPair() const {
910 return Kind == k_Register && !Reg.isVector &&
911 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
914 bool isXSeqPair() const {
915 return Kind == k_Register && !Reg.isVector &&
916 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
920 bool isGPR64sp0() const {
921 return Kind == k_Register && !Reg.isVector &&
922 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
925 /// Is this a vector list with the type implicit (presumably attached to the
926 /// instruction itself)?
927 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
928 return Kind == k_VectorList && VectorList.Count == NumRegs &&
929 !VectorList.ElementKind;
932 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
933 bool isTypedVectorList() const {
934 if (Kind != k_VectorList)
936 if (VectorList.Count != NumRegs)
938 if (VectorList.ElementKind != ElementKind)
940 return VectorList.NumElements == NumElements;
943 bool isVectorIndex1() const {
944 return Kind == k_VectorIndex && VectorIndex.Val == 1;
946 bool isVectorIndexB() const {
947 return Kind == k_VectorIndex && VectorIndex.Val < 16;
949 bool isVectorIndexH() const {
950 return Kind == k_VectorIndex && VectorIndex.Val < 8;
952 bool isVectorIndexS() const {
953 return Kind == k_VectorIndex && VectorIndex.Val < 4;
955 bool isVectorIndexD() const {
956 return Kind == k_VectorIndex && VectorIndex.Val < 2;
958 bool isToken() const override { return Kind == k_Token; }
959 bool isTokenEqual(StringRef Str) const {
960 return Kind == k_Token && getToken() == Str;
962 bool isSysCR() const { return Kind == k_SysCR; }
963 bool isPrefetch() const { return Kind == k_Prefetch; }
964 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
965 bool isShifter() const {
966 if (!isShiftExtend())
969 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
970 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
971 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
972 ST == AArch64_AM::MSL);
974 bool isExtend() const {
975 if (!isShiftExtend())
978 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
979 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
980 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
981 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
982 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
983 ET == AArch64_AM::LSL) &&
984 getShiftExtendAmount() <= 4;
987 bool isExtend64() const {
990 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
991 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
992 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
994 bool isExtendLSL64() const {
997 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
998 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
999 ET == AArch64_AM::LSL) &&
1000 getShiftExtendAmount() <= 4;
1003 template<int Width> bool isMemXExtend() const {
1006 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1007 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1008 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1009 getShiftExtendAmount() == 0);
1012 template<int Width> bool isMemWExtend() const {
1015 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1016 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1017 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1018 getShiftExtendAmount() == 0);
1021 template <unsigned width>
1022 bool isArithmeticShifter() const {
1026 // An arithmetic shifter is LSL, LSR, or ASR.
1027 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1028 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1029 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1032 template <unsigned width>
1033 bool isLogicalShifter() const {
1037 // A logical shifter is LSL, LSR, ASR or ROR.
1038 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1039 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1040 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1041 getShiftExtendAmount() < width;
1044 bool isMovImm32Shifter() const {
1048 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1049 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1050 if (ST != AArch64_AM::LSL)
1052 uint64_t Val = getShiftExtendAmount();
1053 return (Val == 0 || Val == 16);
1056 bool isMovImm64Shifter() const {
1060 // A MOVi shifter is LSL of 0 or 16.
1061 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1062 if (ST != AArch64_AM::LSL)
1064 uint64_t Val = getShiftExtendAmount();
1065 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1068 bool isLogicalVecShifter() const {
1072 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1073 unsigned Shift = getShiftExtendAmount();
1074 return getShiftExtendType() == AArch64_AM::LSL &&
1075 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1078 bool isLogicalVecHalfWordShifter() const {
1079 if (!isLogicalVecShifter())
1082 // A logical vector shifter is a left shift by 0 or 8.
1083 unsigned Shift = getShiftExtendAmount();
1084 return getShiftExtendType() == AArch64_AM::LSL &&
1085 (Shift == 0 || Shift == 8);
1088 bool isMoveVecShifter() const {
1089 if (!isShiftExtend())
1092 // A logical vector shifter is a left shift by 8 or 16.
1093 unsigned Shift = getShiftExtendAmount();
1094 return getShiftExtendType() == AArch64_AM::MSL &&
1095 (Shift == 8 || Shift == 16);
1098 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1099 // to LDUR/STUR when the offset is not legal for the former but is for
1100 // the latter. As such, in addition to checking for being a legal unscaled
1101 // address, also check that it is not a legal scaled address. This avoids
1102 // ambiguity in the matcher.
1104 bool isSImm9OffsetFB() const {
1105 return isSImm9() && !isUImm12Offset<Width / 8>();
1108 bool isAdrpLabel() const {
1109 // Validation was handled during parsing, so we just sanity check that
1110 // something didn't go haywire.
1114 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1115 int64_t Val = CE->getValue();
1116 int64_t Min = - (4096 * (1LL << (21 - 1)));
1117 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1118 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1124 bool isAdrLabel() const {
1125 // Validation was handled during parsing, so we just sanity check that
1126 // something didn't go haywire.
1130 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1131 int64_t Val = CE->getValue();
1132 int64_t Min = - (1LL << (21 - 1));
1133 int64_t Max = ((1LL << (21 - 1)) - 1);
1134 return Val >= Min && Val <= Max;
1140 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1141 // Add as immediates when possible. Null MCExpr = 0.
1143 Inst.addOperand(MCOperand::createImm(0));
1144 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1145 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1147 Inst.addOperand(MCOperand::createExpr(Expr));
1150 void addRegOperands(MCInst &Inst, unsigned N) const {
1151 assert(N == 1 && "Invalid number of operands!");
1152 Inst.addOperand(MCOperand::createReg(getReg()));
1155 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1156 assert(N == 1 && "Invalid number of operands!");
1158 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1160 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1161 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1162 RI->getEncodingValue(getReg()));
1164 Inst.addOperand(MCOperand::createReg(Reg));
1167 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1170 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1171 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1174 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1177 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1178 Inst.addOperand(MCOperand::createReg(getReg()));
1181 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::createReg(getReg()));
1186 template <unsigned NumRegs>
1187 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1188 assert(N == 1 && "Invalid number of operands!");
1189 static const unsigned FirstRegs[] = { AArch64::D0,
1192 AArch64::D0_D1_D2_D3 };
1193 unsigned FirstReg = FirstRegs[NumRegs - 1];
1196 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1199 template <unsigned NumRegs>
1200 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1201 assert(N == 1 && "Invalid number of operands!");
1202 static const unsigned FirstRegs[] = { AArch64::Q0,
1205 AArch64::Q0_Q1_Q2_Q3 };
1206 unsigned FirstReg = FirstRegs[NumRegs - 1];
1209 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1212 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1213 assert(N == 1 && "Invalid number of operands!");
1214 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1217 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1218 assert(N == 1 && "Invalid number of operands!");
1219 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1222 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1227 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1228 assert(N == 1 && "Invalid number of operands!");
1229 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1232 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1233 assert(N == 1 && "Invalid number of operands!");
1234 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1237 void addImmOperands(MCInst &Inst, unsigned N) const {
1238 assert(N == 1 && "Invalid number of operands!");
1239 // If this is a pageoff symrefexpr with an addend, adjust the addend
1240 // to be only the page-offset portion. Otherwise, just add the expr
1242 addExpr(Inst, getImm());
1245 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1246 assert(N == 2 && "Invalid number of operands!");
1247 if (isShiftedImm()) {
1248 addExpr(Inst, getShiftedImmVal());
1249 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1251 addExpr(Inst, getImm());
1252 Inst.addOperand(MCOperand::createImm(0));
1256 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1257 assert(N == 2 && "Invalid number of operands!");
1259 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1260 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1261 int64_t Val = -CE->getValue();
1262 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1264 Inst.addOperand(MCOperand::createImm(Val));
1265 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1268 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1269 assert(N == 1 && "Invalid number of operands!");
1270 Inst.addOperand(MCOperand::createImm(getCondCode()));
1273 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1274 assert(N == 1 && "Invalid number of operands!");
1275 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1277 addExpr(Inst, getImm());
1279 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1282 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1283 addImmOperands(Inst, N);
1287 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1288 assert(N == 1 && "Invalid number of operands!");
1289 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::createExpr(getImm()));
1295 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1298 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1299 assert(N == 1 && "Invalid number of operands!");
1300 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1301 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1304 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1305 assert(N == 1 && "Invalid number of operands!");
1306 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1307 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1310 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1313 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1316 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1319 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1322 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1323 assert(N == 1 && "Invalid number of operands!");
1324 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1325 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1328 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1329 assert(N == 1 && "Invalid number of operands!");
1330 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1331 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1334 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1335 assert(N == 1 && "Invalid number of operands!");
1336 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1337 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1340 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1343 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1346 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1347 assert(N == 1 && "Invalid number of operands!");
1348 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1349 assert(MCE && "Invalid constant immediate operand!");
1350 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1353 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1354 assert(N == 1 && "Invalid number of operands!");
1355 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1356 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1359 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1360 assert(N == 1 && "Invalid number of operands!");
1361 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1362 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1365 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1366 assert(N == 1 && "Invalid number of operands!");
1367 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1368 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1371 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1374 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1377 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1378 assert(N == 1 && "Invalid number of operands!");
1379 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1380 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1383 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1384 assert(N == 1 && "Invalid number of operands!");
1385 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1386 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1389 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1390 assert(N == 1 && "Invalid number of operands!");
1391 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1392 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1395 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1396 assert(N == 1 && "Invalid number of operands!");
1397 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1398 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1401 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1402 assert(N == 1 && "Invalid number of operands!");
1403 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1404 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1407 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1408 assert(N == 1 && "Invalid number of operands!");
1409 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1410 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1413 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1414 assert(N == 1 && "Invalid number of operands!");
1415 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1417 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1418 Inst.addOperand(MCOperand::createImm(encoding));
1421 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1422 assert(N == 1 && "Invalid number of operands!");
1423 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1424 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1425 Inst.addOperand(MCOperand::createImm(encoding));
1428 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1429 assert(N == 1 && "Invalid number of operands!");
1430 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1431 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1432 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1433 Inst.addOperand(MCOperand::createImm(encoding));
1436 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1437 assert(N == 1 && "Invalid number of operands!");
1438 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1440 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1441 Inst.addOperand(MCOperand::createImm(encoding));
1444 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1445 assert(N == 1 && "Invalid number of operands!");
1446 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1447 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1448 Inst.addOperand(MCOperand::createImm(encoding));
1451 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1452 // Branch operands don't encode the low bits, so shift them off
1453 // here. If it's a label, however, just put it on directly as there's
1454 // not enough information now to do anything.
1455 assert(N == 1 && "Invalid number of operands!");
1456 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1458 addExpr(Inst, getImm());
1461 assert(MCE && "Invalid constant immediate operand!");
1462 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1465 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1466 // Branch operands don't encode the low bits, so shift them off
1467 // here. If it's a label, however, just put it on directly as there's
1468 // not enough information now to do anything.
1469 assert(N == 1 && "Invalid number of operands!");
1470 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1472 addExpr(Inst, getImm());
1475 assert(MCE && "Invalid constant immediate operand!");
1476 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1479 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1480 // Branch operands don't encode the low bits, so shift them off
1481 // here. If it's a label, however, just put it on directly as there's
1482 // not enough information now to do anything.
1483 assert(N == 1 && "Invalid number of operands!");
1484 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1486 addExpr(Inst, getImm());
1489 assert(MCE && "Invalid constant immediate operand!");
1490 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1493 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && "Invalid number of operands!");
1495 Inst.addOperand(MCOperand::createImm(getFPImm()));
1498 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1499 assert(N == 1 && "Invalid number of operands!");
1500 Inst.addOperand(MCOperand::createImm(getBarrier()));
1503 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1504 assert(N == 1 && "Invalid number of operands!");
1506 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1509 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1510 assert(N == 1 && "Invalid number of operands!");
1512 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1515 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1516 assert(N == 1 && "Invalid number of operands!");
1518 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1521 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1522 assert(N == 1 && "Invalid number of operands!");
1524 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1527 void addSysCROperands(MCInst &Inst, unsigned N) const {
1528 assert(N == 1 && "Invalid number of operands!");
1529 Inst.addOperand(MCOperand::createImm(getSysCR()));
1532 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1533 assert(N == 1 && "Invalid number of operands!");
1534 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1537 void addShifterOperands(MCInst &Inst, unsigned N) const {
1538 assert(N == 1 && "Invalid number of operands!");
1540 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1541 Inst.addOperand(MCOperand::createImm(Imm));
1544 void addExtendOperands(MCInst &Inst, unsigned N) const {
1545 assert(N == 1 && "Invalid number of operands!");
1546 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1547 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1548 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1549 Inst.addOperand(MCOperand::createImm(Imm));
1552 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1553 assert(N == 1 && "Invalid number of operands!");
1554 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1555 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1556 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1557 Inst.addOperand(MCOperand::createImm(Imm));
1560 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1561 assert(N == 2 && "Invalid number of operands!");
1562 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1563 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1564 Inst.addOperand(MCOperand::createImm(IsSigned));
1565 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1568 // For 8-bit load/store instructions with a register offset, both the
1569 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1570 // they're disambiguated by whether the shift was explicit or implicit rather
1572 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1573 assert(N == 2 && "Invalid number of operands!");
1574 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1575 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1576 Inst.addOperand(MCOperand::createImm(IsSigned));
1577 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1581 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1582 assert(N == 1 && "Invalid number of operands!");
1584 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1585 uint64_t Value = CE->getValue();
1586 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1590 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1591 assert(N == 1 && "Invalid number of operands!");
1593 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1594 uint64_t Value = CE->getValue();
1595 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1598 void print(raw_ostream &OS) const override;
1600 static std::unique_ptr<AArch64Operand>
1601 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1602 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1603 Op->Tok.Data = Str.data();
1604 Op->Tok.Length = Str.size();
1605 Op->Tok.IsSuffix = IsSuffix;
1611 static std::unique_ptr<AArch64Operand>
1612 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1613 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1614 Op->Reg.RegNum = RegNum;
1615 Op->Reg.isVector = isVector;
1621 static std::unique_ptr<AArch64Operand>
1622 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1623 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1624 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1625 Op->VectorList.RegNum = RegNum;
1626 Op->VectorList.Count = Count;
1627 Op->VectorList.NumElements = NumElements;
1628 Op->VectorList.ElementKind = ElementKind;
1634 static std::unique_ptr<AArch64Operand>
1635 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1636 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1637 Op->VectorIndex.Val = Idx;
1643 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1644 SMLoc E, MCContext &Ctx) {
1645 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1652 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1653 unsigned ShiftAmount,
1656 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1657 Op->ShiftedImm .Val = Val;
1658 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1664 static std::unique_ptr<AArch64Operand>
1665 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1667 Op->CondCode.Code = Code;
1673 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1675 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1676 Op->FPImm.Val = Val;
1682 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1686 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1687 Op->Barrier.Val = Val;
1688 Op->Barrier.Data = Str.data();
1689 Op->Barrier.Length = Str.size();
1695 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1698 uint32_t PStateField,
1700 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1701 Op->SysReg.Data = Str.data();
1702 Op->SysReg.Length = Str.size();
1703 Op->SysReg.MRSReg = MRSReg;
1704 Op->SysReg.MSRReg = MSRReg;
1705 Op->SysReg.PStateField = PStateField;
1711 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1712 SMLoc E, MCContext &Ctx) {
1713 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1714 Op->SysCRImm.Val = Val;
1720 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1724 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1725 Op->Prefetch.Val = Val;
1726 Op->Barrier.Data = Str.data();
1727 Op->Barrier.Length = Str.size();
1733 static std::unique_ptr<AArch64Operand>
1734 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1735 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1736 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1737 Op->ShiftExtend.Type = ShOp;
1738 Op->ShiftExtend.Amount = Val;
1739 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1746 } // end anonymous namespace.
1748 void AArch64Operand::print(raw_ostream &OS) const {
1751 OS << "<fpimm " << getFPImm() << "("
1752 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1755 StringRef Name = getBarrierName();
1757 OS << "<barrier " << Name << ">";
1759 OS << "<barrier invalid #" << getBarrier() << ">";
1765 case k_ShiftedImm: {
1766 unsigned Shift = getShiftedImmShift();
1767 OS << "<shiftedimm ";
1768 OS << *getShiftedImmVal();
1769 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1773 OS << "<condcode " << getCondCode() << ">";
1776 OS << "<register " << getReg() << ">";
1778 case k_VectorList: {
1779 OS << "<vectorlist ";
1780 unsigned Reg = getVectorListStart();
1781 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1782 OS << Reg + i << " ";
1787 OS << "<vectorindex " << getVectorIndex() << ">";
1790 OS << "<sysreg: " << getSysReg() << '>';
1793 OS << "'" << getToken() << "'";
1796 OS << "c" << getSysCR();
1799 StringRef Name = getPrefetchName();
1801 OS << "<prfop " << Name << ">";
1803 OS << "<prfop invalid #" << getPrefetch() << ">";
1806 case k_ShiftExtend: {
1807 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1808 << getShiftExtendAmount();
1809 if (!hasShiftExtendAmount())
1817 /// @name Auto-generated Match Functions
1820 static unsigned MatchRegisterName(StringRef Name);
1824 static unsigned matchVectorRegName(StringRef Name) {
1825 return StringSwitch<unsigned>(Name.lower())
1826 .Case("v0", AArch64::Q0)
1827 .Case("v1", AArch64::Q1)
1828 .Case("v2", AArch64::Q2)
1829 .Case("v3", AArch64::Q3)
1830 .Case("v4", AArch64::Q4)
1831 .Case("v5", AArch64::Q5)
1832 .Case("v6", AArch64::Q6)
1833 .Case("v7", AArch64::Q7)
1834 .Case("v8", AArch64::Q8)
1835 .Case("v9", AArch64::Q9)
1836 .Case("v10", AArch64::Q10)
1837 .Case("v11", AArch64::Q11)
1838 .Case("v12", AArch64::Q12)
1839 .Case("v13", AArch64::Q13)
1840 .Case("v14", AArch64::Q14)
1841 .Case("v15", AArch64::Q15)
1842 .Case("v16", AArch64::Q16)
1843 .Case("v17", AArch64::Q17)
1844 .Case("v18", AArch64::Q18)
1845 .Case("v19", AArch64::Q19)
1846 .Case("v20", AArch64::Q20)
1847 .Case("v21", AArch64::Q21)
1848 .Case("v22", AArch64::Q22)
1849 .Case("v23", AArch64::Q23)
1850 .Case("v24", AArch64::Q24)
1851 .Case("v25", AArch64::Q25)
1852 .Case("v26", AArch64::Q26)
1853 .Case("v27", AArch64::Q27)
1854 .Case("v28", AArch64::Q28)
1855 .Case("v29", AArch64::Q29)
1856 .Case("v30", AArch64::Q30)
1857 .Case("v31", AArch64::Q31)
1861 static bool isValidVectorKind(StringRef Name) {
1862 return StringSwitch<bool>(Name.lower())
1872 // Accept the width neutral ones, too, for verbose syntax. If those
1873 // aren't used in the right places, the token operand won't match so
1874 // all will work out.
1882 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1883 char &ElementKind) {
1884 assert(isValidVectorKind(Name));
1886 ElementKind = Name.lower()[Name.size() - 1];
1889 if (Name.size() == 2)
1892 // Parse the lane count
1893 Name = Name.drop_front();
1894 while (isdigit(Name.front())) {
1895 NumElements = 10 * NumElements + (Name.front() - '0');
1896 Name = Name.drop_front();
1900 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1902 StartLoc = getLoc();
1903 RegNo = tryParseRegister();
1904 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1905 return (RegNo == (unsigned)-1);
1908 // Matches a register name or register alias previously defined by '.req'
1909 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1911 unsigned RegNum = isVector ? matchVectorRegName(Name)
1912 : MatchRegisterName(Name);
1915 // Check for aliases registered via .req. Canonicalize to lower case.
1916 // That's more consistent since register names are case insensitive, and
1917 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1918 auto Entry = RegisterReqs.find(Name.lower());
1919 if (Entry == RegisterReqs.end())
1921 // set RegNum if the match is the right kind of register
1922 if (isVector == Entry->getValue().first)
1923 RegNum = Entry->getValue().second;
1928 /// tryParseRegister - Try to parse a register name. The token must be an
1929 /// Identifier when called, and if it is a register name the token is eaten and
1930 /// the register is added to the operand list.
1931 int AArch64AsmParser::tryParseRegister() {
1932 MCAsmParser &Parser = getParser();
1933 const AsmToken &Tok = Parser.getTok();
1934 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1936 std::string lowerCase = Tok.getString().lower();
1937 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1938 // Also handle a few aliases of registers.
1940 RegNum = StringSwitch<unsigned>(lowerCase)
1941 .Case("fp", AArch64::FP)
1942 .Case("lr", AArch64::LR)
1943 .Case("x31", AArch64::XZR)
1944 .Case("w31", AArch64::WZR)
1950 Parser.Lex(); // Eat identifier token.
1954 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1955 /// kind specifier. If it is a register specifier, eat the token and return it.
1956 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1957 MCAsmParser &Parser = getParser();
1958 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1959 TokError("vector register expected");
1963 StringRef Name = Parser.getTok().getString();
1964 // If there is a kind specifier, it's separated from the register name by
1966 size_t Start = 0, Next = Name.find('.');
1967 StringRef Head = Name.slice(Start, Next);
1968 unsigned RegNum = matchRegisterNameAlias(Head, true);
1971 if (Next != StringRef::npos) {
1972 Kind = Name.slice(Next, StringRef::npos);
1973 if (!isValidVectorKind(Kind)) {
1974 TokError("invalid vector kind qualifier");
1978 Parser.Lex(); // Eat the register token.
1983 TokError("vector register expected");
1987 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1988 AArch64AsmParser::OperandMatchResultTy
1989 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1990 MCAsmParser &Parser = getParser();
1993 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1994 Error(S, "Expected cN operand where 0 <= N <= 15");
1995 return MatchOperand_ParseFail;
1998 StringRef Tok = Parser.getTok().getIdentifier();
1999 if (Tok[0] != 'c' && Tok[0] != 'C') {
2000 Error(S, "Expected cN operand where 0 <= N <= 15");
2001 return MatchOperand_ParseFail;
2005 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2006 if (BadNum || CRNum > 15) {
2007 Error(S, "Expected cN operand where 0 <= N <= 15");
2008 return MatchOperand_ParseFail;
2011 Parser.Lex(); // Eat identifier token.
2013 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2014 return MatchOperand_Success;
2017 /// tryParsePrefetch - Try to parse a prefetch operand.
2018 AArch64AsmParser::OperandMatchResultTy
2019 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2020 MCAsmParser &Parser = getParser();
2022 const AsmToken &Tok = Parser.getTok();
2023 // Either an identifier for named values or a 5-bit immediate.
2024 bool Hash = Tok.is(AsmToken::Hash);
2025 if (Hash || Tok.is(AsmToken::Integer)) {
2027 Parser.Lex(); // Eat hash token.
2028 const MCExpr *ImmVal;
2029 if (getParser().parseExpression(ImmVal))
2030 return MatchOperand_ParseFail;
2032 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2034 TokError("immediate value expected for prefetch operand");
2035 return MatchOperand_ParseFail;
2037 unsigned prfop = MCE->getValue();
2039 TokError("prefetch operand out of range, [0,31] expected");
2040 return MatchOperand_ParseFail;
2044 auto Mapper = AArch64PRFM::PRFMMapper();
2046 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2047 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2049 return MatchOperand_Success;
2052 if (Tok.isNot(AsmToken::Identifier)) {
2053 TokError("pre-fetch hint expected");
2054 return MatchOperand_ParseFail;
2058 auto Mapper = AArch64PRFM::PRFMMapper();
2060 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2062 TokError("pre-fetch hint expected");
2063 return MatchOperand_ParseFail;
2066 Parser.Lex(); // Eat identifier token.
2067 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2069 return MatchOperand_Success;
2072 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2074 AArch64AsmParser::OperandMatchResultTy
2075 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2076 MCAsmParser &Parser = getParser();
2080 if (Parser.getTok().is(AsmToken::Hash)) {
2081 Parser.Lex(); // Eat hash token.
2084 if (parseSymbolicImmVal(Expr))
2085 return MatchOperand_ParseFail;
2087 AArch64MCExpr::VariantKind ELFRefKind;
2088 MCSymbolRefExpr::VariantKind DarwinRefKind;
2090 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2091 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2092 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2093 // No modifier was specified at all; this is the syntax for an ELF basic
2094 // ADRP relocation (unfortunately).
2096 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2097 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2098 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2100 Error(S, "gotpage label reference not allowed an addend");
2101 return MatchOperand_ParseFail;
2102 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2103 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2104 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2105 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2106 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2107 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2108 // The operand must be an @page or @gotpage qualified symbolref.
2109 Error(S, "page or gotpage label reference expected");
2110 return MatchOperand_ParseFail;
2114 // We have either a label reference possibly with addend or an immediate. The
2115 // addend is a raw value here. The linker will adjust it to only reference the
2117 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2118 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2120 return MatchOperand_Success;
2123 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2125 AArch64AsmParser::OperandMatchResultTy
2126 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2127 MCAsmParser &Parser = getParser();
2131 if (Parser.getTok().is(AsmToken::Hash)) {
2132 Parser.Lex(); // Eat hash token.
2135 if (getParser().parseExpression(Expr))
2136 return MatchOperand_ParseFail;
2138 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2139 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2141 return MatchOperand_Success;
2144 /// tryParseFPImm - A floating point immediate expression operand.
2145 AArch64AsmParser::OperandMatchResultTy
2146 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2147 MCAsmParser &Parser = getParser();
2151 if (Parser.getTok().is(AsmToken::Hash)) {
2152 Parser.Lex(); // Eat '#'
2156 // Handle negation, as that still comes through as a separate token.
2157 bool isNegative = false;
2158 if (Parser.getTok().is(AsmToken::Minus)) {
2162 const AsmToken &Tok = Parser.getTok();
2163 if (Tok.is(AsmToken::Real)) {
2164 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2166 RealVal.changeSign();
2168 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2169 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2170 Parser.Lex(); // Eat the token.
2171 // Check for out of range values. As an exception, we let Zero through,
2172 // as we handle that special case in post-processing before matching in
2173 // order to use the zero register for it.
2174 if (Val == -1 && !RealVal.isPosZero()) {
2175 TokError("expected compatible register or floating-point constant");
2176 return MatchOperand_ParseFail;
2178 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2179 return MatchOperand_Success;
2181 if (Tok.is(AsmToken::Integer)) {
2183 if (!isNegative && Tok.getString().startswith("0x")) {
2184 Val = Tok.getIntVal();
2185 if (Val > 255 || Val < 0) {
2186 TokError("encoded floating point value out of range");
2187 return MatchOperand_ParseFail;
2190 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2191 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2192 // If we had a '-' in front, toggle the sign bit.
2193 IntVal ^= (uint64_t)isNegative << 63;
2194 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2196 Parser.Lex(); // Eat the token.
2197 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2198 return MatchOperand_Success;
2202 return MatchOperand_NoMatch;
2204 TokError("invalid floating point immediate");
2205 return MatchOperand_ParseFail;
2208 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2209 AArch64AsmParser::OperandMatchResultTy
2210 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2211 MCAsmParser &Parser = getParser();
2214 if (Parser.getTok().is(AsmToken::Hash))
2215 Parser.Lex(); // Eat '#'
2216 else if (Parser.getTok().isNot(AsmToken::Integer))
2217 // Operand should start from # or should be integer, emit error otherwise.
2218 return MatchOperand_NoMatch;
2221 if (parseSymbolicImmVal(Imm))
2222 return MatchOperand_ParseFail;
2223 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2224 uint64_t ShiftAmount = 0;
2225 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2227 int64_t Val = MCE->getValue();
2228 if (Val > 0xfff && (Val & 0xfff) == 0) {
2229 Imm = MCConstantExpr::create(Val >> 12, getContext());
2233 SMLoc E = Parser.getTok().getLoc();
2234 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2236 return MatchOperand_Success;
2242 // The optional operand must be "lsl #N" where N is non-negative.
2243 if (!Parser.getTok().is(AsmToken::Identifier) ||
2244 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2245 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2246 return MatchOperand_ParseFail;
2252 if (Parser.getTok().is(AsmToken::Hash)) {
2256 if (Parser.getTok().isNot(AsmToken::Integer)) {
2257 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2258 return MatchOperand_ParseFail;
2261 int64_t ShiftAmount = Parser.getTok().getIntVal();
2263 if (ShiftAmount < 0) {
2264 Error(Parser.getTok().getLoc(), "positive shift amount required");
2265 return MatchOperand_ParseFail;
2267 Parser.Lex(); // Eat the number
2269 SMLoc E = Parser.getTok().getLoc();
2270 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2271 S, E, getContext()));
2272 return MatchOperand_Success;
2275 /// parseCondCodeString - Parse a Condition Code string.
2276 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2277 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2278 .Case("eq", AArch64CC::EQ)
2279 .Case("ne", AArch64CC::NE)
2280 .Case("cs", AArch64CC::HS)
2281 .Case("hs", AArch64CC::HS)
2282 .Case("cc", AArch64CC::LO)
2283 .Case("lo", AArch64CC::LO)
2284 .Case("mi", AArch64CC::MI)
2285 .Case("pl", AArch64CC::PL)
2286 .Case("vs", AArch64CC::VS)
2287 .Case("vc", AArch64CC::VC)
2288 .Case("hi", AArch64CC::HI)
2289 .Case("ls", AArch64CC::LS)
2290 .Case("ge", AArch64CC::GE)
2291 .Case("lt", AArch64CC::LT)
2292 .Case("gt", AArch64CC::GT)
2293 .Case("le", AArch64CC::LE)
2294 .Case("al", AArch64CC::AL)
2295 .Case("nv", AArch64CC::NV)
2296 .Default(AArch64CC::Invalid);
2300 /// parseCondCode - Parse a Condition Code operand.
2301 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2302 bool invertCondCode) {
2303 MCAsmParser &Parser = getParser();
2305 const AsmToken &Tok = Parser.getTok();
2306 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2308 StringRef Cond = Tok.getString();
2309 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2310 if (CC == AArch64CC::Invalid)
2311 return TokError("invalid condition code");
2312 Parser.Lex(); // Eat identifier token.
2314 if (invertCondCode) {
2315 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2316 return TokError("condition codes AL and NV are invalid for this instruction");
2317 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2321 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2325 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2326 /// them if present.
2327 AArch64AsmParser::OperandMatchResultTy
2328 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2329 MCAsmParser &Parser = getParser();
2330 const AsmToken &Tok = Parser.getTok();
2331 std::string LowerID = Tok.getString().lower();
2332 AArch64_AM::ShiftExtendType ShOp =
2333 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2334 .Case("lsl", AArch64_AM::LSL)
2335 .Case("lsr", AArch64_AM::LSR)
2336 .Case("asr", AArch64_AM::ASR)
2337 .Case("ror", AArch64_AM::ROR)
2338 .Case("msl", AArch64_AM::MSL)
2339 .Case("uxtb", AArch64_AM::UXTB)
2340 .Case("uxth", AArch64_AM::UXTH)
2341 .Case("uxtw", AArch64_AM::UXTW)
2342 .Case("uxtx", AArch64_AM::UXTX)
2343 .Case("sxtb", AArch64_AM::SXTB)
2344 .Case("sxth", AArch64_AM::SXTH)
2345 .Case("sxtw", AArch64_AM::SXTW)
2346 .Case("sxtx", AArch64_AM::SXTX)
2347 .Default(AArch64_AM::InvalidShiftExtend);
2349 if (ShOp == AArch64_AM::InvalidShiftExtend)
2350 return MatchOperand_NoMatch;
2352 SMLoc S = Tok.getLoc();
2355 bool Hash = getLexer().is(AsmToken::Hash);
2356 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2357 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2358 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2359 ShOp == AArch64_AM::MSL) {
2360 // We expect a number here.
2361 TokError("expected #imm after shift specifier");
2362 return MatchOperand_ParseFail;
2365 // "extend" type operatoins don't need an immediate, #0 is implicit.
2366 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2368 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2369 return MatchOperand_Success;
2373 Parser.Lex(); // Eat the '#'.
2375 // Make sure we do actually have a number or a parenthesized expression.
2376 SMLoc E = Parser.getTok().getLoc();
2377 if (!Parser.getTok().is(AsmToken::Integer) &&
2378 !Parser.getTok().is(AsmToken::LParen)) {
2379 Error(E, "expected integer shift amount");
2380 return MatchOperand_ParseFail;
2383 const MCExpr *ImmVal;
2384 if (getParser().parseExpression(ImmVal))
2385 return MatchOperand_ParseFail;
2387 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2389 Error(E, "expected constant '#imm' after shift specifier");
2390 return MatchOperand_ParseFail;
2393 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2394 Operands.push_back(AArch64Operand::CreateShiftExtend(
2395 ShOp, MCE->getValue(), true, S, E, getContext()));
2396 return MatchOperand_Success;
2399 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2400 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2401 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2402 OperandVector &Operands) {
2403 if (Name.find('.') != StringRef::npos)
2404 return TokError("invalid operand");
2408 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2410 MCAsmParser &Parser = getParser();
2411 const AsmToken &Tok = Parser.getTok();
2412 StringRef Op = Tok.getString();
2413 SMLoc S = Tok.getLoc();
2415 const MCExpr *Expr = nullptr;
2417 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2419 Expr = MCConstantExpr::create(op1, getContext()); \
2420 Operands.push_back( \
2421 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2422 Operands.push_back( \
2423 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2424 Operands.push_back( \
2425 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2426 Expr = MCConstantExpr::create(op2, getContext()); \
2427 Operands.push_back( \
2428 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2431 if (Mnemonic == "ic") {
2432 if (!Op.compare_lower("ialluis")) {
2433 // SYS #0, C7, C1, #0
2434 SYS_ALIAS(0, 7, 1, 0);
2435 } else if (!Op.compare_lower("iallu")) {
2436 // SYS #0, C7, C5, #0
2437 SYS_ALIAS(0, 7, 5, 0);
2438 } else if (!Op.compare_lower("ivau")) {
2439 // SYS #3, C7, C5, #1
2440 SYS_ALIAS(3, 7, 5, 1);
2442 return TokError("invalid operand for IC instruction");
2444 } else if (Mnemonic == "dc") {
2445 if (!Op.compare_lower("zva")) {
2446 // SYS #3, C7, C4, #1
2447 SYS_ALIAS(3, 7, 4, 1);
2448 } else if (!Op.compare_lower("ivac")) {
2449 // SYS #3, C7, C6, #1
2450 SYS_ALIAS(0, 7, 6, 1);
2451 } else if (!Op.compare_lower("isw")) {
2452 // SYS #0, C7, C6, #2
2453 SYS_ALIAS(0, 7, 6, 2);
2454 } else if (!Op.compare_lower("cvac")) {
2455 // SYS #3, C7, C10, #1
2456 SYS_ALIAS(3, 7, 10, 1);
2457 } else if (!Op.compare_lower("csw")) {
2458 // SYS #0, C7, C10, #2
2459 SYS_ALIAS(0, 7, 10, 2);
2460 } else if (!Op.compare_lower("cvau")) {
2461 // SYS #3, C7, C11, #1
2462 SYS_ALIAS(3, 7, 11, 1);
2463 } else if (!Op.compare_lower("civac")) {
2464 // SYS #3, C7, C14, #1
2465 SYS_ALIAS(3, 7, 14, 1);
2466 } else if (!Op.compare_lower("cisw")) {
2467 // SYS #0, C7, C14, #2
2468 SYS_ALIAS(0, 7, 14, 2);
2469 } else if (!Op.compare_lower("cvap")) {
2470 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2471 // SYS #3, C7, C12, #1
2472 SYS_ALIAS(3, 7, 12, 1);
2474 return TokError("DC CVAP requires ARMv8.2a");
2477 return TokError("invalid operand for DC instruction");
2479 } else if (Mnemonic == "at") {
2480 if (!Op.compare_lower("s1e1r")) {
2481 // SYS #0, C7, C8, #0
2482 SYS_ALIAS(0, 7, 8, 0);
2483 } else if (!Op.compare_lower("s1e2r")) {
2484 // SYS #4, C7, C8, #0
2485 SYS_ALIAS(4, 7, 8, 0);
2486 } else if (!Op.compare_lower("s1e3r")) {
2487 // SYS #6, C7, C8, #0
2488 SYS_ALIAS(6, 7, 8, 0);
2489 } else if (!Op.compare_lower("s1e1w")) {
2490 // SYS #0, C7, C8, #1
2491 SYS_ALIAS(0, 7, 8, 1);
2492 } else if (!Op.compare_lower("s1e2w")) {
2493 // SYS #4, C7, C8, #1
2494 SYS_ALIAS(4, 7, 8, 1);
2495 } else if (!Op.compare_lower("s1e3w")) {
2496 // SYS #6, C7, C8, #1
2497 SYS_ALIAS(6, 7, 8, 1);
2498 } else if (!Op.compare_lower("s1e0r")) {
2499 // SYS #0, C7, C8, #3
2500 SYS_ALIAS(0, 7, 8, 2);
2501 } else if (!Op.compare_lower("s1e0w")) {
2502 // SYS #0, C7, C8, #3
2503 SYS_ALIAS(0, 7, 8, 3);
2504 } else if (!Op.compare_lower("s12e1r")) {
2505 // SYS #4, C7, C8, #4
2506 SYS_ALIAS(4, 7, 8, 4);
2507 } else if (!Op.compare_lower("s12e1w")) {
2508 // SYS #4, C7, C8, #5
2509 SYS_ALIAS(4, 7, 8, 5);
2510 } else if (!Op.compare_lower("s12e0r")) {
2511 // SYS #4, C7, C8, #6
2512 SYS_ALIAS(4, 7, 8, 6);
2513 } else if (!Op.compare_lower("s12e0w")) {
2514 // SYS #4, C7, C8, #7
2515 SYS_ALIAS(4, 7, 8, 7);
2516 } else if (!Op.compare_lower("s1e1rp")) {
2517 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2518 // SYS #0, C7, C9, #0
2519 SYS_ALIAS(0, 7, 9, 0);
2521 return TokError("AT S1E1RP requires ARMv8.2a");
2523 } else if (!Op.compare_lower("s1e1wp")) {
2524 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2525 // SYS #0, C7, C9, #1
2526 SYS_ALIAS(0, 7, 9, 1);
2528 return TokError("AT S1E1WP requires ARMv8.2a");
2531 return TokError("invalid operand for AT instruction");
2533 } else if (Mnemonic == "tlbi") {
2534 if (!Op.compare_lower("vmalle1is")) {
2535 // SYS #0, C8, C3, #0
2536 SYS_ALIAS(0, 8, 3, 0);
2537 } else if (!Op.compare_lower("alle2is")) {
2538 // SYS #4, C8, C3, #0
2539 SYS_ALIAS(4, 8, 3, 0);
2540 } else if (!Op.compare_lower("alle3is")) {
2541 // SYS #6, C8, C3, #0
2542 SYS_ALIAS(6, 8, 3, 0);
2543 } else if (!Op.compare_lower("vae1is")) {
2544 // SYS #0, C8, C3, #1
2545 SYS_ALIAS(0, 8, 3, 1);
2546 } else if (!Op.compare_lower("vae2is")) {
2547 // SYS #4, C8, C3, #1
2548 SYS_ALIAS(4, 8, 3, 1);
2549 } else if (!Op.compare_lower("vae3is")) {
2550 // SYS #6, C8, C3, #1
2551 SYS_ALIAS(6, 8, 3, 1);
2552 } else if (!Op.compare_lower("aside1is")) {
2553 // SYS #0, C8, C3, #2
2554 SYS_ALIAS(0, 8, 3, 2);
2555 } else if (!Op.compare_lower("vaae1is")) {
2556 // SYS #0, C8, C3, #3
2557 SYS_ALIAS(0, 8, 3, 3);
2558 } else if (!Op.compare_lower("alle1is")) {
2559 // SYS #4, C8, C3, #4
2560 SYS_ALIAS(4, 8, 3, 4);
2561 } else if (!Op.compare_lower("vale1is")) {
2562 // SYS #0, C8, C3, #5
2563 SYS_ALIAS(0, 8, 3, 5);
2564 } else if (!Op.compare_lower("vaale1is")) {
2565 // SYS #0, C8, C3, #7
2566 SYS_ALIAS(0, 8, 3, 7);
2567 } else if (!Op.compare_lower("vmalle1")) {
2568 // SYS #0, C8, C7, #0
2569 SYS_ALIAS(0, 8, 7, 0);
2570 } else if (!Op.compare_lower("alle2")) {
2571 // SYS #4, C8, C7, #0
2572 SYS_ALIAS(4, 8, 7, 0);
2573 } else if (!Op.compare_lower("vale2is")) {
2574 // SYS #4, C8, C3, #5
2575 SYS_ALIAS(4, 8, 3, 5);
2576 } else if (!Op.compare_lower("vale3is")) {
2577 // SYS #6, C8, C3, #5
2578 SYS_ALIAS(6, 8, 3, 5);
2579 } else if (!Op.compare_lower("alle3")) {
2580 // SYS #6, C8, C7, #0
2581 SYS_ALIAS(6, 8, 7, 0);
2582 } else if (!Op.compare_lower("vae1")) {
2583 // SYS #0, C8, C7, #1
2584 SYS_ALIAS(0, 8, 7, 1);
2585 } else if (!Op.compare_lower("vae2")) {
2586 // SYS #4, C8, C7, #1
2587 SYS_ALIAS(4, 8, 7, 1);
2588 } else if (!Op.compare_lower("vae3")) {
2589 // SYS #6, C8, C7, #1
2590 SYS_ALIAS(6, 8, 7, 1);
2591 } else if (!Op.compare_lower("aside1")) {
2592 // SYS #0, C8, C7, #2
2593 SYS_ALIAS(0, 8, 7, 2);
2594 } else if (!Op.compare_lower("vaae1")) {
2595 // SYS #0, C8, C7, #3
2596 SYS_ALIAS(0, 8, 7, 3);
2597 } else if (!Op.compare_lower("alle1")) {
2598 // SYS #4, C8, C7, #4
2599 SYS_ALIAS(4, 8, 7, 4);
2600 } else if (!Op.compare_lower("vale1")) {
2601 // SYS #0, C8, C7, #5
2602 SYS_ALIAS(0, 8, 7, 5);
2603 } else if (!Op.compare_lower("vale2")) {
2604 // SYS #4, C8, C7, #5
2605 SYS_ALIAS(4, 8, 7, 5);
2606 } else if (!Op.compare_lower("vale3")) {
2607 // SYS #6, C8, C7, #5
2608 SYS_ALIAS(6, 8, 7, 5);
2609 } else if (!Op.compare_lower("vaale1")) {
2610 // SYS #0, C8, C7, #7
2611 SYS_ALIAS(0, 8, 7, 7);
2612 } else if (!Op.compare_lower("ipas2e1")) {
2613 // SYS #4, C8, C4, #1
2614 SYS_ALIAS(4, 8, 4, 1);
2615 } else if (!Op.compare_lower("ipas2le1")) {
2616 // SYS #4, C8, C4, #5
2617 SYS_ALIAS(4, 8, 4, 5);
2618 } else if (!Op.compare_lower("ipas2e1is")) {
2619 // SYS #4, C8, C4, #1
2620 SYS_ALIAS(4, 8, 0, 1);
2621 } else if (!Op.compare_lower("ipas2le1is")) {
2622 // SYS #4, C8, C4, #5
2623 SYS_ALIAS(4, 8, 0, 5);
2624 } else if (!Op.compare_lower("vmalls12e1")) {
2625 // SYS #4, C8, C7, #6
2626 SYS_ALIAS(4, 8, 7, 6);
2627 } else if (!Op.compare_lower("vmalls12e1is")) {
2628 // SYS #4, C8, C3, #6
2629 SYS_ALIAS(4, 8, 3, 6);
2631 return TokError("invalid operand for TLBI instruction");
2637 Parser.Lex(); // Eat operand.
2639 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2640 bool HasRegister = false;
2642 // Check for the optional register operand.
2643 if (getLexer().is(AsmToken::Comma)) {
2644 Parser.Lex(); // Eat comma.
2646 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2647 return TokError("expected register operand");
2652 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2653 Parser.eatToEndOfStatement();
2654 return TokError("unexpected token in argument list");
2657 if (ExpectRegister && !HasRegister) {
2658 return TokError("specified " + Mnemonic + " op requires a register");
2660 else if (!ExpectRegister && HasRegister) {
2661 return TokError("specified " + Mnemonic + " op does not use a register");
2664 Parser.Lex(); // Consume the EndOfStatement
2668 AArch64AsmParser::OperandMatchResultTy
2669 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2670 MCAsmParser &Parser = getParser();
2671 const AsmToken &Tok = Parser.getTok();
2673 // Can be either a #imm style literal or an option name
2674 bool Hash = Tok.is(AsmToken::Hash);
2675 if (Hash || Tok.is(AsmToken::Integer)) {
2676 // Immediate operand.
2678 Parser.Lex(); // Eat the '#'
2679 const MCExpr *ImmVal;
2680 SMLoc ExprLoc = getLoc();
2681 if (getParser().parseExpression(ImmVal))
2682 return MatchOperand_ParseFail;
2683 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2685 Error(ExprLoc, "immediate value expected for barrier operand");
2686 return MatchOperand_ParseFail;
2688 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2689 Error(ExprLoc, "barrier operand out of range");
2690 return MatchOperand_ParseFail;
2693 auto Mapper = AArch64DB::DBarrierMapper();
2695 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2696 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2697 ExprLoc, getContext()));
2698 return MatchOperand_Success;
2701 if (Tok.isNot(AsmToken::Identifier)) {
2702 TokError("invalid operand for instruction");
2703 return MatchOperand_ParseFail;
2707 auto Mapper = AArch64DB::DBarrierMapper();
2709 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2711 TokError("invalid barrier option name");
2712 return MatchOperand_ParseFail;
2715 // The only valid named option for ISB is 'sy'
2716 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2717 TokError("'sy' or #imm operand expected");
2718 return MatchOperand_ParseFail;
2721 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2722 getLoc(), getContext()));
2723 Parser.Lex(); // Consume the option
2725 return MatchOperand_Success;
2728 AArch64AsmParser::OperandMatchResultTy
2729 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2730 MCAsmParser &Parser = getParser();
2731 const AsmToken &Tok = Parser.getTok();
2733 if (Tok.isNot(AsmToken::Identifier))
2734 return MatchOperand_NoMatch;
2737 auto MRSMapper = AArch64SysReg::MRSMapper();
2738 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2739 getSTI().getFeatureBits(), IsKnown);
2740 assert(IsKnown == (MRSReg != -1U) &&
2741 "register should be -1 if and only if it's unknown");
2743 auto MSRMapper = AArch64SysReg::MSRMapper();
2744 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2745 getSTI().getFeatureBits(), IsKnown);
2746 assert(IsKnown == (MSRReg != -1U) &&
2747 "register should be -1 if and only if it's unknown");
2749 auto PStateMapper = AArch64PState::PStateMapper();
2750 uint32_t PStateField =
2751 PStateMapper.fromString(Tok.getString(),
2752 getSTI().getFeatureBits(), IsKnown);
2753 assert(IsKnown == (PStateField != -1U) &&
2754 "register should be -1 if and only if it's unknown");
2756 Operands.push_back(AArch64Operand::CreateSysReg(
2757 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2758 Parser.Lex(); // Eat identifier
2760 return MatchOperand_Success;
2763 /// tryParseVectorRegister - Parse a vector register operand.
2764 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2765 MCAsmParser &Parser = getParser();
2766 if (Parser.getTok().isNot(AsmToken::Identifier))
2770 // Check for a vector register specifier first.
2772 int64_t Reg = tryMatchVectorRegister(Kind, false);
2776 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2777 // If there was an explicit qualifier, that goes on as a literal text
2781 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2783 // If there is an index specifier following the register, parse that too.
2784 if (Parser.getTok().is(AsmToken::LBrac)) {
2785 SMLoc SIdx = getLoc();
2786 Parser.Lex(); // Eat left bracket token.
2788 const MCExpr *ImmVal;
2789 if (getParser().parseExpression(ImmVal))
2791 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2793 TokError("immediate value expected for vector index");
2798 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2799 Error(E, "']' expected");
2803 Parser.Lex(); // Eat right bracket token.
2805 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2812 /// parseRegister - Parse a non-vector register operand.
2813 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2814 MCAsmParser &Parser = getParser();
2816 // Try for a vector register.
2817 if (!tryParseVectorRegister(Operands))
2820 // Try for a scalar register.
2821 int64_t Reg = tryParseRegister();
2825 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2827 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2828 // as a string token in the instruction itself.
2829 if (getLexer().getKind() == AsmToken::LBrac) {
2830 SMLoc LBracS = getLoc();
2832 const AsmToken &Tok = Parser.getTok();
2833 if (Tok.is(AsmToken::Integer)) {
2834 SMLoc IntS = getLoc();
2835 int64_t Val = Tok.getIntVal();
2838 if (getLexer().getKind() == AsmToken::RBrac) {
2839 SMLoc RBracS = getLoc();
2842 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2844 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2846 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2856 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2857 MCAsmParser &Parser = getParser();
2858 bool HasELFModifier = false;
2859 AArch64MCExpr::VariantKind RefKind;
2861 if (Parser.getTok().is(AsmToken::Colon)) {
2862 Parser.Lex(); // Eat ':"
2863 HasELFModifier = true;
2865 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2866 Error(Parser.getTok().getLoc(),
2867 "expect relocation specifier in operand after ':'");
2871 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2872 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2873 .Case("lo12", AArch64MCExpr::VK_LO12)
2874 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2875 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2876 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2877 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2878 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2879 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2880 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2881 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2882 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2883 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2884 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2885 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2886 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2887 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2888 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2889 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2890 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2891 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2892 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2893 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2894 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2895 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2896 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2897 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2898 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2899 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2900 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2901 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2902 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2903 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2904 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2905 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2906 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2907 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2908 .Default(AArch64MCExpr::VK_INVALID);
2910 if (RefKind == AArch64MCExpr::VK_INVALID) {
2911 Error(Parser.getTok().getLoc(),
2912 "expect relocation specifier in operand after ':'");
2916 Parser.Lex(); // Eat identifier
2918 if (Parser.getTok().isNot(AsmToken::Colon)) {
2919 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2922 Parser.Lex(); // Eat ':'
2925 if (getParser().parseExpression(ImmVal))
2929 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2934 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2935 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2936 MCAsmParser &Parser = getParser();
2937 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2939 Parser.Lex(); // Eat left bracket token.
2941 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2944 int64_t PrevReg = FirstReg;
2947 if (Parser.getTok().is(AsmToken::Minus)) {
2948 Parser.Lex(); // Eat the minus.
2950 SMLoc Loc = getLoc();
2952 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2955 // Any Kind suffices must match on all regs in the list.
2956 if (Kind != NextKind)
2957 return Error(Loc, "mismatched register size suffix");
2959 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2961 if (Space == 0 || Space > 3) {
2962 return Error(Loc, "invalid number of vectors");
2968 while (Parser.getTok().is(AsmToken::Comma)) {
2969 Parser.Lex(); // Eat the comma token.
2971 SMLoc Loc = getLoc();
2973 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2976 // Any Kind suffices must match on all regs in the list.
2977 if (Kind != NextKind)
2978 return Error(Loc, "mismatched register size suffix");
2980 // Registers must be incremental (with wraparound at 31)
2981 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2982 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2983 return Error(Loc, "registers must be sequential");
2990 if (Parser.getTok().isNot(AsmToken::RCurly))
2991 return Error(getLoc(), "'}' expected");
2992 Parser.Lex(); // Eat the '}' token.
2995 return Error(S, "invalid number of vectors");
2997 unsigned NumElements = 0;
2998 char ElementKind = 0;
3000 parseValidVectorKind(Kind, NumElements, ElementKind);
3002 Operands.push_back(AArch64Operand::CreateVectorList(
3003 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3005 // If there is an index specifier following the list, parse that too.
3006 if (Parser.getTok().is(AsmToken::LBrac)) {
3007 SMLoc SIdx = getLoc();
3008 Parser.Lex(); // Eat left bracket token.
3010 const MCExpr *ImmVal;
3011 if (getParser().parseExpression(ImmVal))
3013 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3015 TokError("immediate value expected for vector index");
3020 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3021 Error(E, "']' expected");
3025 Parser.Lex(); // Eat right bracket token.
3027 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3033 AArch64AsmParser::OperandMatchResultTy
3034 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3035 MCAsmParser &Parser = getParser();
3036 const AsmToken &Tok = Parser.getTok();
3037 if (!Tok.is(AsmToken::Identifier))
3038 return MatchOperand_NoMatch;
3040 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3042 MCContext &Ctx = getContext();
3043 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3044 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3045 return MatchOperand_NoMatch;
3048 Parser.Lex(); // Eat register
3050 if (Parser.getTok().isNot(AsmToken::Comma)) {
3052 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3053 return MatchOperand_Success;
3055 Parser.Lex(); // Eat comma.
3057 if (Parser.getTok().is(AsmToken::Hash))
3058 Parser.Lex(); // Eat hash
3060 if (Parser.getTok().isNot(AsmToken::Integer)) {
3061 Error(getLoc(), "index must be absent or #0");
3062 return MatchOperand_ParseFail;
3065 const MCExpr *ImmVal;
3066 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3067 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3068 Error(getLoc(), "index must be absent or #0");
3069 return MatchOperand_ParseFail;
3073 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3074 return MatchOperand_Success;
3077 /// parseOperand - Parse a arm instruction operand. For now this parses the
3078 /// operand regardless of the mnemonic.
3079 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3080 bool invertCondCode) {
3081 MCAsmParser &Parser = getParser();
3082 // Check if the current operand has a custom associated parser, if so, try to
3083 // custom parse the operand, or fallback to the general approach.
3084 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3085 if (ResTy == MatchOperand_Success)
3087 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3088 // there was a match, but an error occurred, in which case, just return that
3089 // the operand parsing failed.
3090 if (ResTy == MatchOperand_ParseFail)
3093 // Nothing custom, so do general case parsing.
3095 switch (getLexer().getKind()) {
3099 if (parseSymbolicImmVal(Expr))
3100 return Error(S, "invalid operand");
3102 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3103 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3106 case AsmToken::LBrac: {
3107 SMLoc Loc = Parser.getTok().getLoc();
3108 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3110 Parser.Lex(); // Eat '['
3112 // There's no comma after a '[', so we can parse the next operand
3114 return parseOperand(Operands, false, false);
3116 case AsmToken::LCurly:
3117 return parseVectorList(Operands);
3118 case AsmToken::Identifier: {
3119 // If we're expecting a Condition Code operand, then just parse that.
3121 return parseCondCode(Operands, invertCondCode);
3123 // If it's a register name, parse it.
3124 if (!parseRegister(Operands))
3127 // This could be an optional "shift" or "extend" operand.
3128 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3129 // We can only continue if no tokens were eaten.
3130 if (GotShift != MatchOperand_NoMatch)
3133 // This was not a register so parse other operands that start with an
3134 // identifier (like labels) as expressions and create them as immediates.
3135 const MCExpr *IdVal;
3137 if (getParser().parseExpression(IdVal))
3140 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3141 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3144 case AsmToken::Integer:
3145 case AsmToken::Real:
3146 case AsmToken::Hash: {
3147 // #42 -> immediate.
3149 if (getLexer().is(AsmToken::Hash))
3152 // Parse a negative sign
3153 bool isNegative = false;
3154 if (Parser.getTok().is(AsmToken::Minus)) {
3156 // We need to consume this token only when we have a Real, otherwise
3157 // we let parseSymbolicImmVal take care of it
3158 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3162 // The only Real that should come through here is a literal #0.0 for
3163 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3164 // so convert the value.
3165 const AsmToken &Tok = Parser.getTok();
3166 if (Tok.is(AsmToken::Real)) {
3167 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3168 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3169 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3170 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3171 Mnemonic != "fcmlt")
3172 return TokError("unexpected floating point literal");
3173 else if (IntVal != 0 || isNegative)
3174 return TokError("expected floating-point constant #0.0");
3175 Parser.Lex(); // Eat the token.
3178 AArch64Operand::CreateToken("#0", false, S, getContext()));
3180 AArch64Operand::CreateToken(".0", false, S, getContext()));
3184 const MCExpr *ImmVal;
3185 if (parseSymbolicImmVal(ImmVal))
3188 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3189 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3192 case AsmToken::Equal: {
3193 SMLoc Loc = Parser.getTok().getLoc();
3194 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3195 return Error(Loc, "unexpected token in operand");
3196 Parser.Lex(); // Eat '='
3197 const MCExpr *SubExprVal;
3198 if (getParser().parseExpression(SubExprVal))
3201 if (Operands.size() < 2 ||
3202 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3203 return Error(Loc, "Only valid when first operand is register");
3206 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3207 Operands[1]->getReg());
3209 MCContext& Ctx = getContext();
3210 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3211 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3212 if (isa<MCConstantExpr>(SubExprVal)) {
3213 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3214 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3215 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3219 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3220 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3221 Operands.push_back(AArch64Operand::CreateImm(
3222 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3224 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3225 ShiftAmt, true, S, E, Ctx));
3228 APInt Simm = APInt(64, Imm << ShiftAmt);
3229 // check if the immediate is an unsigned or signed 32-bit int for W regs
3230 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3231 return Error(Loc, "Immediate too large for register");
3233 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3234 const MCExpr *CPLoc =
3235 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3236 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3242 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3244 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3245 StringRef Name, SMLoc NameLoc,
3246 OperandVector &Operands) {
3247 MCAsmParser &Parser = getParser();
3248 Name = StringSwitch<StringRef>(Name.lower())
3249 .Case("beq", "b.eq")
3250 .Case("bne", "b.ne")
3251 .Case("bhs", "b.hs")
3252 .Case("bcs", "b.cs")
3253 .Case("blo", "b.lo")
3254 .Case("bcc", "b.cc")
3255 .Case("bmi", "b.mi")
3256 .Case("bpl", "b.pl")
3257 .Case("bvs", "b.vs")
3258 .Case("bvc", "b.vc")
3259 .Case("bhi", "b.hi")
3260 .Case("bls", "b.ls")
3261 .Case("bge", "b.ge")
3262 .Case("blt", "b.lt")
3263 .Case("bgt", "b.gt")
3264 .Case("ble", "b.le")
3265 .Case("bal", "b.al")
3266 .Case("bnv", "b.nv")
3269 // First check for the AArch64-specific .req directive.
3270 if (Parser.getTok().is(AsmToken::Identifier) &&
3271 Parser.getTok().getIdentifier() == ".req") {
3272 parseDirectiveReq(Name, NameLoc);
3273 // We always return 'error' for this, as we're done with this
3274 // statement and don't need to match the 'instruction."
3278 // Create the leading tokens for the mnemonic, split by '.' characters.
3279 size_t Start = 0, Next = Name.find('.');
3280 StringRef Head = Name.slice(Start, Next);
3282 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3283 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3284 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3285 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3286 Parser.eatToEndOfStatement();
3291 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3294 // Handle condition codes for a branch mnemonic
3295 if (Head == "b" && Next != StringRef::npos) {
3297 Next = Name.find('.', Start + 1);
3298 Head = Name.slice(Start + 1, Next);
3300 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3301 (Head.data() - Name.data()));
3302 AArch64CC::CondCode CC = parseCondCodeString(Head);
3303 if (CC == AArch64CC::Invalid)
3304 return Error(SuffixLoc, "invalid condition code");
3306 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3308 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3311 // Add the remaining tokens in the mnemonic.
3312 while (Next != StringRef::npos) {
3314 Next = Name.find('.', Start + 1);
3315 Head = Name.slice(Start, Next);
3316 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3317 (Head.data() - Name.data()) + 1);
3319 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3322 // Conditional compare instructions have a Condition Code operand, which needs
3323 // to be parsed and an immediate operand created.
3324 bool condCodeFourthOperand =
3325 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3326 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3327 Head == "csinc" || Head == "csinv" || Head == "csneg");
3329 // These instructions are aliases to some of the conditional select
3330 // instructions. However, the condition code is inverted in the aliased
3333 // FIXME: Is this the correct way to handle these? Or should the parser
3334 // generate the aliased instructions directly?
3335 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3336 bool condCodeThirdOperand =
3337 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3339 // Read the remaining operands.
3340 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3341 // Read the first operand.
3342 if (parseOperand(Operands, false, false)) {
3343 Parser.eatToEndOfStatement();
3348 while (getLexer().is(AsmToken::Comma)) {
3349 Parser.Lex(); // Eat the comma.
3351 // Parse and remember the operand.
3352 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3353 (N == 3 && condCodeThirdOperand) ||
3354 (N == 2 && condCodeSecondOperand),
3355 condCodeSecondOperand || condCodeThirdOperand)) {
3356 Parser.eatToEndOfStatement();
3360 // After successfully parsing some operands there are two special cases to
3361 // consider (i.e. notional operands not separated by commas). Both are due
3362 // to memory specifiers:
3363 // + An RBrac will end an address for load/store/prefetch
3364 // + An '!' will indicate a pre-indexed operation.
3366 // It's someone else's responsibility to make sure these tokens are sane
3367 // in the given context!
3368 if (Parser.getTok().is(AsmToken::RBrac)) {
3369 SMLoc Loc = Parser.getTok().getLoc();
3370 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3375 if (Parser.getTok().is(AsmToken::Exclaim)) {
3376 SMLoc Loc = Parser.getTok().getLoc();
3377 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3386 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3387 SMLoc Loc = Parser.getTok().getLoc();
3388 Parser.eatToEndOfStatement();
3389 return Error(Loc, "unexpected token in argument list");
3392 Parser.Lex(); // Consume the EndOfStatement
3396 // FIXME: This entire function is a giant hack to provide us with decent
3397 // operand range validation/diagnostics until TableGen/MC can be extended
3398 // to support autogeneration of this kind of validation.
3399 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3400 SmallVectorImpl<SMLoc> &Loc) {
3401 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3402 // Check for indexed addressing modes w/ the base register being the
3403 // same as a destination/source register or pair load where
3404 // the Rt == Rt2. All of those are undefined behaviour.
3405 switch (Inst.getOpcode()) {
3406 case AArch64::LDPSWpre:
3407 case AArch64::LDPWpost:
3408 case AArch64::LDPWpre:
3409 case AArch64::LDPXpost:
3410 case AArch64::LDPXpre: {
3411 unsigned Rt = Inst.getOperand(1).getReg();
3412 unsigned Rt2 = Inst.getOperand(2).getReg();
3413 unsigned Rn = Inst.getOperand(3).getReg();
3414 if (RI->isSubRegisterEq(Rn, Rt))
3415 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3416 "is also a destination");
3417 if (RI->isSubRegisterEq(Rn, Rt2))
3418 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3419 "is also a destination");
3422 case AArch64::LDPDi:
3423 case AArch64::LDPQi:
3424 case AArch64::LDPSi:
3425 case AArch64::LDPSWi:
3426 case AArch64::LDPWi:
3427 case AArch64::LDPXi: {
3428 unsigned Rt = Inst.getOperand(0).getReg();
3429 unsigned Rt2 = Inst.getOperand(1).getReg();
3431 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3434 case AArch64::LDPDpost:
3435 case AArch64::LDPDpre:
3436 case AArch64::LDPQpost:
3437 case AArch64::LDPQpre:
3438 case AArch64::LDPSpost:
3439 case AArch64::LDPSpre:
3440 case AArch64::LDPSWpost: {
3441 unsigned Rt = Inst.getOperand(1).getReg();
3442 unsigned Rt2 = Inst.getOperand(2).getReg();
3444 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3447 case AArch64::STPDpost:
3448 case AArch64::STPDpre:
3449 case AArch64::STPQpost:
3450 case AArch64::STPQpre:
3451 case AArch64::STPSpost:
3452 case AArch64::STPSpre:
3453 case AArch64::STPWpost:
3454 case AArch64::STPWpre:
3455 case AArch64::STPXpost:
3456 case AArch64::STPXpre: {
3457 unsigned Rt = Inst.getOperand(1).getReg();
3458 unsigned Rt2 = Inst.getOperand(2).getReg();
3459 unsigned Rn = Inst.getOperand(3).getReg();
3460 if (RI->isSubRegisterEq(Rn, Rt))
3461 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3462 "is also a source");
3463 if (RI->isSubRegisterEq(Rn, Rt2))
3464 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3465 "is also a source");
3468 case AArch64::LDRBBpre:
3469 case AArch64::LDRBpre:
3470 case AArch64::LDRHHpre:
3471 case AArch64::LDRHpre:
3472 case AArch64::LDRSBWpre:
3473 case AArch64::LDRSBXpre:
3474 case AArch64::LDRSHWpre:
3475 case AArch64::LDRSHXpre:
3476 case AArch64::LDRSWpre:
3477 case AArch64::LDRWpre:
3478 case AArch64::LDRXpre:
3479 case AArch64::LDRBBpost:
3480 case AArch64::LDRBpost:
3481 case AArch64::LDRHHpost:
3482 case AArch64::LDRHpost:
3483 case AArch64::LDRSBWpost:
3484 case AArch64::LDRSBXpost:
3485 case AArch64::LDRSHWpost:
3486 case AArch64::LDRSHXpost:
3487 case AArch64::LDRSWpost:
3488 case AArch64::LDRWpost:
3489 case AArch64::LDRXpost: {
3490 unsigned Rt = Inst.getOperand(1).getReg();
3491 unsigned Rn = Inst.getOperand(2).getReg();
3492 if (RI->isSubRegisterEq(Rn, Rt))
3493 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3494 "is also a source");
3497 case AArch64::STRBBpost:
3498 case AArch64::STRBpost:
3499 case AArch64::STRHHpost:
3500 case AArch64::STRHpost:
3501 case AArch64::STRWpost:
3502 case AArch64::STRXpost:
3503 case AArch64::STRBBpre:
3504 case AArch64::STRBpre:
3505 case AArch64::STRHHpre:
3506 case AArch64::STRHpre:
3507 case AArch64::STRWpre:
3508 case AArch64::STRXpre: {
3509 unsigned Rt = Inst.getOperand(1).getReg();
3510 unsigned Rn = Inst.getOperand(2).getReg();
3511 if (RI->isSubRegisterEq(Rn, Rt))
3512 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3513 "is also a source");
3518 // Now check immediate ranges. Separate from the above as there is overlap
3519 // in the instructions being checked and this keeps the nested conditionals
3521 switch (Inst.getOpcode()) {
3522 case AArch64::ADDSWri:
3523 case AArch64::ADDSXri:
3524 case AArch64::ADDWri:
3525 case AArch64::ADDXri:
3526 case AArch64::SUBSWri:
3527 case AArch64::SUBSXri:
3528 case AArch64::SUBWri:
3529 case AArch64::SUBXri: {
3530 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3531 // some slight duplication here.
3532 if (Inst.getOperand(2).isExpr()) {
3533 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3534 AArch64MCExpr::VariantKind ELFRefKind;
3535 MCSymbolRefExpr::VariantKind DarwinRefKind;
3537 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3538 return Error(Loc[2], "invalid immediate expression");
3541 // Only allow these with ADDXri.
3542 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3543 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3544 Inst.getOpcode() == AArch64::ADDXri)
3547 // Only allow these with ADDXri/ADDWri
3548 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3549 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3550 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3551 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3552 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3553 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3554 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3555 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3556 (Inst.getOpcode() == AArch64::ADDXri ||
3557 Inst.getOpcode() == AArch64::ADDWri))
3560 // Don't allow expressions in the immediate field otherwise
3561 return Error(Loc[2], "invalid immediate expression");
3570 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3572 case Match_MissingFeature:
3574 "instruction requires a CPU feature not currently enabled");
3575 case Match_InvalidOperand:
3576 return Error(Loc, "invalid operand for instruction");
3577 case Match_InvalidSuffix:
3578 return Error(Loc, "invalid type suffix for instruction");
3579 case Match_InvalidCondCode:
3580 return Error(Loc, "expected AArch64 condition code");
3581 case Match_AddSubRegExtendSmall:
3583 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3584 case Match_AddSubRegExtendLarge:
3586 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3587 case Match_AddSubSecondSource:
3589 "expected compatible register, symbol or integer in range [0, 4095]");
3590 case Match_LogicalSecondSource:
3591 return Error(Loc, "expected compatible register or logical immediate");
3592 case Match_InvalidMovImm32Shift:
3593 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3594 case Match_InvalidMovImm64Shift:
3595 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3596 case Match_AddSubRegShift32:
3598 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3599 case Match_AddSubRegShift64:
3601 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3602 case Match_InvalidFPImm:
3604 "expected compatible register or floating-point constant");
3605 case Match_InvalidMemoryIndexedSImm9:
3606 return Error(Loc, "index must be an integer in range [-256, 255].");
3607 case Match_InvalidMemoryIndexed4SImm7:
3608 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3609 case Match_InvalidMemoryIndexed8SImm7:
3610 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3611 case Match_InvalidMemoryIndexed16SImm7:
3612 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3613 case Match_InvalidMemoryWExtend8:
3615 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3616 case Match_InvalidMemoryWExtend16:
3618 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3619 case Match_InvalidMemoryWExtend32:
3621 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3622 case Match_InvalidMemoryWExtend64:
3624 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3625 case Match_InvalidMemoryWExtend128:
3627 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3628 case Match_InvalidMemoryXExtend8:
3630 "expected 'lsl' or 'sxtx' with optional shift of #0");
3631 case Match_InvalidMemoryXExtend16:
3633 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3634 case Match_InvalidMemoryXExtend32:
3636 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3637 case Match_InvalidMemoryXExtend64:
3639 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3640 case Match_InvalidMemoryXExtend128:
3642 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3643 case Match_InvalidMemoryIndexed1:
3644 return Error(Loc, "index must be an integer in range [0, 4095].");
3645 case Match_InvalidMemoryIndexed2:
3646 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3647 case Match_InvalidMemoryIndexed4:
3648 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3649 case Match_InvalidMemoryIndexed8:
3650 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3651 case Match_InvalidMemoryIndexed16:
3652 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3653 case Match_InvalidImm0_1:
3654 return Error(Loc, "immediate must be an integer in range [0, 1].");
3655 case Match_InvalidImm0_7:
3656 return Error(Loc, "immediate must be an integer in range [0, 7].");
3657 case Match_InvalidImm0_15:
3658 return Error(Loc, "immediate must be an integer in range [0, 15].");
3659 case Match_InvalidImm0_31:
3660 return Error(Loc, "immediate must be an integer in range [0, 31].");
3661 case Match_InvalidImm0_63:
3662 return Error(Loc, "immediate must be an integer in range [0, 63].");
3663 case Match_InvalidImm0_127:
3664 return Error(Loc, "immediate must be an integer in range [0, 127].");
3665 case Match_InvalidImm0_65535:
3666 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3667 case Match_InvalidImm1_8:
3668 return Error(Loc, "immediate must be an integer in range [1, 8].");
3669 case Match_InvalidImm1_16:
3670 return Error(Loc, "immediate must be an integer in range [1, 16].");
3671 case Match_InvalidImm1_32:
3672 return Error(Loc, "immediate must be an integer in range [1, 32].");
3673 case Match_InvalidImm1_64:
3674 return Error(Loc, "immediate must be an integer in range [1, 64].");
3675 case Match_InvalidIndex1:
3676 return Error(Loc, "expected lane specifier '[1]'");
3677 case Match_InvalidIndexB:
3678 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3679 case Match_InvalidIndexH:
3680 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3681 case Match_InvalidIndexS:
3682 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3683 case Match_InvalidIndexD:
3684 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3685 case Match_InvalidLabel:
3686 return Error(Loc, "expected label or encodable integer pc offset");
3688 return Error(Loc, "expected readable system register");
3690 return Error(Loc, "expected writable system register or pstate");
3691 case Match_MnemonicFail:
3692 return Error(Loc, "unrecognized instruction mnemonic");
3694 llvm_unreachable("unexpected error code!");
3698 static const char *getSubtargetFeatureName(uint64_t Val);
3700 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3701 OperandVector &Operands,
3703 uint64_t &ErrorInfo,
3704 bool MatchingInlineAsm) {
3705 assert(!Operands.empty() && "Unexpect empty operand list!");
3706 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3707 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3709 StringRef Tok = Op.getToken();
3710 unsigned NumOperands = Operands.size();
3712 if (NumOperands == 4 && Tok == "lsl") {
3713 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3714 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3715 if (Op2.isReg() && Op3.isImm()) {
3716 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3718 uint64_t Op3Val = Op3CE->getValue();
3719 uint64_t NewOp3Val = 0;
3720 uint64_t NewOp4Val = 0;
3721 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3723 NewOp3Val = (32 - Op3Val) & 0x1f;
3724 NewOp4Val = 31 - Op3Val;
3726 NewOp3Val = (64 - Op3Val) & 0x3f;
3727 NewOp4Val = 63 - Op3Val;
3730 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3731 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3733 Operands[0] = AArch64Operand::CreateToken(
3734 "ubfm", false, Op.getStartLoc(), getContext());
3735 Operands.push_back(AArch64Operand::CreateImm(
3736 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3737 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3738 Op3.getEndLoc(), getContext());
3741 } else if (NumOperands == 4 && Tok == "bfc") {
3742 // FIXME: Horrible hack to handle BFC->BFM alias.
3743 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3744 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3745 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3747 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3748 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3749 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3751 if (LSBCE && WidthCE) {
3752 uint64_t LSB = LSBCE->getValue();
3753 uint64_t Width = WidthCE->getValue();
3755 uint64_t RegWidth = 0;
3756 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3762 if (LSB >= RegWidth)
3763 return Error(LSBOp.getStartLoc(),
3764 "expected integer in range [0, 31]");
3765 if (Width < 1 || Width > RegWidth)
3766 return Error(WidthOp.getStartLoc(),
3767 "expected integer in range [1, 32]");
3771 ImmR = (32 - LSB) & 0x1f;
3773 ImmR = (64 - LSB) & 0x3f;
3775 uint64_t ImmS = Width - 1;
3777 if (ImmR != 0 && ImmS >= ImmR)
3778 return Error(WidthOp.getStartLoc(),
3779 "requested insert overflows register");
3781 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3782 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3783 Operands[0] = AArch64Operand::CreateToken(
3784 "bfm", false, Op.getStartLoc(), getContext());
3785 Operands[2] = AArch64Operand::CreateReg(
3786 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3787 SMLoc(), getContext());
3788 Operands[3] = AArch64Operand::CreateImm(
3789 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3790 Operands.emplace_back(
3791 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3792 WidthOp.getEndLoc(), getContext()));
3795 } else if (NumOperands == 5) {
3796 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3797 // UBFIZ -> UBFM aliases.
3798 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3799 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3800 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3801 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3803 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3804 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3805 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3807 if (Op3CE && Op4CE) {
3808 uint64_t Op3Val = Op3CE->getValue();
3809 uint64_t Op4Val = Op4CE->getValue();
3811 uint64_t RegWidth = 0;
3812 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3818 if (Op3Val >= RegWidth)
3819 return Error(Op3.getStartLoc(),
3820 "expected integer in range [0, 31]");
3821 if (Op4Val < 1 || Op4Val > RegWidth)
3822 return Error(Op4.getStartLoc(),
3823 "expected integer in range [1, 32]");
3825 uint64_t NewOp3Val = 0;
3827 NewOp3Val = (32 - Op3Val) & 0x1f;
3829 NewOp3Val = (64 - Op3Val) & 0x3f;
3831 uint64_t NewOp4Val = Op4Val - 1;
3833 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3834 return Error(Op4.getStartLoc(),
3835 "requested insert overflows register");
3837 const MCExpr *NewOp3 =
3838 MCConstantExpr::create(NewOp3Val, getContext());
3839 const MCExpr *NewOp4 =
3840 MCConstantExpr::create(NewOp4Val, getContext());
3841 Operands[3] = AArch64Operand::CreateImm(
3842 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3843 Operands[4] = AArch64Operand::CreateImm(
3844 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3846 Operands[0] = AArch64Operand::CreateToken(
3847 "bfm", false, Op.getStartLoc(), getContext());
3848 else if (Tok == "sbfiz")
3849 Operands[0] = AArch64Operand::CreateToken(
3850 "sbfm", false, Op.getStartLoc(), getContext());
3851 else if (Tok == "ubfiz")
3852 Operands[0] = AArch64Operand::CreateToken(
3853 "ubfm", false, Op.getStartLoc(), getContext());
3855 llvm_unreachable("No valid mnemonic for alias?");
3859 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3860 // UBFX -> UBFM aliases.
3861 } else if (NumOperands == 5 &&
3862 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3863 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3864 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3865 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3867 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3868 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3869 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3871 if (Op3CE && Op4CE) {
3872 uint64_t Op3Val = Op3CE->getValue();
3873 uint64_t Op4Val = Op4CE->getValue();
3875 uint64_t RegWidth = 0;
3876 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3882 if (Op3Val >= RegWidth)
3883 return Error(Op3.getStartLoc(),
3884 "expected integer in range [0, 31]");
3885 if (Op4Val < 1 || Op4Val > RegWidth)
3886 return Error(Op4.getStartLoc(),
3887 "expected integer in range [1, 32]");
3889 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3891 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3892 return Error(Op4.getStartLoc(),
3893 "requested extract overflows register");
3895 const MCExpr *NewOp4 =
3896 MCConstantExpr::create(NewOp4Val, getContext());
3897 Operands[4] = AArch64Operand::CreateImm(
3898 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3900 Operands[0] = AArch64Operand::CreateToken(
3901 "bfm", false, Op.getStartLoc(), getContext());
3902 else if (Tok == "sbfx")
3903 Operands[0] = AArch64Operand::CreateToken(
3904 "sbfm", false, Op.getStartLoc(), getContext());
3905 else if (Tok == "ubfx")
3906 Operands[0] = AArch64Operand::CreateToken(
3907 "ubfm", false, Op.getStartLoc(), getContext());
3909 llvm_unreachable("No valid mnemonic for alias?");
3914 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3915 // InstAlias can't quite handle this since the reg classes aren't
3917 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3918 // The source register can be Wn here, but the matcher expects a
3919 // GPR64. Twiddle it here if necessary.
3920 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3922 unsigned Reg = getXRegFromWReg(Op.getReg());
3923 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3924 Op.getEndLoc(), getContext());
3927 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3928 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3929 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3931 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3933 // The source register can be Wn here, but the matcher expects a
3934 // GPR64. Twiddle it here if necessary.
3935 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3937 unsigned Reg = getXRegFromWReg(Op.getReg());
3938 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3939 Op.getEndLoc(), getContext());
3943 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3944 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3945 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3947 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3949 // The source register can be Wn here, but the matcher expects a
3950 // GPR32. Twiddle it here if necessary.
3951 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3953 unsigned Reg = getWRegFromXReg(Op.getReg());
3954 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3955 Op.getEndLoc(), getContext());
3960 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3961 if (NumOperands == 3 && Tok == "fmov") {
3962 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3963 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3964 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3966 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3970 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3971 Op.getEndLoc(), getContext());
3976 // First try to match against the secondary set of tables containing the
3977 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3978 unsigned MatchResult =
3979 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3981 // If that fails, try against the alternate table containing long-form NEON:
3982 // "fadd v0.2s, v1.2s, v2.2s"
3983 if (MatchResult != Match_Success) {
3984 // But first, save the short-form match result: we can use it in case the
3985 // long-form match also fails.
3986 auto ShortFormNEONErrorInfo = ErrorInfo;
3987 auto ShortFormNEONMatchResult = MatchResult;
3990 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3992 // Now, both matches failed, and the long-form match failed on the mnemonic
3993 // suffix token operand. The short-form match failure is probably more
3994 // relevant: use it instead.
3995 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3996 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3997 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3998 MatchResult = ShortFormNEONMatchResult;
3999 ErrorInfo = ShortFormNEONErrorInfo;
4004 switch (MatchResult) {
4005 case Match_Success: {
4006 // Perform range checking and other semantic validations
4007 SmallVector<SMLoc, 8> OperandLocs;
4008 NumOperands = Operands.size();
4009 for (unsigned i = 1; i < NumOperands; ++i)
4010 OperandLocs.push_back(Operands[i]->getStartLoc());
4011 if (validateInstruction(Inst, OperandLocs))
4015 Out.EmitInstruction(Inst, getSTI());
4018 case Match_MissingFeature: {
4019 assert(ErrorInfo && "Unknown missing feature!");
4020 // Special case the error message for the very common case where only
4021 // a single subtarget feature is missing (neon, e.g.).
4022 std::string Msg = "instruction requires:";
4024 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4025 if (ErrorInfo & Mask) {
4027 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4031 return Error(IDLoc, Msg);
4033 case Match_MnemonicFail:
4034 return showMatchError(IDLoc, MatchResult);
4035 case Match_InvalidOperand: {
4036 SMLoc ErrorLoc = IDLoc;
4038 if (ErrorInfo != ~0ULL) {
4039 if (ErrorInfo >= Operands.size())
4040 return Error(IDLoc, "too few operands for instruction");
4042 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4043 if (ErrorLoc == SMLoc())
4046 // If the match failed on a suffix token operand, tweak the diagnostic
4048 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4049 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4050 MatchResult = Match_InvalidSuffix;
4052 return showMatchError(ErrorLoc, MatchResult);
4054 case Match_InvalidMemoryIndexed1:
4055 case Match_InvalidMemoryIndexed2:
4056 case Match_InvalidMemoryIndexed4:
4057 case Match_InvalidMemoryIndexed8:
4058 case Match_InvalidMemoryIndexed16:
4059 case Match_InvalidCondCode:
4060 case Match_AddSubRegExtendSmall:
4061 case Match_AddSubRegExtendLarge:
4062 case Match_AddSubSecondSource:
4063 case Match_LogicalSecondSource:
4064 case Match_AddSubRegShift32:
4065 case Match_AddSubRegShift64:
4066 case Match_InvalidMovImm32Shift:
4067 case Match_InvalidMovImm64Shift:
4068 case Match_InvalidFPImm:
4069 case Match_InvalidMemoryWExtend8:
4070 case Match_InvalidMemoryWExtend16:
4071 case Match_InvalidMemoryWExtend32:
4072 case Match_InvalidMemoryWExtend64:
4073 case Match_InvalidMemoryWExtend128:
4074 case Match_InvalidMemoryXExtend8:
4075 case Match_InvalidMemoryXExtend16:
4076 case Match_InvalidMemoryXExtend32:
4077 case Match_InvalidMemoryXExtend64:
4078 case Match_InvalidMemoryXExtend128:
4079 case Match_InvalidMemoryIndexed4SImm7:
4080 case Match_InvalidMemoryIndexed8SImm7:
4081 case Match_InvalidMemoryIndexed16SImm7:
4082 case Match_InvalidMemoryIndexedSImm9:
4083 case Match_InvalidImm0_1:
4084 case Match_InvalidImm0_7:
4085 case Match_InvalidImm0_15:
4086 case Match_InvalidImm0_31:
4087 case Match_InvalidImm0_63:
4088 case Match_InvalidImm0_127:
4089 case Match_InvalidImm0_65535:
4090 case Match_InvalidImm1_8:
4091 case Match_InvalidImm1_16:
4092 case Match_InvalidImm1_32:
4093 case Match_InvalidImm1_64:
4094 case Match_InvalidIndex1:
4095 case Match_InvalidIndexB:
4096 case Match_InvalidIndexH:
4097 case Match_InvalidIndexS:
4098 case Match_InvalidIndexD:
4099 case Match_InvalidLabel:
4102 if (ErrorInfo >= Operands.size())
4103 return Error(IDLoc, "too few operands for instruction");
4104 // Any time we get here, there's nothing fancy to do. Just get the
4105 // operand SMLoc and display the diagnostic.
4106 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4107 if (ErrorLoc == SMLoc())
4109 return showMatchError(ErrorLoc, MatchResult);
4113 llvm_unreachable("Implement any new match types added!");
4116 /// ParseDirective parses the arm specific directives
4117 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4118 const MCObjectFileInfo::Environment Format =
4119 getContext().getObjectFileInfo()->getObjectFileType();
4120 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4121 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4123 StringRef IDVal = DirectiveID.getIdentifier();
4124 SMLoc Loc = DirectiveID.getLoc();
4125 if (IDVal == ".hword")
4126 return parseDirectiveWord(2, Loc);
4127 if (IDVal == ".word")
4128 return parseDirectiveWord(4, Loc);
4129 if (IDVal == ".xword")
4130 return parseDirectiveWord(8, Loc);
4131 if (IDVal == ".tlsdesccall")
4132 return parseDirectiveTLSDescCall(Loc);
4133 if (IDVal == ".ltorg" || IDVal == ".pool")
4134 return parseDirectiveLtorg(Loc);
4135 if (IDVal == ".unreq")
4136 return parseDirectiveUnreq(Loc);
4138 if (!IsMachO && !IsCOFF) {
4139 if (IDVal == ".inst")
4140 return parseDirectiveInst(Loc);
4143 return parseDirectiveLOH(IDVal, Loc);
4146 /// parseDirectiveWord
4147 /// ::= .word [ expression (, expression)* ]
4148 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4149 MCAsmParser &Parser = getParser();
4150 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4152 const MCExpr *Value;
4153 if (getParser().parseExpression(Value))
4156 getParser().getStreamer().EmitValue(Value, Size, L);
4158 if (getLexer().is(AsmToken::EndOfStatement))
4161 // FIXME: Improve diagnostic.
4162 if (getLexer().isNot(AsmToken::Comma))
4163 return Error(L, "unexpected token in directive");
4172 /// parseDirectiveInst
4173 /// ::= .inst opcode [, ...]
4174 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4175 MCAsmParser &Parser = getParser();
4176 if (getLexer().is(AsmToken::EndOfStatement)) {
4177 Parser.eatToEndOfStatement();
4178 Error(Loc, "expected expression following directive");
4185 if (getParser().parseExpression(Expr)) {
4186 Error(Loc, "expected expression");
4190 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4192 Error(Loc, "expected constant expression");
4196 getTargetStreamer().emitInst(Value->getValue());
4198 if (getLexer().is(AsmToken::EndOfStatement))
4201 if (getLexer().isNot(AsmToken::Comma)) {
4202 Error(Loc, "unexpected token in directive");
4206 Parser.Lex(); // Eat comma.
4213 // parseDirectiveTLSDescCall:
4214 // ::= .tlsdesccall symbol
4215 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4217 if (getParser().parseIdentifier(Name))
4218 return Error(L, "expected symbol after directive");
4220 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4221 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4222 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4225 Inst.setOpcode(AArch64::TLSDESCCALL);
4226 Inst.addOperand(MCOperand::createExpr(Expr));
4228 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4232 /// ::= .loh <lohName | lohId> label1, ..., labelN
4233 /// The number of arguments depends on the loh identifier.
4234 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4235 if (IDVal != MCLOHDirectiveName())
4238 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4239 if (getParser().getTok().isNot(AsmToken::Integer))
4240 return TokError("expected an identifier or a number in directive");
4241 // We successfully get a numeric value for the identifier.
4242 // Check if it is valid.
4243 int64_t Id = getParser().getTok().getIntVal();
4244 if (Id <= -1U && !isValidMCLOHType(Id))
4245 return TokError("invalid numeric identifier in directive");
4246 Kind = (MCLOHType)Id;
4248 StringRef Name = getTok().getIdentifier();
4249 // We successfully parse an identifier.
4250 // Check if it is a recognized one.
4251 int Id = MCLOHNameToId(Name);
4254 return TokError("invalid identifier in directive");
4255 Kind = (MCLOHType)Id;
4257 // Consume the identifier.
4259 // Get the number of arguments of this LOH.
4260 int NbArgs = MCLOHIdToNbArgs(Kind);
4262 assert(NbArgs != -1 && "Invalid number of arguments");
4264 SmallVector<MCSymbol *, 3> Args;
4265 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4267 if (getParser().parseIdentifier(Name))
4268 return TokError("expected identifier in directive");
4269 Args.push_back(getContext().getOrCreateSymbol(Name));
4271 if (Idx + 1 == NbArgs)
4273 if (getLexer().isNot(AsmToken::Comma))
4274 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4277 if (getLexer().isNot(AsmToken::EndOfStatement))
4278 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4280 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4284 /// parseDirectiveLtorg
4285 /// ::= .ltorg | .pool
4286 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4287 getTargetStreamer().emitCurrentConstantPool();
4291 /// parseDirectiveReq
4292 /// ::= name .req registername
4293 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4294 MCAsmParser &Parser = getParser();
4295 Parser.Lex(); // Eat the '.req' token.
4296 SMLoc SRegLoc = getLoc();
4297 unsigned RegNum = tryParseRegister();
4298 bool IsVector = false;
4300 if (RegNum == static_cast<unsigned>(-1)) {
4302 RegNum = tryMatchVectorRegister(Kind, false);
4303 if (!Kind.empty()) {
4304 Error(SRegLoc, "vector register without type specifier expected");
4310 if (RegNum == static_cast<unsigned>(-1)) {
4311 Parser.eatToEndOfStatement();
4312 Error(SRegLoc, "register name or alias expected");
4316 // Shouldn't be anything else.
4317 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4318 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4319 Parser.eatToEndOfStatement();
4323 Parser.Lex(); // Consume the EndOfStatement
4325 auto pair = std::make_pair(IsVector, RegNum);
4326 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4327 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4332 /// parseDirectiveUneq
4333 /// ::= .unreq registername
4334 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4335 MCAsmParser &Parser = getParser();
4336 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4337 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4338 Parser.eatToEndOfStatement();
4341 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4342 Parser.Lex(); // Eat the identifier.
4347 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4348 AArch64MCExpr::VariantKind &ELFRefKind,
4349 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4351 ELFRefKind = AArch64MCExpr::VK_INVALID;
4352 DarwinRefKind = MCSymbolRefExpr::VK_None;
4355 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4356 ELFRefKind = AE->getKind();
4357 Expr = AE->getSubExpr();
4360 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4362 // It's a simple symbol reference with no addend.
4363 DarwinRefKind = SE->getKind();
4367 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4371 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4374 DarwinRefKind = SE->getKind();
4376 if (BE->getOpcode() != MCBinaryExpr::Add &&
4377 BE->getOpcode() != MCBinaryExpr::Sub)
4380 // See if the addend is is a constant, otherwise there's more going
4381 // on here than we can deal with.
4382 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4386 Addend = AddendExpr->getValue();
4387 if (BE->getOpcode() == MCBinaryExpr::Sub)
4390 // It's some symbol reference + a constant addend, but really
4391 // shouldn't use both Darwin and ELF syntax.
4392 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4393 DarwinRefKind == MCSymbolRefExpr::VK_None;
4396 /// Force static initialization.
4397 extern "C" void LLVMInitializeAArch64AsmParser() {
4398 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4399 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4400 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4403 #define GET_REGISTER_MATCHER
4404 #define GET_SUBTARGET_FEATURE_NAME
4405 #define GET_MATCHER_IMPLEMENTATION
4406 #include "AArch64GenAsmMatcher.inc"
4408 // Define this matcher function after the auto-generated include so we
4409 // have the match class enum definitions.
4410 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4412 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4413 // If the kind is a token for a literal immediate, check if our asm
4414 // operand matches. This is for InstAliases which have a fixed-value
4415 // immediate in the syntax.
4416 int64_t ExpectedVal;
4419 return Match_InvalidOperand;
4461 return Match_InvalidOperand;
4462 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4464 return Match_InvalidOperand;
4465 if (CE->getValue() == ExpectedVal)
4466 return Match_Success;
4467 return Match_InvalidOperand;
4471 AArch64AsmParser::OperandMatchResultTy
4472 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4476 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4477 Error(S, "expected register");
4478 return MatchOperand_ParseFail;
4481 int FirstReg = tryParseRegister();
4482 if (FirstReg == -1) {
4483 return MatchOperand_ParseFail;
4485 const MCRegisterClass &WRegClass =
4486 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4487 const MCRegisterClass &XRegClass =
4488 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4490 bool isXReg = XRegClass.contains(FirstReg),
4491 isWReg = WRegClass.contains(FirstReg);
4492 if (!isXReg && !isWReg) {
4493 Error(S, "expected first even register of a "
4494 "consecutive same-size even/odd register pair");
4495 return MatchOperand_ParseFail;
4498 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4499 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4501 if (FirstEncoding & 0x1) {
4502 Error(S, "expected first even register of a "
4503 "consecutive same-size even/odd register pair");
4504 return MatchOperand_ParseFail;
4508 if (getParser().getTok().isNot(AsmToken::Comma)) {
4509 Error(M, "expected comma");
4510 return MatchOperand_ParseFail;
4516 int SecondReg = tryParseRegister();
4517 if (SecondReg ==-1) {
4518 return MatchOperand_ParseFail;
4521 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4522 (isXReg && !XRegClass.contains(SecondReg)) ||
4523 (isWReg && !WRegClass.contains(SecondReg))) {
4524 Error(E,"expected second odd register of a "
4525 "consecutive same-size even/odd register pair");
4526 return MatchOperand_ParseFail;
4531 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4532 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4534 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4535 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4538 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4541 return MatchOperand_Success;