//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
109 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
112 enum AArch64MatchResultTy {
113 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
114 #define GET_OPERAND_DIAGNOSTIC_TYPES
115 #include "AArch64GenAsmMatcher.inc"
117 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
118 const MCInstrInfo &MII, const MCTargetOptions &Options)
119 : MCTargetAsmParser(Options, STI) {
120 MCAsmParserExtension::Initialize(Parser);
121 MCStreamer &S = getParser().getStreamer();
122 if (S.getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(S);
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction operand.
147 class AArch64Operand : public MCParsedAsmOperand {
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
217 uint32_t PStateField;
230 struct ShiftExtendOp {
231 AArch64_AM::ShiftExtendType Type;
233 bool HasExplicitAmount;
243 struct VectorListOp VectorList;
244 struct VectorIndexOp VectorIndex;
246 struct ShiftedImmOp ShiftedImm;
247 struct CondCodeOp CondCode;
248 struct FPImmOp FPImm;
249 struct BarrierOp Barrier;
250 struct SysRegOp SysReg;
251 struct SysCRImmOp SysCRImm;
252 struct PrefetchOp Prefetch;
253 struct ShiftExtendOp ShiftExtend;
256 // Keep the MCContext around as the MCExprs may need manipulated during
257 // the add<>Operands() calls.
261 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
263 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
265 StartLoc = o.StartLoc;
275 ShiftedImm = o.ShiftedImm;
278 CondCode = o.CondCode;
290 VectorList = o.VectorList;
293 VectorIndex = o.VectorIndex;
299 SysCRImm = o.SysCRImm;
302 Prefetch = o.Prefetch;
305 ShiftExtend = o.ShiftExtend;
310 /// getStartLoc - Get the location of the first token of this operand.
311 SMLoc getStartLoc() const override { return StartLoc; }
312 /// getEndLoc - Get the location of the last token of this operand.
313 SMLoc getEndLoc() const override { return EndLoc; }
315 StringRef getToken() const {
316 assert(Kind == k_Token && "Invalid access!");
317 return StringRef(Tok.Data, Tok.Length);
320 bool isTokenSuffix() const {
321 assert(Kind == k_Token && "Invalid access!");
325 const MCExpr *getImm() const {
326 assert(Kind == k_Immediate && "Invalid access!");
330 const MCExpr *getShiftedImmVal() const {
331 assert(Kind == k_ShiftedImm && "Invalid access!");
332 return ShiftedImm.Val;
335 unsigned getShiftedImmShift() const {
336 assert(Kind == k_ShiftedImm && "Invalid access!");
337 return ShiftedImm.ShiftAmount;
340 AArch64CC::CondCode getCondCode() const {
341 assert(Kind == k_CondCode && "Invalid access!");
342 return CondCode.Code;
345 unsigned getFPImm() const {
346 assert(Kind == k_FPImm && "Invalid access!");
350 unsigned getBarrier() const {
351 assert(Kind == k_Barrier && "Invalid access!");
355 StringRef getBarrierName() const {
356 assert(Kind == k_Barrier && "Invalid access!");
357 return StringRef(Barrier.Data, Barrier.Length);
360 unsigned getReg() const override {
361 assert(Kind == k_Register && "Invalid access!");
365 unsigned getVectorListStart() const {
366 assert(Kind == k_VectorList && "Invalid access!");
367 return VectorList.RegNum;
370 unsigned getVectorListCount() const {
371 assert(Kind == k_VectorList && "Invalid access!");
372 return VectorList.Count;
375 unsigned getVectorIndex() const {
376 assert(Kind == k_VectorIndex && "Invalid access!");
377 return VectorIndex.Val;
380 StringRef getSysReg() const {
381 assert(Kind == k_SysReg && "Invalid access!");
382 return StringRef(SysReg.Data, SysReg.Length);
385 unsigned getSysCR() const {
386 assert(Kind == k_SysCR && "Invalid access!");
390 unsigned getPrefetch() const {
391 assert(Kind == k_Prefetch && "Invalid access!");
395 StringRef getPrefetchName() const {
396 assert(Kind == k_Prefetch && "Invalid access!");
397 return StringRef(Prefetch.Data, Prefetch.Length);
400 AArch64_AM::ShiftExtendType getShiftExtendType() const {
401 assert(Kind == k_ShiftExtend && "Invalid access!");
402 return ShiftExtend.Type;
405 unsigned getShiftExtendAmount() const {
406 assert(Kind == k_ShiftExtend && "Invalid access!");
407 return ShiftExtend.Amount;
410 bool hasShiftExtendAmount() const {
411 assert(Kind == k_ShiftExtend && "Invalid access!");
412 return ShiftExtend.HasExplicitAmount;
415 bool isImm() const override { return Kind == k_Immediate; }
416 bool isMem() const override { return false; }
417 bool isSImm9() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val < 256);
426 bool isSImm7s4() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
435 bool isSImm7s8() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
444 bool isSImm7s16() const {
447 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
450 int64_t Val = MCE->getValue();
451 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
454 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
455 AArch64MCExpr::VariantKind ELFRefKind;
456 MCSymbolRefExpr::VariantKind DarwinRefKind;
458 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
460 // If we don't understand the expression, assume the best and
461 // let the fixup and relocation code deal with it.
465 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
466 ELFRefKind == AArch64MCExpr::VK_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
474 // Note that we don't range-check the addend. It's adjusted modulo page
475 // size when converted, so there is no "out of range" condition when using
477 return Addend >= 0 && (Addend % Scale) == 0;
478 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
479 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
480 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
487 template <int Scale> bool isUImm12Offset() const {
491 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493 return isSymbolicUImm12Offset(getImm(), Scale);
495 int64_t Val = MCE->getValue();
496 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
499 bool isImm0_1() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val >= 0 && Val < 2);
508 bool isImm0_7() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val >= 0 && Val < 8);
517 bool isImm1_8() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val > 0 && Val < 9);
526 bool isImm0_15() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val >= 0 && Val < 16);
535 bool isImm1_16() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val > 0 && Val < 17);
544 bool isImm0_31() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 0 && Val < 32);
553 bool isImm1_31() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 1 && Val < 32);
562 bool isImm1_32() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 1 && Val < 33);
571 bool isImm0_63() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 0 && Val < 64);
580 bool isImm1_63() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 1 && Val < 64);
589 bool isImm1_64() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 1 && Val < 65);
598 bool isImm0_127() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 128);
607 bool isImm0_255() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 0 && Val < 256);
616 bool isImm0_65535() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
623 return (Val >= 0 && Val < 65536);
625 bool isImm32_63() const {
628 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
631 int64_t Val = MCE->getValue();
632 return (Val >= 32 && Val < 64);
634 bool isLogicalImm32() const {
637 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
640 int64_t Val = MCE->getValue();
641 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
644 return AArch64_AM::isLogicalImmediate(Val, 32);
646 bool isLogicalImm64() const {
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
654 bool isLogicalImm32Not() const {
657 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
660 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
661 return AArch64_AM::isLogicalImmediate(Val, 32);
663 bool isLogicalImm64Not() const {
666 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
669 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
671 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
672 bool isAddSubImm() const {
673 if (!isShiftedImm() && !isImm())
678 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
679 if (isShiftedImm()) {
680 unsigned Shift = ShiftedImm.ShiftAmount;
681 Expr = ShiftedImm.Val;
682 if (Shift != 0 && Shift != 12)
688 AArch64MCExpr::VariantKind ELFRefKind;
689 MCSymbolRefExpr::VariantKind DarwinRefKind;
691 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
692 DarwinRefKind, Addend)) {
693 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
694 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
695 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
696 || ELFRefKind == AArch64MCExpr::VK_LO12
697 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
698 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
699 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
700 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
701 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
702 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
703 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
706 // Otherwise it should be a real immediate in range:
707 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
708 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
710 bool isAddSubImmNeg() const {
711 if (!isShiftedImm() && !isImm())
716 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
717 if (isShiftedImm()) {
718 unsigned Shift = ShiftedImm.ShiftAmount;
719 Expr = ShiftedImm.Val;
720 if (Shift != 0 && Shift != 12)
725 // Otherwise it should be a real negative immediate in range:
726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
727 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
729 bool isCondCode() const { return Kind == k_CondCode; }
730 bool isSIMDImmType10() const {
733 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
736 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
738 bool isBranchTarget26() const {
741 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
744 int64_t Val = MCE->getValue();
747 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
749 bool isPCRelLabel19() const {
752 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
755 int64_t Val = MCE->getValue();
758 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
760 bool isBranchTarget14() const {
763 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
766 int64_t Val = MCE->getValue();
769 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
773 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
777 AArch64MCExpr::VariantKind ELFRefKind;
778 MCSymbolRefExpr::VariantKind DarwinRefKind;
780 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
781 DarwinRefKind, Addend)) {
784 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
787 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
788 if (ELFRefKind == AllowedModifiers[i])
795 bool isMovZSymbolG3() const {
796 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
799 bool isMovZSymbolG2() const {
800 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
801 AArch64MCExpr::VK_TPREL_G2,
802 AArch64MCExpr::VK_DTPREL_G2});
805 bool isMovZSymbolG1() const {
806 return isMovWSymbol({
807 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
808 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
809 AArch64MCExpr::VK_DTPREL_G1,
813 bool isMovZSymbolG0() const {
814 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
815 AArch64MCExpr::VK_TPREL_G0,
816 AArch64MCExpr::VK_DTPREL_G0});
819 bool isMovKSymbolG3() const {
820 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
823 bool isMovKSymbolG2() const {
824 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
827 bool isMovKSymbolG1() const {
828 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
829 AArch64MCExpr::VK_TPREL_G1_NC,
830 AArch64MCExpr::VK_DTPREL_G1_NC});
833 bool isMovKSymbolG0() const {
835 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
836 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
839 template<int RegWidth, int Shift>
840 bool isMOVZMovAlias() const {
841 if (!isImm()) return false;
843 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
844 if (!CE) return false;
845 uint64_t Value = CE->getValue();
848 Value &= 0xffffffffULL;
850 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
851 if (Value == 0 && Shift != 0)
854 return (Value & ~(0xffffULL << Shift)) == 0;
857 template<int RegWidth, int Shift>
858 bool isMOVNMovAlias() const {
859 if (!isImm()) return false;
861 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
862 if (!CE) return false;
863 uint64_t Value = CE->getValue();
865 // MOVZ takes precedence over MOVN.
866 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
867 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
872 Value &= 0xffffffffULL;
874 return (Value & ~(0xffffULL << Shift)) == 0;
877 bool isFPImm() const { return Kind == k_FPImm; }
878 bool isBarrier() const { return Kind == k_Barrier; }
879 bool isSysReg() const { return Kind == k_SysReg; }
880 bool isMRSSystemRegister() const {
881 if (!isSysReg()) return false;
883 return SysReg.MRSReg != -1U;
885 bool isMSRSystemRegister() const {
886 if (!isSysReg()) return false;
887 return SysReg.MSRReg != -1U;
889 bool isSystemPStateFieldWithImm0_1() const {
890 if (!isSysReg()) return false;
891 return SysReg.PStateField == AArch64PState::PAN;
893 bool isSystemPStateFieldWithImm0_15() const {
894 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
895 return SysReg.PStateField != -1U;
897 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
898 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
899 bool isVectorRegLo() const {
900 return Kind == k_Register && Reg.isVector &&
901 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
904 bool isGPR32as64() const {
905 return Kind == k_Register && !Reg.isVector &&
906 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
908 bool isWSeqPair() const {
909 return Kind == k_Register && !Reg.isVector &&
910 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
913 bool isXSeqPair() const {
914 return Kind == k_Register && !Reg.isVector &&
915 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
919 bool isGPR64sp0() const {
920 return Kind == k_Register && !Reg.isVector &&
921 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
924 /// Is this a vector list with the type implicit (presumably attached to the
925 /// instruction itself)?
926 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
927 return Kind == k_VectorList && VectorList.Count == NumRegs &&
928 !VectorList.ElementKind;
931 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
932 bool isTypedVectorList() const {
933 if (Kind != k_VectorList)
935 if (VectorList.Count != NumRegs)
937 if (VectorList.ElementKind != ElementKind)
939 return VectorList.NumElements == NumElements;
942 bool isVectorIndex1() const {
943 return Kind == k_VectorIndex && VectorIndex.Val == 1;
945 bool isVectorIndexB() const {
946 return Kind == k_VectorIndex && VectorIndex.Val < 16;
948 bool isVectorIndexH() const {
949 return Kind == k_VectorIndex && VectorIndex.Val < 8;
951 bool isVectorIndexS() const {
952 return Kind == k_VectorIndex && VectorIndex.Val < 4;
954 bool isVectorIndexD() const {
955 return Kind == k_VectorIndex && VectorIndex.Val < 2;
957 bool isToken() const override { return Kind == k_Token; }
958 bool isTokenEqual(StringRef Str) const {
959 return Kind == k_Token && getToken() == Str;
961 bool isSysCR() const { return Kind == k_SysCR; }
962 bool isPrefetch() const { return Kind == k_Prefetch; }
963 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
964 bool isShifter() const {
965 if (!isShiftExtend())
968 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
969 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
970 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
971 ST == AArch64_AM::MSL);
973 bool isExtend() const {
974 if (!isShiftExtend())
977 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
978 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
979 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
980 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
981 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
982 ET == AArch64_AM::LSL) &&
983 getShiftExtendAmount() <= 4;
986 bool isExtend64() const {
989 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
990 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
991 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
993 bool isExtendLSL64() const {
996 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
997 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
998 ET == AArch64_AM::LSL) &&
999 getShiftExtendAmount() <= 4;
1002 template<int Width> bool isMemXExtend() const {
1005 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1006 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1007 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1008 getShiftExtendAmount() == 0);
1011 template<int Width> bool isMemWExtend() const {
1014 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1015 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1016 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1017 getShiftExtendAmount() == 0);
1020 template <unsigned width>
1021 bool isArithmeticShifter() const {
1025 // An arithmetic shifter is LSL, LSR, or ASR.
1026 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1027 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1028 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1031 template <unsigned width>
1032 bool isLogicalShifter() const {
1036 // A logical shifter is LSL, LSR, ASR or ROR.
1037 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1038 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1039 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1040 getShiftExtendAmount() < width;
1043 bool isMovImm32Shifter() const {
1047 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1048 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1049 if (ST != AArch64_AM::LSL)
1051 uint64_t Val = getShiftExtendAmount();
1052 return (Val == 0 || Val == 16);
1055 bool isMovImm64Shifter() const {
1059 // A MOVi shifter is LSL of 0 or 16.
1060 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1061 if (ST != AArch64_AM::LSL)
1063 uint64_t Val = getShiftExtendAmount();
1064 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1067 bool isLogicalVecShifter() const {
1071 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1072 unsigned Shift = getShiftExtendAmount();
1073 return getShiftExtendType() == AArch64_AM::LSL &&
1074 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1077 bool isLogicalVecHalfWordShifter() const {
1078 if (!isLogicalVecShifter())
1081 // A logical vector shifter is a left shift by 0 or 8.
1082 unsigned Shift = getShiftExtendAmount();
1083 return getShiftExtendType() == AArch64_AM::LSL &&
1084 (Shift == 0 || Shift == 8);
1087 bool isMoveVecShifter() const {
1088 if (!isShiftExtend())
1091 // A logical vector shifter is a left shift by 8 or 16.
1092 unsigned Shift = getShiftExtendAmount();
1093 return getShiftExtendType() == AArch64_AM::MSL &&
1094 (Shift == 8 || Shift == 16);
1097 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1098 // to LDUR/STUR when the offset is not legal for the former but is for
1099 // the latter. As such, in addition to checking for being a legal unscaled
1100 // address, also check that it is not a legal scaled address. This avoids
1101 // ambiguity in the matcher.
1103 bool isSImm9OffsetFB() const {
1104 return isSImm9() && !isUImm12Offset<Width / 8>();
1107 bool isAdrpLabel() const {
1108 // Validation was handled during parsing, so we just sanity check that
1109 // something didn't go haywire.
1113 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1114 int64_t Val = CE->getValue();
1115 int64_t Min = - (4096 * (1LL << (21 - 1)));
1116 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1117 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1123 bool isAdrLabel() const {
1124 // Validation was handled during parsing, so we just sanity check that
1125 // something didn't go haywire.
1129 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1130 int64_t Val = CE->getValue();
1131 int64_t Min = - (1LL << (21 - 1));
1132 int64_t Max = ((1LL << (21 - 1)) - 1);
1133 return Val >= Min && Val <= Max;
1139 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1140 // Add as immediates when possible. Null MCExpr = 0.
1142 Inst.addOperand(MCOperand::createImm(0));
1143 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1144 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1146 Inst.addOperand(MCOperand::createExpr(Expr));
1149 void addRegOperands(MCInst &Inst, unsigned N) const {
1150 assert(N == 1 && "Invalid number of operands!");
1151 Inst.addOperand(MCOperand::createReg(getReg()));
1154 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1157 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1159 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1160 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1161 RI->getEncodingValue(getReg()));
1163 Inst.addOperand(MCOperand::createReg(Reg));
1166 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1169 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1170 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1173 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1174 assert(N == 1 && "Invalid number of operands!");
1176 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1177 Inst.addOperand(MCOperand::createReg(getReg()));
1180 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 Inst.addOperand(MCOperand::createReg(getReg()));
1185 template <unsigned NumRegs>
1186 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 static const unsigned FirstRegs[] = { AArch64::D0,
1191 AArch64::D0_D1_D2_D3 };
1192 unsigned FirstReg = FirstRegs[NumRegs - 1];
1195 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1198 template <unsigned NumRegs>
1199 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1200 assert(N == 1 && "Invalid number of operands!");
1201 static const unsigned FirstRegs[] = { AArch64::Q0,
1204 AArch64::Q0_Q1_Q2_Q3 };
1205 unsigned FirstReg = FirstRegs[NumRegs - 1];
1208 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1211 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1212 assert(N == 1 && "Invalid number of operands!");
1213 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1216 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1217 assert(N == 1 && "Invalid number of operands!");
1218 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1221 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1222 assert(N == 1 && "Invalid number of operands!");
1223 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1226 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1227 assert(N == 1 && "Invalid number of operands!");
1228 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1231 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1232 assert(N == 1 && "Invalid number of operands!");
1233 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
// Generic immediate adder: delegates to addExpr(), which (per the
// original comment) normalizes pageoff symrefs with addends.
1236 void addImmOperands(MCInst &Inst, unsigned N) const {
1237 assert(N == 1 && "Invalid number of operands!");
1238 // If this is a pageoff symrefexpr with an addend, adjust the addend
1239 // to be only the page-offset portion. Otherwise, just add the expr
1241 addExpr(Inst, getImm());
// ADD/SUB immediate: emits two operands — the value and the LSL shift
// amount (0 when no explicit shift was parsed).
1244 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1245 assert(N == 2 && "Invalid number of operands!");
1246 if (isShiftedImm()) {
1247 addExpr(Inst, getShiftedImmVal());
1248 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
// NOTE(review): line 1249 (presumably "} else {") is missing from this
// listing; the two lines below are the unshifted path.
1250 addExpr(Inst, getImm());
1251 Inst.addOperand(MCOperand::createImm(0));
// Negated ADD/SUB immediate (used for e.g. "sub ... #-N" aliases):
// requires a constant expression (cast<> asserts), emits -value + shift.
1255 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1256 assert(N == 2 && "Invalid number of operands!");
1258 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1259 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1260 int64_t Val = -CE->getValue();
1261 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1263 Inst.addOperand(MCOperand::createImm(Val));
1264 Inst.addOperand(MCOperand::createImm(ShiftAmt));
// Condition code: emitted directly as an immediate (AArch64CC encoding).
1267 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1268 assert(N == 1 && "Invalid number of operands!");
1269 Inst.addOperand(MCOperand::createImm(getCondCode()));
// ADRP label: a constant is scaled down to pages (>> 12); a symbolic
// expression is passed through for the fixup machinery (the dyn_cast
// failure branch — line 1275ff — is partly missing from this listing).
1272 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1276 addExpr(Inst, getImm());
1278 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
// ADR label: byte-granular, so no scaling — plain immediate add.
1281 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1282 addImmOperands(Inst, N);
// Unsigned 12-bit scaled offset (LDR/STR immediate forms): constants are
// divided by Scale; non-constants are emitted as expressions for fixups.
// Scale is not declared in the visible lines — presumably a template
// parameter; TODO confirm against full source.
1286 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1287 assert(N == 1 && "Invalid number of operands!");
1288 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1291 Inst.addOperand(MCOperand::createExpr(getImm()));
1294 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
// Signed 9-bit immediate (unscaled load/store offsets): must already be
// constant (cast<> asserts otherwise).
1297 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1298 assert(N == 1 && "Invalid number of operands!");
1299 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1300 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
// Signed 7-bit immediates scaled by 4/8/16 (load/store-pair offsets):
// the parsed byte offset is divided down to the encoded unit count.
1303 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1306 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1309 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1312 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1315 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1316 assert(N == 1 && "Invalid number of operands!");
1317 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1318 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
// Bounded-range immediate adders (addImm<lo>_<hi>Operands). All share
// one pattern: the operand must already be an MCConstantExpr (cast<>
// asserts), and the raw value is emitted unchanged — range checking is
// done by the is*() predicates during matching, not here.
1321 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1324 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1327 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
1329 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1330 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1333 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1339 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1341 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1345 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): redundant — cast<> above already asserts non-null; only
// this variant of the family carries the extra assert.
1348 assert(MCE && "Invalid constant immediate operand!");
1349 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1352 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1355 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1358 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1359 assert(N == 1 && "Invalid number of operands!");
1360 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1361 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1364 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1370 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1371 assert(N == 1 && "Invalid number of operands!");
1372 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1373 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1376 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1377 assert(N == 1 && "Invalid number of operands!");
1378 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1379 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1382 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1383 assert(N == 1 && "Invalid number of operands!");
1384 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1385 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1388 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1389 assert(N == 1 && "Invalid number of operands!");
1390 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1391 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1394 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1395 assert(N == 1 && "Invalid number of operands!");
1396 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1397 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1400 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1401 assert(N == 1 && "Invalid number of operands!");
1402 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1403 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1406 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1407 assert(N == 1 && "Invalid number of operands!");
1408 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
// Logical (bitmask) immediates: the raw value is converted to the
// N:immr:imms hardware encoding via encodeLogicalImmediate. The 32-bit
// variants mask to the low 32 bits first; the "Not" variants encode the
// bitwise complement (for BIC-style aliases of AND-style instructions).
1412 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1413 assert(N == 1 && "Invalid number of operands!");
1414 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1416 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1417 Inst.addOperand(MCOperand::createImm(encoding));
1420 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1421 assert(N == 1 && "Invalid number of operands!");
1422 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1423 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1424 Inst.addOperand(MCOperand::createImm(encoding));
1427 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1428 assert(N == 1 && "Invalid number of operands!");
1429 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1430 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1431 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1432 Inst.addOperand(MCOperand::createImm(encoding));
1435 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1436 assert(N == 1 && "Invalid number of operands!");
1437 const MCExpr/*review: listing gap*/ // line 1438 missing from listing
1439 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1440 Inst.addOperand(MCOperand::createImm(encoding));
// SIMD modified-immediate type 10 (MOVI byte-mask form): encode via the
// AdvSIMD helper.
1443 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1444 assert(N == 1 && "Invalid number of operands!");
1445 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1446 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1447 Inst.addOperand(MCOperand::createImm(encoding));
// Branch/label target adders: a constant target is shifted right by 2
// (instruction words); a symbolic label is emitted as an expression so
// the fixup/relocation machinery resolves it later. The three variants
// differ only in the immediate width they match (26/19/14 bits).
1450 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1451 // Branch operands don't encode the low bits, so shift them off
1452 // here. If it's a label, however, just put it on directly as there's
1453 // not enough information now to do anything.
1454 assert(N == 1 && "Invalid number of operands!");
1455 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1457 addExpr(Inst, getImm());
1460 assert(MCE && "Invalid constant immediate operand!");
1461 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1464 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1465 // Branch operands don't encode the low bits, so shift them off
1466 // here. If it's a label, however, just put it on directly as there's
1467 // not enough information now to do anything.
1468 assert(N == 1 && "Invalid number of operands!");
1469 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1471 addExpr(Inst, getImm());
1474 assert(MCE && "Invalid constant immediate operand!");
1475 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1478 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1479 // Branch operands don't encode the low bits, so shift them off
1480 // here. If it's a label, however, just put it on directly as there's
1481 // not enough information now to do anything.
1482 assert(N == 1 && "Invalid number of operands!");
1483 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1485 addExpr(Inst, getImm());
1488 assert(MCE && "Invalid constant immediate operand!");
1489 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
// System-ish operand adders: each forwards a pre-validated numeric
// encoding (FP8 immediate, barrier option, MRS/MSR system register,
// PState field, CRn/CRm index, prefetch op) as a plain immediate.
1492 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1493 assert(N == 1 && "Invalid number of operands!");
1494 Inst.addOperand(MCOperand::createImm(getFPImm()));
1497 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1498 assert(N == 1 && "Invalid number of operands!");
1499 Inst.addOperand(MCOperand::createImm(getBarrier()));
// MRS (read) and MSR (write) use different register encodings, hence the
// separate MRSReg/MSRReg fields on SysReg.
1502 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1503 assert(N == 1 && "Invalid number of operands!");
1505 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1508 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1509 assert(N == 1 && "Invalid number of operands!");
1511 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1514 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1515 assert(N == 1 && "Invalid number of operands!");
1517 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1520 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1521 assert(N == 1 && "Invalid number of operands!");
1523 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1526 void addSysCROperands(MCInst &Inst, unsigned N) const {
1527 assert(N == 1 && "Invalid number of operands!");
1528 Inst.addOperand(MCOperand::createImm(getSysCR()));
1531 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1532 assert(N == 1 && "Invalid number of operands!");
1533 Inst.addOperand(MCOperand::createImm(getPrefetch()));
// Shift/extend operand adders: pack (type, amount) into the single
// immediate form the instruction encodings expect.
1536 void addShifterOperands(MCInst &Inst, unsigned N) const {
1537 assert(N == 1 && "Invalid number of operands!");
1539 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1540 Inst.addOperand(MCOperand::createImm(Imm));
// Arithmetic extends: a bare "lsl" is canonicalized to UXTW (32-bit GPR
// operand) or UXTX (64-bit GPR operand) before encoding.
1543 void addExtendOperands(MCInst &Inst, unsigned N) const {
1544 assert(N == 1 && "Invalid number of operands!");
1545 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1546 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1547 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1548 Inst.addOperand(MCOperand::createImm(Imm));
1551 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1552 assert(N == 1 && "Invalid number of operands!");
1553 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1554 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1555 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1556 Inst.addOperand(MCOperand::createImm(Imm));
// Memory (register-offset) extends: two operands — a signedness flag
// (SXTW/SXTX) and a "shift present" flag (nonzero amount).
1559 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1560 assert(N == 2 && "Invalid number of operands!");
1561 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1562 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1563 Inst.addOperand(MCOperand::createImm(IsSigned));
1564 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1567 // For 8-bit load/store instructions with a register offset, both the
1568 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1569 // they're disambiguated by whether the shift was explicit or implicit rather
// (continuation of the comment above — line 1570 missing from listing)
1571 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1572 assert(N == 2 && "Invalid number of operands!");
1573 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1574 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1575 Inst.addOperand(MCOperand::createImm(IsSigned));
// Unlike addMemExtendOperands, the second flag records whether an amount
// was written at all, not whether it is nonzero.
1576 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
// MOVZ/MOVN mov-aliases: extract the 16-bit chunk at the (externally
// supplied) Shift position; MOVN complements the value first.
// Shift is not declared in the visible lines — presumably a template
// parameter; TODO confirm against full source.
1580 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1581 assert(N == 1 && "Invalid number of operands!");
1583 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1584 uint64_t Value = CE->getValue();
1585 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1589 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1590 assert(N == 1 && "Invalid number of operands!");
1592 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1593 uint64_t Value = CE->getValue();
1594 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1597 void print(raw_ostream &OS) const override;
// Factory methods: each allocates an AArch64Operand of the matching kind,
// fills the corresponding union member, and (in lines missing from this
// listing) sets StartLoc/EndLoc and returns the owning unique_ptr.
1599 static std::unique_ptr<AArch64Operand>
1600 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1601 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
// Stores the raw pointer/length of Str — the caller's string must outlive
// the operand.
1602 Op->Tok.Data = Str.data();
1603 Op->Tok.Length = Str.size();
1604 Op->Tok.IsSuffix = IsSuffix;
1610 static std::unique_ptr<AArch64Operand>
1611 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1612 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1613 Op->Reg.RegNum = RegNum;
1614 Op->Reg.isVector = isVector;
1620 static std::unique_ptr<AArch64Operand>
1621 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1622 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1623 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1624 Op->VectorList.RegNum = RegNum;
1625 Op->VectorList.Count = Count;
1626 Op->VectorList.NumElements = NumElements;
1627 Op->VectorList.ElementKind = ElementKind;
1633 static std::unique_ptr<AArch64Operand>
1634 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1635 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1636 Op->VectorIndex.Val = Idx;
1642 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1643 SMLoc E, MCContext &Ctx) {
1644 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1651 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1652 unsigned ShiftAmount,
1655 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
// NOTE(review): stray space before ".Val" in the original — cosmetic only.
1656 Op->ShiftedImm .Val = Val;
1657 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1663 static std::unique_ptr<AArch64Operand>
1664 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1665 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1666 Op->CondCode.Code = Code;
1672 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1674 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1675 Op->FPImm.Val = Val;
1681 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1685 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1686 Op->Barrier.Val = Val;
1687 Op->Barrier.Data = Str.data();
1688 Op->Barrier.Length = Str.size();
// Factory methods (continued). See the note on CreateToken about missing
// StartLoc/EndLoc/return lines in this listing.
1694 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1697 uint32_t PStateField,
1699 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1700 Op->SysReg.Data = Str.data();
1701 Op->SysReg.Length = Str.size();
1702 Op->SysReg.MRSReg = MRSReg;
1703 Op->SysReg.MSRReg = MSRReg;
1704 Op->SysReg.PStateField = PStateField;
1710 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1711 SMLoc E, MCContext &Ctx) {
1712 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1713 Op->SysCRImm.Val = Val;
1719 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1723 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1724 Op->Prefetch.Val = Val;
// NOTE(review): writes the *Barrier* union member after setting the
// Prefetch member — looks like a copy-paste from CreateBarrier. It only
// works if BarrierOp and PrefetchOp share layout (writing an inactive
// union member is UB in C++). Should presumably be Op->Prefetch.Data /
// Op->Prefetch.Length — confirm against the union definitions above.
1725 Op->Barrier.Data = Str.data();
1726 Op->Barrier.Length = Str.size();
1732 static std::unique_ptr<AArch64Operand>
1733 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1734 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1735 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1736 Op->ShiftExtend.Type = ShOp;
1737 Op->ShiftExtend.Amount = Val;
1738 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1745 } // end anonymous namespace.
// Debug dump of an operand to OS. Switches on Kind (the switch header,
// several case labels and closing braces are missing from this listing;
// code kept byte-identical).
1747 void AArch64Operand::print(raw_ostream &OS) const {
1750 OS << "<fpimm " << getFPImm() << "("
1751 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
// Barrier: print the symbolic name when one exists, else the raw value.
1754 StringRef Name = getBarrierName();
1756 OS << "<barrier " << Name << ">";
1758 OS << "<barrier invalid #" << getBarrier() << ">";
1764 case k_ShiftedImm: {
1765 unsigned Shift = getShiftedImmShift();
1766 OS << "<shiftedimm ";
1767 OS << *getShiftedImmVal();
1768 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1772 OS << "<condcode " << getCondCode() << ">";
1775 OS << "<register " << getReg() << ">";
1777 case k_VectorList: {
1778 OS << "<vectorlist ";
1779 unsigned Reg = getVectorListStart();
1780 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1781 OS << Reg + i << " ";
1786 OS << "<vectorindex " << getVectorIndex() << ">";
1789 OS << "<sysreg: " << getSysReg() << '>';
1792 OS << "'" << getToken() << "'";
1795 OS << "c" << getSysCR();
// Prefetch: symbolic name when known, else the raw prfop value.
1798 StringRef Name = getPrefetchName();
1800 OS << "<prfop " << Name << ">";
1802 OS << "<prfop invalid #" << getPrefetch() << ">";
1805 case k_ShiftExtend: {
1806 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1807 << getShiftExtendAmount();
1808 if (!hasShiftExtendAmount())
1816 /// @name Auto-generated Match Functions
1819 static unsigned MatchRegisterName(StringRef Name);
// Map a lower-cased vector register name ("v0".."v31") to the
// corresponding Q-register encoding. The trailing .Default(...) and
// closing brace are missing from this listing; presumably returns 0 on
// no match — confirm against full source.
1823 static unsigned matchVectorRegName(StringRef Name) {
1824 return StringSwitch<unsigned>(Name.lower())
1825 .Case("v0", AArch64::Q0)
1826 .Case("v1", AArch64::Q1)
1827 .Case("v2", AArch64::Q2)
1828 .Case("v3", AArch64::Q3)
1829 .Case("v4", AArch64::Q4)
1830 .Case("v5", AArch64::Q5)
1831 .Case("v6", AArch64::Q6)
1832 .Case("v7", AArch64::Q7)
1833 .Case("v8", AArch64::Q8)
1834 .Case("v9", AArch64::Q9)
1835 .Case("v10", AArch64::Q10)
1836 .Case("v11", AArch64::Q11)
1837 .Case("v12", AArch64::Q12)
1838 .Case("v13", AArch64::Q13)
1839 .Case("v14", AArch64::Q14)
1840 .Case("v15", AArch64::Q15)
1841 .Case("v16", AArch64::Q16)
1842 .Case("v17", AArch64::Q17)
1843 .Case("v18", AArch64::Q18)
1844 .Case("v19", AArch64::Q19)
1845 .Case("v20", AArch64::Q20)
1846 .Case("v21", AArch64::Q21)
1847 .Case("v22", AArch64::Q22)
1848 .Case("v23", AArch64::Q23)
1849 .Case("v24", AArch64::Q24)
1850 .Case("v25", AArch64::Q25)
1851 .Case("v26", AArch64::Q26)
1852 .Case("v27", AArch64::Q27)
1853 .Case("v28", AArch64::Q28)
1854 .Case("v29", AArch64::Q29)
1855 .Case("v30", AArch64::Q30)
1856 .Case("v31", AArch64::Q31)
// Whether a ".<lanes><size>" suffix names a legal vector arrangement.
// The actual .Case entries (lines 1862-1880) are missing from this
// listing; only the StringSwitch header and a comment survive.
1860 static bool isValidVectorKind(StringRef Name) {
1861 return StringSwitch<bool>(Name.lower())
1871 // Accept the width neutral ones, too, for verbose syntax. If those
1872 // aren't used in the right places, the token operand won't match so
1873 // all will work out.
// Split a validated kind suffix (e.g. ".4h") into its lane count and
// element-kind character. Precondition: isValidVectorKind(Name).
1881 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1882 char &ElementKind) {
1883 assert(isValidVectorKind(Name));
// Last character is the element kind (b/h/s/d); a 2-char name (".b" etc.)
// has no lane count, so the early-out below leaves NumElements untouched
// (its zero-initialization is on a line missing from this listing).
1885 ElementKind = Name.lower()[Name.size() - 1];
1888 if (Name.size() == 2)
1891 // Parse the lane count
1892 Name = Name.drop_front();
1893 while (isdigit(Name.front())) {
1894 NumElements = 10 * NumElements + (Name.front() - '0');
1895 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting start/end source
// locations. Returns true (failure) when tryParseRegister yields -1.
// (The second signature line, 1900, is missing from this listing.)
1899 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1901 StartLoc = getLoc();
1902 RegNo = tryParseRegister();
1903 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1904 return (RegNo == (unsigned)-1);
1907 // Matches a register name or register alias previously defined by '.req'
1908 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1910 unsigned RegNum = isVector ? matchVectorRegName(Name)
1911 : MatchRegisterName(Name);
1914 // Check for aliases registered via .req. Canonicalize to lower case.
1915 // That's more consistent since register names are case insensitive, and
1916 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1917 auto Entry = RegisterReqs.find(Name.lower())
1918 if (Entry == RegisterReqs.end())
1920 // set RegNum if the match is the right kind of register
1921 if (isVector == Entry->getValue().first)
1922 RegNum = Entry->getValue().second;
1927 /// tryParseRegister - Try to parse a register name. The token must be an
1928 /// Identifier when called, and if it is a register name the token is eaten and
1929 /// the register is added to the operand list.
// Returns the register number, or (per ParseRegister above) -1 on
// failure — the failing return path is missing from this listing.
1930 int AArch64AsmParser::tryParseRegister() {
1931 MCAsmParser &Parser = getParser();
1932 const AsmToken &Tok = Parser.getTok();
1933 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1935 std::string lowerCase = Tok.getString().lower();
// First try ordinary names and .req aliases (isVector=false).
1936 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1937 // Also handle a few aliases of registers.
// fp/lr plus the zero-register spellings x31/w31.
1939 RegNum = StringSwitch<unsigned>(lowerCase)
1940 .Case("fp", AArch64::FP)
1941 .Case("lr", AArch64::LR)
1942 .Case("x31", AArch64::XZR)
1943 .Case("w31", AArch64::WZR)
1949 Parser.Lex(); // Eat identifier token.
1953 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1954 /// kind specifier. If it is a register specifier, eat the token and return it.
// On success Kind receives the ".<lanes><size>" suffix (including the
// dot); error paths call TokError and (on lines missing from this
// listing) return -1.
1955 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1956 MCAsmParser &Parser = getParser();
1957 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1958 TokError("vector register expected");
1962 StringRef Name = Parser.getTok().getString();
1963 // If there is a kind specifier, it's separated from the register name by
// ... a '.' (comment continuation on line 1964, missing from listing).
1965 size_t Start = 0, Next = Name.find('.');
1966 StringRef Head = Name.slice(Start, Next);
1967 unsigned RegNum = matchRegisterNameAlias(Head, true);
1970 if (Next != StringRef::npos) {
// Keep the dot in the Kind string: slice from Next, not Next + 1.
1971 Kind = Name.slice(Next, StringRef::npos);
1972 if (!isValidVectorKind(Kind)) {
1973 TokError("invalid vector kind qualifier");
1977 Parser.Lex(); // Eat the register token.
// Reached only when no vector register matched (guarded by code missing
// from this listing — presumably "if (expected)"); TODO confirm.
1982 TokError("vector register expected");
1986 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Accepts "cN"/"CN" with 0 <= N <= 15 and pushes a SysCR operand.
1987 AArch64AsmParser::OperandMatchResultTy
1988 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1989 MCAsmParser &Parser = getParser();
1992 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1993 Error(S, "Expected cN operand where 0 <= N <= 15");
1994 return MatchOperand_ParseFail;
1997 StringRef Tok = Parser.getTok().getIdentifier();
1998 if (Tok[0] != 'c' && Tok[0] != 'C') {
1999 Error(S, "Expected cN operand where 0 <= N <= 15");
2000 return MatchOperand_ParseFail;
// Parse the decimal index after the leading 'c'/'C'.
2004 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2005 if (BadNum || CRNum > 15) {
2006 Error(S, "Expected cN operand where 0 <= N <= 15");
2007 return MatchOperand_ParseFail;
2010 Parser.Lex(); // Eat identifier token.
2012 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2013 return MatchOperand_Success;
2016 /// tryParsePrefetch - Try to parse a prefetch operand.
// Accepts either a (possibly '#'-prefixed) 5-bit immediate or a named
// PRFM hint; either way a Prefetch operand carrying both the numeric
// value and its printable name is pushed.
2017 AArch64AsmParser::OperandMatchResultTy
2018 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2019 MCAsmParser &Parser = getParser();
2021 const AsmToken &Tok = Parser.getTok();
2022 // Either an identifier for named values or a 5-bit immediate.
2023 bool Hash = Tok.is(AsmToken::Hash);
2024 if (Hash || Tok.is(AsmToken::Integer)) {
2026 Parser.Lex(); // Eat hash token.
2027 const MCExpr *ImmVal;
2028 if (getParser().parseExpression(ImmVal))
2029 return MatchOperand_ParseFail;
2031 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2033 TokError("immediate value expected for prefetch operand");
2034 return MatchOperand_ParseFail;
2036 unsigned prfop = MCE->getValue();
// Range check (the "prfop > 31" condition line is missing from listing).
2038 TokError("prefetch operand out of range, [0,31] expected");
2039 return MatchOperand_ParseFail;
// Map the numeric value back to a printable name (may be invalid; the
// Valid flag is declared on a line missing from this listing).
2043 auto Mapper = AArch64PRFM::PRFMMapper();
2045 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2046 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2048 return MatchOperand_Success;
2051 if (Tok.isNot(AsmToken::Identifier)) {
2052 TokError("pre-fetch hint expected");
2053 return MatchOperand_ParseFail;
// Named-hint path: resolve the identifier to a prfop value.
2057 auto Mapper = AArch64PRFM::PRFMMapper();
2059 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2061 TokError("pre-fetch hint expected");
2062 return MatchOperand_ParseFail;
2065 Parser.Lex(); // Eat identifier token.
2066 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2068 return MatchOperand_Success;
2071 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// ... instruction (continuation on line 2072, missing from listing).
// Classifies the symbolic expression: a bare symbol becomes an implicit
// :page: (ELF ABS_PAGE) reference; Darwin gotpage/tlvppage must not
// carry an addend; anything that is not a page-class modifier is an error.
2073 AArch64AsmParser::OperandMatchResultTy
2074 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2075 MCAsmParser &Parser = getParser();
2079 if (Parser.getTok().is(AsmToken::Hash)) {
2080 Parser.Lex(); // Eat hash token.
2083 if (parseSymbolicImmVal(Expr))
2084 return MatchOperand_ParseFail;
2086 AArch64MCExpr::VariantKind ELFRefKind;
2087 MCSymbolRefExpr::VariantKind DarwinRefKind;
2089 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2090 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2091 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2092 // No modifier was specified at all; this is the syntax for an ELF basic
2093 // ADRP relocation (unfortunately).
2095 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2096 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2097 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// ... && Addend != 0 (condition tail on line 2098, missing from listing).
2099 Error(S, "gotpage label reference not allowed an addend");
2100 return MatchOperand_ParseFail;
2101 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2102 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2103 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2104 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2105 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2106 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2107 // The operand must be an @page or @gotpage qualified symbolref.
2108 Error(S, "page or gotpage label reference expected");
2109 return MatchOperand_ParseFail;
2113 // We have either a label reference possibly with addend or an immediate. The
2114 // addend is a raw value here. The linker will adjust it to only reference the
// ... page portion (comment continuation on line 2115, missing).
2116 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2117 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2119 return MatchOperand_Success;
2122 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// ... instruction (continuation on line 2123, missing from listing).
// Unlike ADRP, no page classification is needed — the raw expression is
// pushed as an immediate operand.
2124 AArch64AsmParser::OperandMatchResultTy
2125 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2126 MCAsmParser &Parser = getParser();
2130 if (Parser.getTok().is(AsmToken::Hash)) {
2131 Parser.Lex(); // Eat hash token.
2134 if (getParser().parseExpression(Expr))
2135 return MatchOperand_ParseFail;
2137 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2138 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2140 return MatchOperand_Success;
2143 /// tryParseFPImm - A floating point immediate expression operand.
// Handles three shapes after an optional '#' and optional '-':
//   * a real literal  -> encoded via getFP64Imm (8-bit FP immediate),
//     with +0.0 allowed through specially (value -1) for FMOV-zero;
//   * a 0x hex integer -> the raw 8-bit encoding, range-checked [0,255];
//   * a decimal integer -> treated as a double and encoded like a real.
2144 AArch64AsmParser::OperandMatchResultTy
2145 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2146 MCAsmParser &Parser = getParser();
2150 if (Parser.getTok().is(AsmToken::Hash)) {
2151 Parser.Lex(); // Eat '#'
2155 // Handle negation, as that still comes through as a separate token.
2156 bool isNegative = false;
2157 if (Parser.getTok().is(AsmToken::Minus)) {
// (isNegative = true; Lex() — lines 2158-2159 missing from listing.)
2161 const AsmToken &Tok = Parser.getTok();
2162 if (Tok.is(AsmToken::Real)) {
2163 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
// Apply the sign parsed above (guard line 2164 missing from listing).
2165 RealVal.changeSign();
2167 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2168 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2169 Parser.Lex(); // Eat the token.
2170 // Check for out of range values. As an exception, we let Zero through,
2171 // as we handle that special case in post-processing before matching in
2172 // order to use the zero register for it.
2173 if (Val == -1 && !RealVal.isPosZero()) {
2174 TokError("expected compatible register or floating-point constant");
2175 return MatchOperand_ParseFail;
2177 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2178 return MatchOperand_Success;
2180 if (Tok.is(AsmToken::Integer)) {
// Hex form is the literal 8-bit encoding; '-' is not meaningful there.
2182 if (!isNegative && Tok.getString().startswith("0x")) {
2183 Val = Tok.getIntVal();
2184 if (Val > 255 || Val < 0) {
2185 TokError("encoded floating point value out of range");
2186 return MatchOperand_ParseFail;
// else-branch: decimal integer reinterpreted as a double literal.
2189 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2190 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2191 // If we had a '-' in front, toggle the sign bit.
2192 IntVal ^= (uint64_t)isNegative << 63;
2193 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2195 Parser.Lex(); // Eat the token.
2196 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2197 return MatchOperand_Success;
// No-hash + non-literal token: defer to other parsers (guard missing).
2201 return MatchOperand_NoMatch;
2203 TokError("invalid floating point immediate");
2204 return MatchOperand_ParseFail;
2207 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
// Parses "#imm" or "imm", optionally followed by ", lsl #N". A plain
// constant of the form X<<12 (with low 12 bits clear) is canonicalized
// to (X, shift 12) so "add x0, x0, #0x1000" matches.
2208 AArch64AsmParser::OperandMatchResultTy
2209 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2210 MCAsmParser &Parser = getParser();
2213 if (Parser.getTok().is(AsmToken::Hash))
2214 Parser.Lex(); // Eat '#'
2215 else if (Parser.getTok().isNot(AsmToken::Integer))
2216 // Operand should start from # or should be integer, emit error otherwise.
2217 return MatchOperand_NoMatch;
2220 if (parseSymbolicImmVal(Imm))
2221 return MatchOperand_ParseFail;
// No comma => no explicit shift; emit with an implicit shift of 0
// (or 12 after the canonicalization below).
2222 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2223 uint64_t ShiftAmount = 0;
2224 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2226 int64_t Val = MCE->getValue();
2227 if (Val > 0xfff && (Val & 0xfff) == 0) {
2228 Imm = MCConstantExpr::create(Val >> 12, getContext());
// (ShiftAmount = 12 — line 2229 missing from this listing.)
2232 SMLoc E = Parser.getTok().getLoc();
2233 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2235 return MatchOperand_Success;
2241 // The optional operand must be "lsl #N" where N is non-negative.
2242 if (!Parser.getTok().is(AsmToken::Identifier) ||
2243 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2244 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate")
2245 return MatchOperand_ParseFail;
// Eat "lsl" and the optional '#' before the shift amount.
2251 if (Parser.getTok().is(AsmToken::Hash)) {
2255 if (Parser.getTok().isNot(AsmToken::Integer)) {
2256 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2257 return MatchOperand_ParseFail;
2260 int64_t ShiftAmount = Parser.getTok().getIntVal();
2262 if (ShiftAmount < 0) {
2263 Error(Parser.getTok().getLoc(), "positive shift amount required");
2264 return MatchOperand_ParseFail;
2266 Parser.Lex(); // Eat the number
2268 SMLoc E = Parser.getTok().getLoc();
2269 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2270 S, E, getContext()));
2271 return MatchOperand_Success;
2274 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of the standard A64 condition mnemonics;
// note "cs"/"hs" and "cc"/"lo" are synonyms. Returns AArch64CC::Invalid
// on no match (the trailing "return CC;" is missing from this listing).
2275 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2276 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2277 .Case("eq", AArch64CC::EQ)
2278 .Case("ne", AArch64CC::NE)
2279 .Case("cs", AArch64CC::HS)
2280 .Case("hs", AArch64CC::HS)
2281 .Case("cc", AArch64CC::LO)
2282 .Case("lo", AArch64CC::LO)
2283 .Case("mi", AArch64CC::MI)
2284 .Case("pl", AArch64CC::PL)
2285 .Case("vs", AArch64CC::VS)
2286 .Case("vc", AArch64CC::VC)
2287 .Case("hi", AArch64CC::HI)
2288 .Case("ls", AArch64CC::LS)
2289 .Case("ge", AArch64CC::GE)
2290 .Case("lt", AArch64CC::LT)
2291 .Case("gt", AArch64CC::GT)
2292 .Case("le", AArch64CC::LE)
2293 .Case("al", AArch64CC::AL)
2294 .Case("nv", AArch64CC::NV)
2295 .Default(AArch64CC::Invalid)
2299 /// parseCondCode - Parse a Condition Code operand and append it to Operands.
/// Returns true on error. If \p invertCondCode is set, the inverted condition
/// is stored instead (AL and NV are rejected in that case, since they have no
/// usable inversion for these instructions).
2300 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2301 bool invertCondCode) {
2302 MCAsmParser &Parser = getParser();
2304 const AsmToken &Tok = Parser.getTok();
2305 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")
2307 StringRef Cond = Tok.getString();
2308 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2309 if (CC == AArch64CC::Invalid)
2310 return TokError("invalid condition code");
2311 Parser.Lex(); // Eat identifier token.
2313 if (invertCondCode) {
2314 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2315 return TokError("condition codes AL and NV are invalid for this instruction");
2316 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2320 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2324 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2325 /// argument. Parse them if present.
2326 AArch64AsmParser::OperandMatchResultTy
2327 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2328 MCAsmParser &Parser = getParser();
2329 const AsmToken &Tok = Parser.getTok();
2330 std::string LowerID = Tok.getString().lower();
2331 AArch64_AM::ShiftExtendType ShOp =
2332 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2333 .Case("lsl", AArch64_AM::LSL)
2334 .Case("lsr", AArch64_AM::LSR)
2335 .Case("asr", AArch64_AM::ASR)
2336 .Case("ror", AArch64_AM::ROR)
2337 .Case("msl", AArch64_AM::MSL)
2338 .Case("uxtb", AArch64_AM::UXTB)
2339 .Case("uxth", AArch64_AM::UXTH)
2340 .Case("uxtw", AArch64_AM::UXTW)
2341 .Case("uxtx", AArch64_AM::UXTX)
2342 .Case("sxtb", AArch64_AM::SXTB)
2343 .Case("sxth", AArch64_AM::SXTH)
2344 .Case("sxtw", AArch64_AM::SXTW)
2345 .Case("sxtx", AArch64_AM::SXTX)
2346 .Default(AArch64_AM::InvalidShiftExtend);
2348 if (ShOp == AArch64_AM::InvalidShiftExtend)
2349 return MatchOperand_NoMatch;
2351 SMLoc S = Tok.getLoc();
2354 bool Hash = getLexer().is(AsmToken::Hash);
2355 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2356 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2357 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2358 ShOp == AArch64_AM::MSL) {
2359 // We expect a number here.
2360 TokError("expected #imm after shift specifier");
2361 return MatchOperand_ParseFail;
2364 // "extend" type operations don't need an immediate, #0 is implicit.
2365 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2367 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2368 return MatchOperand_Success;
2372 Parser.Lex(); // Eat the '#'.
2374 // Make sure we do actually have a number or a parenthesized expression.
2375 SMLoc E = Parser.getTok().getLoc();
2376 if (!Parser.getTok().is(AsmToken::Integer) &&
2377 !Parser.getTok().is(AsmToken::LParen)) {
2378 Error(E, "expected integer shift amount");
2379 return MatchOperand_ParseFail;
2382 const MCExpr *ImmVal;
2383 if (getParser().parseExpression(ImmVal))
2384 return MatchOperand_ParseFail;
// The shift amount must fold to a constant; symbolic amounts are rejected.
2386 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2388 Error(E, "expected constant '#imm' after shift specifier");
2389 return MatchOperand_ParseFail;
2392 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2393 Operands.push_back(AArch64Operand::CreateShiftExtend(
2394 ShOp, MCE->getValue(), true, S, E, getContext()));
2395 return MatchOperand_Success;
2398 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2399 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true on error. The SYS_ALIAS comments below give the encoding as
/// SYS #op1, Cn, Cm, #op2 per the ARMv8 ARM system-instruction tables.
2400 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2401 OperandVector &Operands) {
2402 if (Name.find('.') != StringRef::npos)
2403 return TokError("invalid operand");
2407 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2409 MCAsmParser &Parser = getParser();
2410 const AsmToken &Tok = Parser.getTok();
2411 StringRef Op = Tok.getString();
2412 SMLoc S = Tok.getLoc();
2414 const MCExpr *Expr = nullptr;
2416 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2418 Expr = MCConstantExpr::create(op1, getContext()); \
2419 Operands.push_back( \
2420 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2421 Operands.push_back( \
2422 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2423 Operands.push_back( \
2424 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2425 Expr = MCConstantExpr::create(op2, getContext()); \
2426 Operands.push_back( \
2427 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2430 if (Mnemonic == "ic") {
2431 if (!Op.compare_lower("ialluis")) {
2432 // SYS #0, C7, C1, #0
2433 SYS_ALIAS(0, 7, 1, 0);
2434 } else if (!Op.compare_lower("iallu")) {
2435 // SYS #0, C7, C5, #0
2436 SYS_ALIAS(0, 7, 5, 0);
2437 } else if (!Op.compare_lower("ivau")) {
2438 // SYS #3, C7, C5, #1
2439 SYS_ALIAS(3, 7, 5, 1);
2441 return TokError("invalid operand for IC instruction");
2443 } else if (Mnemonic == "dc") {
2444 if (!Op.compare_lower("zva")) {
2445 // SYS #3, C7, C4, #1
2446 SYS_ALIAS(3, 7, 4, 1);
2447 } else if (!Op.compare_lower("ivac")) {
2448 // SYS #0, C7, C6, #1
2449 SYS_ALIAS(0, 7, 6, 1);
2450 } else if (!Op.compare_lower("isw")) {
2451 // SYS #0, C7, C6, #2
2452 SYS_ALIAS(0, 7, 6, 2);
2453 } else if (!Op.compare_lower("cvac")) {
2454 // SYS #3, C7, C10, #1
2455 SYS_ALIAS(3, 7, 10, 1);
2456 } else if (!Op.compare_lower("csw")) {
2457 // SYS #0, C7, C10, #2
2458 SYS_ALIAS(0, 7, 10, 2);
2459 } else if (!Op.compare_lower("cvau")) {
2460 // SYS #3, C7, C11, #1
2461 SYS_ALIAS(3, 7, 11, 1);
2462 } else if (!Op.compare_lower("civac")) {
2463 // SYS #3, C7, C14, #1
2464 SYS_ALIAS(3, 7, 14, 1);
2465 } else if (!Op.compare_lower("cisw")) {
2466 // SYS #0, C7, C14, #2
2467 SYS_ALIAS(0, 7, 14, 2);
2469 return TokError("invalid operand for DC instruction");
2471 } else if (Mnemonic == "at") {
2472 if (!Op.compare_lower("s1e1r")) {
2473 // SYS #0, C7, C8, #0
2474 SYS_ALIAS(0, 7, 8, 0);
2475 } else if (!Op.compare_lower("s1e2r")) {
2476 // SYS #4, C7, C8, #0
2477 SYS_ALIAS(4, 7, 8, 0);
2478 } else if (!Op.compare_lower("s1e3r")) {
2479 // SYS #6, C7, C8, #0
2480 SYS_ALIAS(6, 7, 8, 0);
2481 } else if (!Op.compare_lower("s1e1w")) {
2482 // SYS #0, C7, C8, #1
2483 SYS_ALIAS(0, 7, 8, 1);
2484 } else if (!Op.compare_lower("s1e2w")) {
2485 // SYS #4, C7, C8, #1
2486 SYS_ALIAS(4, 7, 8, 1);
2487 } else if (!Op.compare_lower("s1e3w")) {
2488 // SYS #6, C7, C8, #1
2489 SYS_ALIAS(6, 7, 8, 1);
2490 } else if (!Op.compare_lower("s1e0r")) {
2491 // SYS #0, C7, C8, #2
2492 SYS_ALIAS(0, 7, 8, 2);
2493 } else if (!Op.compare_lower("s1e0w")) {
2494 // SYS #0, C7, C8, #3
2495 SYS_ALIAS(0, 7, 8, 3);
2496 } else if (!Op.compare_lower("s12e1r")) {
2497 // SYS #4, C7, C8, #4
2498 SYS_ALIAS(4, 7, 8, 4);
2499 } else if (!Op.compare_lower("s12e1w")) {
2500 // SYS #4, C7, C8, #5
2501 SYS_ALIAS(4, 7, 8, 5);
2502 } else if (!Op.compare_lower("s12e0r")) {
2503 // SYS #4, C7, C8, #6
2504 SYS_ALIAS(4, 7, 8, 6);
2505 } else if (!Op.compare_lower("s12e0w")) {
2506 // SYS #4, C7, C8, #7
2507 SYS_ALIAS(4, 7, 8, 7);
2509 return TokError("invalid operand for AT instruction");
2511 } else if (Mnemonic == "tlbi") {
2512 if (!Op.compare_lower("vmalle1is")) {
2513 // SYS #0, C8, C3, #0
2514 SYS_ALIAS(0, 8, 3, 0);
2515 } else if (!Op.compare_lower("alle2is")) {
2516 // SYS #4, C8, C3, #0
2517 SYS_ALIAS(4, 8, 3, 0);
2518 } else if (!Op.compare_lower("alle3is")) {
2519 // SYS #6, C8, C3, #0
2520 SYS_ALIAS(6, 8, 3, 0);
2521 } else if (!Op.compare_lower("vae1is")) {
2522 // SYS #0, C8, C3, #1
2523 SYS_ALIAS(0, 8, 3, 1);
2524 } else if (!Op.compare_lower("vae2is")) {
2525 // SYS #4, C8, C3, #1
2526 SYS_ALIAS(4, 8, 3, 1);
2527 } else if (!Op.compare_lower("vae3is")) {
2528 // SYS #6, C8, C3, #1
2529 SYS_ALIAS(6, 8, 3, 1);
2530 } else if (!Op.compare_lower("aside1is")) {
2531 // SYS #0, C8, C3, #2
2532 SYS_ALIAS(0, 8, 3, 2);
2533 } else if (!Op.compare_lower("vaae1is")) {
2534 // SYS #0, C8, C3, #3
2535 SYS_ALIAS(0, 8, 3, 3);
2536 } else if (!Op.compare_lower("alle1is")) {
2537 // SYS #4, C8, C3, #4
2538 SYS_ALIAS(4, 8, 3, 4);
2539 } else if (!Op.compare_lower("vale1is")) {
2540 // SYS #0, C8, C3, #5
2541 SYS_ALIAS(0, 8, 3, 5);
2542 } else if (!Op.compare_lower("vaale1is")) {
2543 // SYS #0, C8, C3, #7
2544 SYS_ALIAS(0, 8, 3, 7);
2545 } else if (!Op.compare_lower("vmalle1")) {
2546 // SYS #0, C8, C7, #0
2547 SYS_ALIAS(0, 8, 7, 0);
2548 } else if (!Op.compare_lower("alle2")) {
2549 // SYS #4, C8, C7, #0
2550 SYS_ALIAS(4, 8, 7, 0);
2551 } else if (!Op.compare_lower("vale2is")) {
2552 // SYS #4, C8, C3, #5
2553 SYS_ALIAS(4, 8, 3, 5);
2554 } else if (!Op.compare_lower("vale3is")) {
2555 // SYS #6, C8, C3, #5
2556 SYS_ALIAS(6, 8, 3, 5);
2557 } else if (!Op.compare_lower("alle3")) {
2558 // SYS #6, C8, C7, #0
2559 SYS_ALIAS(6, 8, 7, 0);
2560 } else if (!Op.compare_lower("vae1")) {
2561 // SYS #0, C8, C7, #1
2562 SYS_ALIAS(0, 8, 7, 1);
2563 } else if (!Op.compare_lower("vae2")) {
2564 // SYS #4, C8, C7, #1
2565 SYS_ALIAS(4, 8, 7, 1);
2566 } else if (!Op.compare_lower("vae3")) {
2567 // SYS #6, C8, C7, #1
2568 SYS_ALIAS(6, 8, 7, 1);
2569 } else if (!Op.compare_lower("aside1")) {
2570 // SYS #0, C8, C7, #2
2571 SYS_ALIAS(0, 8, 7, 2);
2572 } else if (!Op.compare_lower("vaae1")) {
2573 // SYS #0, C8, C7, #3
2574 SYS_ALIAS(0, 8, 7, 3);
2575 } else if (!Op.compare_lower("alle1")) {
2576 // SYS #4, C8, C7, #4
2577 SYS_ALIAS(4, 8, 7, 4);
2578 } else if (!Op.compare_lower("vale1")) {
2579 // SYS #0, C8, C7, #5
2580 SYS_ALIAS(0, 8, 7, 5);
2581 } else if (!Op.compare_lower("vale2")) {
2582 // SYS #4, C8, C7, #5
2583 SYS_ALIAS(4, 8, 7, 5);
2584 } else if (!Op.compare_lower("vale3")) {
2585 // SYS #6, C8, C7, #5
2586 SYS_ALIAS(6, 8, 7, 5);
2587 } else if (!Op.compare_lower("vaale1")) {
2588 // SYS #0, C8, C7, #7
2589 SYS_ALIAS(0, 8, 7, 7);
2590 } else if (!Op.compare_lower("ipas2e1")) {
2591 // SYS #4, C8, C4, #1
2592 SYS_ALIAS(4, 8, 4, 1);
2593 } else if (!Op.compare_lower("ipas2le1")) {
2594 // SYS #4, C8, C4, #5
2595 SYS_ALIAS(4, 8, 4, 5);
2596 } else if (!Op.compare_lower("ipas2e1is")) {
2597 // SYS #4, C8, C0, #1
2598 SYS_ALIAS(4, 8, 0, 1);
2599 } else if (!Op.compare_lower("ipas2le1is")) {
2600 // SYS #4, C8, C0, #5
2601 SYS_ALIAS(4, 8, 0, 5);
2602 } else if (!Op.compare_lower("vmalls12e1")) {
2603 // SYS #4, C8, C7, #6
2604 SYS_ALIAS(4, 8, 7, 6);
2605 } else if (!Op.compare_lower("vmalls12e1is")) {
2606 // SYS #4, C8, C3, #6
2607 SYS_ALIAS(4, 8, 3, 6);
2609 return TokError("invalid operand for TLBI instruction");
2615 Parser.Lex(); // Eat operand.
// Ops whose name contains "all" (e.g. TLBI ALLE1) take no register operand.
2617 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2618 bool HasRegister = false;
2620 // Check for the optional register operand.
2621 if (getLexer().is(AsmToken::Comma)) {
2622 Parser.Lex(); // Eat comma.
2624 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2625 return TokError("expected register operand");
2630 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2631 Parser.eatToEndOfStatement();
2632 return TokError("unexpected token in argument list");
2635 if (ExpectRegister && !HasRegister) {
2636 return TokError("specified " + Mnemonic + " op requires a register");
2638 else if (!ExpectRegister && HasRegister) {
2639 return TokError("specified " + Mnemonic + " op does not use a register");
2642 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse a barrier (DSB/DMB/ISB) operand: either an
/// immediate in the range [0, 15] or a named barrier option. For ISB the only
/// valid named option is 'sy'.
2646 AArch64AsmParser::OperandMatchResultTy
2647 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2648 MCAsmParser &Parser = getParser();
2649 const AsmToken &Tok = Parser.getTok();
2651 // Can be either a #imm style literal or an option name
2652 bool Hash = Tok.is(AsmToken::Hash);
2653 if (Hash || Tok.is(AsmToken::Integer)) {
2654 // Immediate operand.
2656 Parser.Lex(); // Eat the '#'
2657 const MCExpr *ImmVal;
2658 SMLoc ExprLoc = getLoc();
2659 if (getParser().parseExpression(ImmVal))
2660 return MatchOperand_ParseFail;
2661 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2663 Error(ExprLoc, "immediate value expected for barrier operand");
2664 return MatchOperand_ParseFail;
// The CRm field encoding the barrier option is 4 bits wide.
2666 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2667 Error(ExprLoc, "barrier operand out of range");
2668 return MatchOperand_ParseFail;
2671 auto Mapper = AArch64DB::DBarrierMapper();
2673 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2674 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2675 ExprLoc, getContext()));
2676 return MatchOperand_Success;
2679 if (Tok.isNot(AsmToken::Identifier)) {
2680 TokError("invalid operand for instruction");
2681 return MatchOperand_ParseFail;
2685 auto Mapper = AArch64DB::DBarrierMapper();
2687 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2689 TokError("invalid barrier option name");
2690 return MatchOperand_ParseFail;
2693 // The only valid named option for ISB is 'sy'
2694 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2695 TokError("'sy' or #imm operand expected");
2696 return MatchOperand_ParseFail;
2699 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2700 getLoc(), getContext()));
2701 Parser.Lex(); // Consume the option
2703 return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand (for MRS/MSR and MSR
/// pstate forms). The identifier is looked up in the MRS, MSR and PState
/// mappers; each lookup yields -1U when the name is unknown to that mapper.
2706 AArch64AsmParser::OperandMatchResultTy
2707 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2708 MCAsmParser &Parser = getParser();
2709 const AsmToken &Tok = Parser.getTok();
2711 if (Tok.isNot(AsmToken::Identifier))
2712 return MatchOperand_NoMatch;
2715 auto MRSMapper = AArch64SysReg::MRSMapper();
2716 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2717 getSTI().getFeatureBits(), IsKnown);
2718 assert(IsKnown == (MRSReg != -1U) &&
2719 "register should be -1 if and only if it's unknown");
2721 auto MSRMapper = AArch64SysReg::MSRMapper();
2722 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2723 getSTI().getFeatureBits(), IsKnown);
2724 assert(IsKnown == (MSRReg != -1U) &&
2725 "register should be -1 if and only if it's unknown");
2727 auto PStateMapper = AArch64PState::PStateMapper();
2728 uint32_t PStateField =
2729 PStateMapper.fromString(Tok.getString(),
2730 getSTI().getFeatureBits(), IsKnown);
2731 assert(IsKnown == (PStateField != -1U) &&
2732 "register should be -1 if and only if it's unknown");
// All three lookup results are stored; validation against the mnemonic
// happens later during operand matching.
2734 Operands.push_back(AArch64Operand::CreateSysReg(
2735 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2736 Parser.Lex(); // Eat identifier
2738 return MatchOperand_Success;
2741 /// tryParseVectorRegister - Parse a vector register operand, including any
/// ".<kind>" qualifier and an optional "[index]" suffix. Returns true on
/// failure to match.
2742 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2743 MCAsmParser &Parser = getParser();
2744 if (Parser.getTok().isNot(AsmToken::Identifier))
2748 // Check for a vector register specifier first.
2750 int64_t Reg = tryMatchVectorRegister(Kind, false);
2754 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2755 // If there was an explicit qualifier, that goes on as a literal text
2759 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2761 // If there is an index specifier following the register, parse that too.
2762 if (Parser.getTok().is(AsmToken::LBrac)) {
2763 SMLoc SIdx = getLoc();
2764 Parser.Lex(); // Eat left bracket token.
2766 const MCExpr *ImmVal;
2767 if (getParser().parseExpression(ImmVal))
// The lane index must fold to a constant.
2769 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2771 TokError("immediate value expected for vector index");
2776 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2777 Error(E, "']' expected");
2781 Parser.Lex(); // Eat right bracket token.
2783 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2790 /// parseRegister - Parse a non-vector register operand. Returns true on
/// failure. Vector registers are tried first, then scalar registers.
2791 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2792 MCAsmParser &Parser = getParser();
2794 // Try for a vector register.
2795 if (!tryParseVectorRegister(Operands))
2798 // Try for a scalar register.
2799 int64_t Reg = tryParseRegister();
2803 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2805 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2806 // as a string token in the instruction itself.
2807 if (getLexer().getKind() == AsmToken::LBrac) {
2808 SMLoc LBracS = getLoc();
2810 const AsmToken &Tok = Parser.getTok();
2811 if (Tok.is(AsmToken::Integer)) {
2812 SMLoc IntS = getLoc();
2813 int64_t Val = Tok.getIntVal();
2816 if (getLexer().getKind() == AsmToken::RBrac) {
2817 SMLoc RBracS = getLoc();
// Emit "[", "1", "]" as literal tokens so the matcher sees them verbatim.
2820 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2822 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2824 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression that may be prefixed by
/// an ELF relocation specifier of the form ":lo12:" etc. When a specifier is
/// present the parsed expression is wrapped in an AArch64MCExpr carrying the
/// corresponding VariantKind. Returns true on error.
2834 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2835 MCAsmParser &Parser = getParser();
2836 bool HasELFModifier = false;
2837 AArch64MCExpr::VariantKind RefKind;
2839 if (Parser.getTok().is(AsmToken::Colon)) {
2840 Parser.Lex(); // Eat ':'
2841 HasELFModifier = true;
2843 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2844 Error(Parser.getTok().getLoc(),
2845 "expect relocation specifier in operand after ':'");
2849 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2850 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2851 .Case("lo12", AArch64MCExpr::VK_LO12)
2852 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2853 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2854 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2855 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2856 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2857 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2858 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2859 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2860 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2861 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2862 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2863 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2864 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2865 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2866 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2867 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2868 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2869 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2870 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2871 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2872 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2873 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2874 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2875 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2876 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2877 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2878 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2879 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2880 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2881 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2882 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2883 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2884 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2885 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2886 .Default(AArch64MCExpr::VK_INVALID);
2888 if (RefKind == AArch64MCExpr::VK_INVALID) {
2889 Error(Parser.getTok().getLoc(),
2890 "expect relocation specifier in operand after ':'");
2894 Parser.Lex(); // Eat identifier
// A closing ':' is required after the specifier name, e.g. ":lo12:sym".
2896 if (Parser.getTok().isNot(AsmToken::Colon)) {
2897 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2900 Parser.Lex(); // Eat ':'
2903 if (getParser().parseExpression(ImmVal))
2907 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2912 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions,
/// e.g. "{ v0.8b - v3.8b }" or "{ v0.4s, v1.4s }", with an optional trailing
/// "[index]". Returns true on error.
2913 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2914 MCAsmParser &Parser = getParser();
2915 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2917 Parser.Lex(); // Eat left bracket token.
2919 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2922 int64_t PrevReg = FirstReg;
// Range form: "{ vA.K - vB.K }".
2925 if (Parser.getTok().is(AsmToken::Minus)) {
2926 Parser.Lex(); // Eat the minus.
2928 SMLoc Loc = getLoc();
2930 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2933 // Any Kind suffixes must match on all regs in the list.
2934 if (Kind != NextKind)
2935 return Error(Loc, "mismatched register size suffix");
// Register numbering wraps around at 32 (e.g. { v31.4s - v1.4s }).
2937 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2939 if (Space == 0 || Space > 3) {
2940 return Error(Loc, "invalid number of vectors");
// List form: "{ vA.K, vB.K, ... }".
2946 while (Parser.getTok().is(AsmToken::Comma)) {
2947 Parser.Lex(); // Eat the comma token.
2949 SMLoc Loc = getLoc();
2951 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2954 // Any Kind suffixes must match on all regs in the list.
2955 if (Kind != NextKind)
2956 return Error(Loc, "mismatched register size suffix");
2958 // Registers must be incremental (with wraparound at 31)
2959 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2960 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2961 return Error(Loc, "registers must be sequential");
2968 if (Parser.getTok().isNot(AsmToken::RCurly))
2969 return Error(getLoc(), "'}' expected");
2970 Parser.Lex(); // Eat the '}' token.
2973 return Error(S, "invalid number of vectors");
2975 unsigned NumElements = 0;
2976 char ElementKind = 0;
2978 parseValidVectorKind(Kind, NumElements, ElementKind);
2980 Operands.push_back(AArch64Operand::CreateVectorList(
2981 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2983 // If there is an index specifier following the list, parse that too.
2984 if (Parser.getTok().is(AsmToken::LBrac)) {
2985 SMLoc SIdx = getLoc();
2986 Parser.Lex(); // Eat left bracket token.
2988 const MCExpr *ImmVal;
2989 if (getParser().parseExpression(ImmVal))
2991 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2993 TokError("immediate value expected for vector index");
2998 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2999 Error(E, "']' expected");
3003 Parser.Lex(); // Eat right bracket token.
3005 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register (e.g. for LDXP-style
/// writeback forms) optionally followed by ", #0"; any index other than an
/// absent one or a literal zero is rejected.
3011 AArch64AsmParser::OperandMatchResultTy
3012 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3013 MCAsmParser &Parser = getParser();
3014 const AsmToken &Tok = Parser.getTok();
3015 if (!Tok.is(AsmToken::Identifier))
3016 return MatchOperand_NoMatch;
3018 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3020 MCContext &Ctx = getContext();
3021 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3022 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3023 return MatchOperand_NoMatch;
3026 Parser.Lex(); // Eat register
// No comma: the register stands alone.
3028 if (Parser.getTok().isNot(AsmToken::Comma)) {
3030 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3031 return MatchOperand_Success;
3033 Parser.Lex(); // Eat comma.
3035 if (Parser.getTok().is(AsmToken::Hash))
3036 Parser.Lex(); // Eat hash
3038 if (Parser.getTok().isNot(AsmToken::Integer)) {
3039 Error(getLoc(), "index must be absent or #0");
3040 return MatchOperand_ParseFail;
3043 const MCExpr *ImmVal;
3044 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3045 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3046 Error(getLoc(), "index must be absent or #0");
3047 return MatchOperand_ParseFail;
3051 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3052 return MatchOperand_Success;
3055 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
3056 /// the operand regardless of the mnemonic. Returns true on error.
3057 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3058 bool invertCondCode) {
3059 MCAsmParser &Parser = getParser();
3060 // Check if the current operand has a custom associated parser, if so, try to
3061 // custom parse the operand, or fallback to the general approach.
3062 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3063 if (ResTy == MatchOperand_Success)
3065 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3066 // there was a match, but an error occurred, in which case, just return that
3067 // the operand parsing failed.
3068 if (ResTy == MatchOperand_ParseFail)
3071 // Nothing custom, so do general case parsing.
3073 switch (getLexer().getKind()) {
3077 if (parseSymbolicImmVal(Expr))
3078 return Error(S, "invalid operand");
3080 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3081 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3084 case AsmToken::LBrac: {
3085 SMLoc Loc = Parser.getTok().getLoc();
3086 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3088 Parser.Lex(); // Eat '['
3090 // There's no comma after a '[', so we can parse the next operand
3092 return parseOperand(Operands, false, false);
3094 case AsmToken::LCurly:
3095 return parseVectorList(Operands);
3096 case AsmToken::Identifier: {
3097 // If we're expecting a Condition Code operand, then just parse that.
3099 return parseCondCode(Operands, invertCondCode);
3101 // If it's a register name, parse it.
3102 if (!parseRegister(Operands))
3105 // This could be an optional "shift" or "extend" operand.
3106 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3107 // We can only continue if no tokens were eaten.
3108 if (GotShift != MatchOperand_NoMatch)
3111 // This was not a register so parse other operands that start with an
3112 // identifier (like labels) as expressions and create them as immediates.
3113 const MCExpr *IdVal;
3115 if (getParser().parseExpression(IdVal))
3118 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3119 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3122 case AsmToken::Integer:
3123 case AsmToken::Real:
3124 case AsmToken::Hash: {
3125 // #42 -> immediate.
3127 if (getLexer().is(AsmToken::Hash))
3130 // Parse a negative sign
3131 bool isNegative = false;
3132 if (Parser.getTok().is(AsmToken::Minus)) {
3134 // We need to consume this token only when we have a Real, otherwise
3135 // we let parseSymbolicImmVal take care of it
3136 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3140 // The only Real that should come through here is a literal #0.0 for
3141 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3142 // so convert the value.
3143 const AsmToken &Tok = Parser.getTok();
3144 if (Tok.is(AsmToken::Real)) {
3145 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3146 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3147 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3148 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3149 Mnemonic != "fcmlt")
3150 return TokError("unexpected floating point literal");
3151 else if (IntVal != 0 || isNegative)
3152 return TokError("expected floating-point constant #0.0")
3153 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as two literal tokens, as the matcher expects.
3156 AArch64Operand::CreateToken("#0", false, S, getContext()));
3158 AArch64Operand::CreateToken(".0", false, S, getContext()));
3162 const MCExpr *ImmVal;
3163 if (parseSymbolicImmVal(ImmVal))
3166 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3167 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3170 case AsmToken::Equal: {
3171 SMLoc Loc = Parser.getTok().getLoc();
3172 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3173 return Error(Loc, "unexpected token in operand");
3174 Parser.Lex(); // Eat '='
3175 const MCExpr *SubExprVal;
3176 if (getParser().parseExpression(SubExprVal))
3179 if (Operands.size() < 2 ||
3180 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3181 return Error(Loc, "Only valid when first operand is register");
3184 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3185 Operands[1]->getReg());
3187 MCContext& Ctx = getContext();
3188 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3189 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3190 if (isa<MCConstantExpr>(SubExprVal)) {
3191 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Shift the constant right in 16-bit steps while it has a clear low
// halfword, so it can be encoded as a movz with an LSL shift.
3192 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3193 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3197 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3198 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3199 Operands.push_back(AArch64Operand::CreateImm(
3200 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3202 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3203 ShiftAmt, true, S, E, Ctx));
3206 APInt Simm = APInt(64, Imm << ShiftAmt);
3207 // check if the immediate is an unsigned or signed 32-bit int for W regs
3208 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3209 return Error(Loc, "Immediate too large for register");
3211 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3212 const MCExpr *CPLoc =
3213 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3214 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3220 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3222 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3223 StringRef Name, SMLoc NameLoc,
3224 OperandVector &Operands) {
3225 MCAsmParser &Parser = getParser();
3226 Name = StringSwitch<StringRef>(Name.lower())
3227 .Case("beq", "b.eq")
3228 .Case("bne", "b.ne")
3229 .Case("bhs", "b.hs")
3230 .Case("bcs", "b.cs")
3231 .Case("blo", "b.lo")
3232 .Case("bcc", "b.cc")
3233 .Case("bmi", "b.mi")
3234 .Case("bpl", "b.pl")
3235 .Case("bvs", "b.vs")
3236 .Case("bvc", "b.vc")
3237 .Case("bhi", "b.hi")
3238 .Case("bls", "b.ls")
3239 .Case("bge", "b.ge")
3240 .Case("blt", "b.lt")
3241 .Case("bgt", "b.gt")
3242 .Case("ble", "b.le")
3243 .Case("bal", "b.al")
3244 .Case("bnv", "b.nv")
3247 // First check for the AArch64-specific .req directive.
3248 if (Parser.getTok().is(AsmToken::Identifier) &&
3249 Parser.getTok().getIdentifier() == ".req") {
3250 parseDirectiveReq(Name, NameLoc);
3251 // We always return 'error' for this, as we're done with this
3252 // statement and don't need to match the 'instruction."
3256 // Create the leading tokens for the mnemonic, split by '.' characters.
3257 size_t Start = 0, Next = Name.find('.');
3258 StringRef Head = Name.slice(Start, Next);
3260 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3261 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3262 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3263 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3264 Parser.eatToEndOfStatement();
3269 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3272 // Handle condition codes for a branch mnemonic
3273 if (Head == "b" && Next != StringRef::npos) {
3275 Next = Name.find('.', Start + 1);
3276 Head = Name.slice(Start + 1, Next);
3278 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3279 (Head.data() - Name.data()));
3280 AArch64CC::CondCode CC = parseCondCodeString(Head);
3281 if (CC == AArch64CC::Invalid)
3282 return Error(SuffixLoc, "invalid condition code");
3284 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3286 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3289 // Add the remaining tokens in the mnemonic.
3290 while (Next != StringRef::npos) {
3292 Next = Name.find('.', Start + 1);
3293 Head = Name.slice(Start, Next);
3294 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3295 (Head.data() - Name.data()) + 1);
3297 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3300 // Conditional compare instructions have a Condition Code operand, which needs
3301 // to be parsed and an immediate operand created.
3302 bool condCodeFourthOperand =
3303 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3304 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3305 Head == "csinc" || Head == "csinv" || Head == "csneg");
3307 // These instructions are aliases to some of the conditional select
3308 // instructions. However, the condition code is inverted in the aliased
3311 // FIXME: Is this the correct way to handle these? Or should the parser
3312 // generate the aliased instructions directly?
3313 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3314 bool condCodeThirdOperand =
3315 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3317 // Read the remaining operands.
3318 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3319 // Read the first operand.
3320 if (parseOperand(Operands, false, false)) {
3321 Parser.eatToEndOfStatement();
3326 while (getLexer().is(AsmToken::Comma)) {
3327 Parser.Lex(); // Eat the comma.
3329 // Parse and remember the operand.
3330 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3331 (N == 3 && condCodeThirdOperand) ||
3332 (N == 2 && condCodeSecondOperand),
3333 condCodeSecondOperand || condCodeThirdOperand)) {
3334 Parser.eatToEndOfStatement();
3338 // After successfully parsing some operands there are two special cases to
3339 // consider (i.e. notional operands not separated by commas). Both are due
3340 // to memory specifiers:
3341 // + An RBrac will end an address for load/store/prefetch
3342 // + An '!' will indicate a pre-indexed operation.
3344 // It's someone else's responsibility to make sure these tokens are sane
3345 // in the given context!
3346 if (Parser.getTok().is(AsmToken::RBrac)) {
3347 SMLoc Loc = Parser.getTok().getLoc();
3348 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3353 if (Parser.getTok().is(AsmToken::Exclaim)) {
3354 SMLoc Loc = Parser.getTok().getLoc();
3355 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3364 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3365 SMLoc Loc = Parser.getTok().getLoc();
3366 Parser.eatToEndOfStatement();
3367 return Error(Loc, "unexpected token in argument list");
3370 Parser.Lex(); // Consume the EndOfStatement
// Post-match semantic validation of an already-encoded MCInst.
// Two passes: (1) reject architecturally unpredictable register overlaps for
// load/store (pair) instructions with writeback or Rt==Rt2; (2) validate
// symbolic-immediate (relocation-specifier) legality on ADD/SUB-immediate.
// Loc[i] is the start SMLoc of parsed operand i, used to point diagnostics at
// the offending operand. Returns true (via Error) on failure, like Error().
3374 // FIXME: This entire function is a giant hack to provide us with decent
3375 // operand range validation/diagnostics until TableGen/MC can be extended
3376 // to support autogeneration of this kind of validation.
3377 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3378 SmallVectorImpl<SMLoc> &Loc) {
3379 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3380 // Check for indexed addressing modes w/ the base register being the
3381 // same as a destination/source register or pair load where
3382 // the Rt == Rt2. All of those are undefined behaviour.
3383 switch (Inst.getOpcode()) {
// Pre/post-indexed LDP: operands 1..3 are Rt, Rt2, Rn here (operand 0 is
// presumably the tied writeback def — confirm against the .td patterns).
3384 case AArch64::LDPSWpre:
3385 case AArch64::LDPWpost:
3386 case AArch64::LDPWpre:
3387 case AArch64::LDPXpost:
3388 case AArch64::LDPXpre: {
3389 unsigned Rt = Inst.getOperand(1).getReg();
3390 unsigned Rt2 = Inst.getOperand(2).getReg();
3391 unsigned Rn = Inst.getOperand(3).getReg();
// isSubRegisterEq also matches Rn == Rt exactly, covering W/X aliasing.
3392 if (RI->isSubRegisterEq(Rn, Rt))
3393 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3394 "is also a destination");
3395 if (RI->isSubRegisterEq(Rn, Rt2))
3396 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3397 "is also a destination");
// Non-writeback (signed-offset) LDP: only the Rt == Rt2 hazard applies.
3400 case AArch64::LDPDi:
3401 case AArch64::LDPQi:
3402 case AArch64::LDPSi:
3403 case AArch64::LDPSWi:
3404 case AArch64::LDPWi:
3405 case AArch64::LDPXi: {
3406 unsigned Rt = Inst.getOperand(0).getReg();
3407 unsigned Rt2 = Inst.getOperand(1).getReg();
3409 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
// FP/SIMD-register LDP writeback forms: base is a GPR, so it can't alias the
// destinations; only Rt == Rt2 needs checking.
3412 case AArch64::LDPDpost:
3413 case AArch64::LDPDpre:
3414 case AArch64::LDPQpost:
3415 case AArch64::LDPQpre:
3416 case AArch64::LDPSpost:
3417 case AArch64::LDPSpre:
3418 case AArch64::LDPSWpost: {
3419 unsigned Rt = Inst.getOperand(1).getReg();
3420 unsigned Rt2 = Inst.getOperand(2).getReg();
3422 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
// STP writeback: storing the base register while also updating it.
3425 case AArch64::STPDpost:
3426 case AArch64::STPDpre:
3427 case AArch64::STPQpost:
3428 case AArch64::STPQpre:
3429 case AArch64::STPSpost:
3430 case AArch64::STPSpre:
3431 case AArch64::STPWpost:
3432 case AArch64::STPWpre:
3433 case AArch64::STPXpost:
3434 case AArch64::STPXpre: {
3435 unsigned Rt = Inst.getOperand(1).getReg();
3436 unsigned Rt2 = Inst.getOperand(2).getReg();
3437 unsigned Rn = Inst.getOperand(3).getReg();
3438 if (RI->isSubRegisterEq(Rn, Rt))
3439 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3440 "is also a source");
3441 if (RI->isSubRegisterEq(Rn, Rt2))
3442 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3443 "is also a source");
// Single-register LDR with writeback: base may not overlap Rt.
3446 case AArch64::LDRBBpre:
3447 case AArch64::LDRBpre:
3448 case AArch64::LDRHHpre:
3449 case AArch64::LDRHpre:
3450 case AArch64::LDRSBWpre:
3451 case AArch64::LDRSBXpre:
3452 case AArch64::LDRSHWpre:
3453 case AArch64::LDRSHXpre:
3454 case AArch64::LDRSWpre:
3455 case AArch64::LDRWpre:
3456 case AArch64::LDRXpre:
3457 case AArch64::LDRBBpost:
3458 case AArch64::LDRBpost:
3459 case AArch64::LDRHHpost:
3460 case AArch64::LDRHpost:
3461 case AArch64::LDRSBWpost:
3462 case AArch64::LDRSBXpost:
3463 case AArch64::LDRSHWpost:
3464 case AArch64::LDRSHXpost:
3465 case AArch64::LDRSWpost:
3466 case AArch64::LDRWpost:
3467 case AArch64::LDRXpost: {
3468 unsigned Rt = Inst.getOperand(1).getReg();
3469 unsigned Rn = Inst.getOperand(2).getReg();
3470 if (RI->isSubRegisterEq(Rn, Rt))
// NOTE(review): the message says "source" although Rt is the load
// destination here; confirm whether the wording is intentional.
3471 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3472 "is also a source");
// Single-register STR with writeback.
3475 case AArch64::STRBBpost:
3476 case AArch64::STRBpost:
3477 case AArch64::STRHHpost:
3478 case AArch64::STRHpost:
3479 case AArch64::STRWpost:
3480 case AArch64::STRXpost:
3481 case AArch64::STRBBpre:
3482 case AArch64::STRBpre:
3483 case AArch64::STRHHpre:
3484 case AArch64::STRHpre:
3485 case AArch64::STRWpre:
3486 case AArch64::STRXpre: {
3487 unsigned Rt = Inst.getOperand(1).getReg();
3488 unsigned Rn = Inst.getOperand(2).getReg();
3489 if (RI->isSubRegisterEq(Rn, Rt))
3490 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3491 "is also a source");
3496 // Now check immediate ranges. Separate from the above as there is overlap
3497 // in the instructions being checked and this keeps the nested conditionals
3499 switch (Inst.getOpcode()) {
3500 case AArch64::ADDSWri:
3501 case AArch64::ADDSXri:
3502 case AArch64::ADDWri:
3503 case AArch64::ADDXri:
3504 case AArch64::SUBSWri:
3505 case AArch64::SUBSXri:
3506 case AArch64::SUBWri:
3507 case AArch64::SUBXri: {
3508 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3509 // some slight duplication here.
3510 if (Inst.getOperand(2).isExpr()) {
3511 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3512 AArch64MCExpr::VariantKind ELFRefKind;
3513 MCSymbolRefExpr::VariantKind DarwinRefKind;
// classifySymbolRef decomposes the expression into a relocation specifier
// (ELF or Darwin flavour) plus a constant addend; failure means the
// expression shape isn't representable as a relocation at all.
3515 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3516 return Error(Loc[2], "invalid immediate expression");
3519 // Only allow these with ADDXri.
3520 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3521 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3522 Inst.getOpcode() == AArch64::ADDXri)
3525 // Only allow these with ADDXri/ADDWri
3526 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3527 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3528 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3529 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3530 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3531 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3532 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3533 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3534 (Inst.getOpcode() == AArch64::ADDXri ||
3535 Inst.getOpcode() == AArch64::ADDWri))
3538 // Don't allow expressions in the immediate field otherwise
3539 return Error(Loc[2], "invalid immediate expression");
// Translates a matcher error code (Match_*) into a human-readable diagnostic
// anchored at Loc. Always returns true (the Error() convention), so callers
// can simply `return showMatchError(...)`.
3548 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3550 case Match_MissingFeature:
3552 "instruction requires a CPU feature not currently enabled");
3553 case Match_InvalidOperand:
3554 return Error(Loc, "invalid operand for instruction");
3555 case Match_InvalidSuffix:
3556 return Error(Loc, "invalid type suffix for instruction");
3557 case Match_InvalidCondCode:
3558 return Error(Loc, "expected AArch64 condition code");
3559 case Match_AddSubRegExtendSmall:
3561 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3562 case Match_AddSubRegExtendLarge:
3564 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3565 case Match_AddSubSecondSource:
3567 "expected compatible register, symbol or integer in range [0, 4095]");
3568 case Match_LogicalSecondSource:
3569 return Error(Loc, "expected compatible register or logical immediate");
3570 case Match_InvalidMovImm32Shift:
3571 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3572 case Match_InvalidMovImm64Shift:
3573 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3574 case Match_AddSubRegShift32:
3576 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3577 case Match_AddSubRegShift64:
3579 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3580 case Match_InvalidFPImm:
3582 "expected compatible register or floating-point constant");
3583 case Match_InvalidMemoryIndexedSImm9:
3584 return Error(Loc, "index must be an integer in range [-256, 255].");
3585 case Match_InvalidMemoryIndexed4SImm7:
3586 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3587 case Match_InvalidMemoryIndexed8SImm7:
3588 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3589 case Match_InvalidMemoryIndexed16SImm7:
3590 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3591 case Match_InvalidMemoryWExtend8:
3593 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3594 case Match_InvalidMemoryWExtend16:
3596 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3597 case Match_InvalidMemoryWExtend32:
3599 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3600 case Match_InvalidMemoryWExtend64:
3602 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3603 case Match_InvalidMemoryWExtend128:
3605 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3606 case Match_InvalidMemoryXExtend8:
3608 "expected 'lsl' or 'sxtx' with optional shift of #0");
3609 case Match_InvalidMemoryXExtend16:
3611 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3612 case Match_InvalidMemoryXExtend32:
3614 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3615 case Match_InvalidMemoryXExtend64:
3617 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3618 case Match_InvalidMemoryXExtend128:
3620 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3621 case Match_InvalidMemoryIndexed1:
3622 return Error(Loc, "index must be an integer in range [0, 4095].");
3623 case Match_InvalidMemoryIndexed2:
3624 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3625 case Match_InvalidMemoryIndexed4:
3626 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3627 case Match_InvalidMemoryIndexed8:
3628 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3629 case Match_InvalidMemoryIndexed16:
3630 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3631 case Match_InvalidImm0_1:
3632 return Error(Loc, "immediate must be an integer in range [0, 1].");
3633 case Match_InvalidImm0_7:
3634 return Error(Loc, "immediate must be an integer in range [0, 7].");
3635 case Match_InvalidImm0_15:
3636 return Error(Loc, "immediate must be an integer in range [0, 15].");
3637 case Match_InvalidImm0_31:
3638 return Error(Loc, "immediate must be an integer in range [0, 31].");
3639 case Match_InvalidImm0_63:
3640 return Error(Loc, "immediate must be an integer in range [0, 63].");
3641 case Match_InvalidImm0_127:
3642 return Error(Loc, "immediate must be an integer in range [0, 127].");
3643 case Match_InvalidImm0_65535:
3644 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3645 case Match_InvalidImm1_8:
3646 return Error(Loc, "immediate must be an integer in range [1, 8].");
3647 case Match_InvalidImm1_16:
3648 return Error(Loc, "immediate must be an integer in range [1, 16].");
3649 case Match_InvalidImm1_32:
3650 return Error(Loc, "immediate must be an integer in range [1, 32].");
3651 case Match_InvalidImm1_64:
3652 return Error(Loc, "immediate must be an integer in range [1, 64].");
3653 case Match_InvalidIndex1:
3654 return Error(Loc, "expected lane specifier '[1]'");
3655 case Match_InvalidIndexB:
3656 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3657 case Match_InvalidIndexH:
3658 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3659 case Match_InvalidIndexS:
3660 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3661 case Match_InvalidIndexD:
3662 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3663 case Match_InvalidLabel:
3664 return Error(Loc, "expected label or encodable integer pc offset");
// (case labels for the two messages below are elided in this listing;
// from the messages they correspond to the MRS/MSR system-register cases.)
3666 return Error(Loc, "expected readable system register");
3668 return Error(Loc, "expected writable system register or pstate");
3669 case Match_MnemonicFail:
3670 return Error(Loc, "unrecognized instruction mnemonic");
// Any unhandled code indicates a matcher/table mismatch — fail loudly.
3672 llvm_unreachable("unexpected error code!");
// Forward declaration; defined by the TableGen'erated matcher include.
3676 static const char *getSubtargetFeatureName(uint64_t Val);
// Main entry point after operand parsing: rewrites a handful of aliases the
// InstAlias mechanism can't express (lsl->ubfm, bfc->bfm, bf*/sbf*/ubf*
// families, sxt*/uxt* register-class fixups, fmov #0.0), then runs the
// TableGen matcher (short-form NEON table first, long-form second), emits on
// success, and reports diagnostics otherwise. Returns true on error.
3678 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3679 OperandVector &Operands,
3681 uint64_t &ErrorInfo,
3682 bool MatchingInlineAsm) {
3683 assert(!Operands.empty() && "Unexpect empty operand list!");
3684 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3685 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3687 StringRef Tok = Op.getToken();
3688 unsigned NumOperands = Operands.size();
// --- Alias: "lsl Rd, Rn, #imm" -> "ubfm Rd, Rn, #(-imm mod W), #(W-1-imm)".
3690 if (NumOperands == 4 && Tok == "lsl") {
3691 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3692 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3693 if (Op2.isReg() && Op3.isImm()) {
3694 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3696 uint64_t Op3Val = Op3CE->getValue();
3697 uint64_t NewOp3Val = 0;
3698 uint64_t NewOp4Val = 0;
// Register width (32 vs 64) decides the modulus for the immr/imms rewrite.
3699 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3701 NewOp3Val = (32 - Op3Val) & 0x1f;
3702 NewOp4Val = 31 - Op3Val;
3704 NewOp3Val = (64 - Op3Val) & 0x3f;
3705 NewOp4Val = 63 - Op3Val;
3708 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3709 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3711 Operands[0] = AArch64Operand::CreateToken(
3712 "ubfm", false, Op.getStartLoc(), getContext());
3713 Operands.push_back(AArch64Operand::CreateImm(
3714 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3715 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3716 Op3.getEndLoc(), getContext());
// --- Alias: BFC -> BFM with WZR/XZR source.
3719 } else if (NumOperands == 4 && Tok == "bfc") {
3720 // FIXME: Horrible hack to handle BFC->BFM alias.
3721 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
// NOTE(review): LSBOp/WidthOp are by-value copies of the operands (unlike
// the references used elsewhere) — confirm this slicing-by-copy is intended.
3722 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3723 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3725 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3726 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3727 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3729 if (LSBCE && WidthCE) {
3730 uint64_t LSB = LSBCE->getValue();
3731 uint64_t Width = WidthCE->getValue();
3733 uint64_t RegWidth = 0;
3734 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb/width against the destination register width.
3740 if (LSB >= RegWidth)
3741 return Error(LSBOp.getStartLoc(),
3742 "expected integer in range [0, 31]");
3743 if (Width < 1 || Width > RegWidth)
3744 return Error(WidthOp.getStartLoc(),
3745 "expected integer in range [1, 32]");
3749 ImmR = (32 - LSB) & 0x1f;
3751 ImmR = (64 - LSB) & 0x3f;
3753 uint64_t ImmS = Width - 1;
3755 if (ImmR != 0 && ImmS >= ImmR)
3756 return Error(WidthOp.getStartLoc(),
3757 "requested insert overflows register");
3759 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3760 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3761 Operands[0] = AArch64Operand::CreateToken(
3762 "bfm", false, Op.getStartLoc(), getContext());
// BFC has no source register; synthesize the zero register of matching width.
3763 Operands[2] = AArch64Operand::CreateReg(
3764 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3765 SMLoc(), getContext());
3766 Operands[3] = AArch64Operand::CreateImm(
3767 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3768 Operands.emplace_back(
3769 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3770 WidthOp.getEndLoc(), getContext()));
// --- Aliases with 5 operands: BFI/SBFIZ/UBFIZ -> BFM/SBFM/UBFM.
3773 } else if (NumOperands == 5) {
3774 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3775 // UBFIZ -> UBFM aliases.
3776 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3777 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3778 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3779 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3781 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3782 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3783 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3785 if (Op3CE && Op4CE) {
3786 uint64_t Op3Val = Op3CE->getValue();
3787 uint64_t Op4Val = Op4CE->getValue();
3789 uint64_t RegWidth = 0;
3790 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3796 if (Op3Val >= RegWidth)
3797 return Error(Op3.getStartLoc(),
3798 "expected integer in range [0, 31]");
3799 if (Op4Val < 1 || Op4Val > RegWidth)
3800 return Error(Op4.getStartLoc(),
3801 "expected integer in range [1, 32]");
3803 uint64_t NewOp3Val = 0;
3805 NewOp3Val = (32 - Op3Val) & 0x1f;
3807 NewOp3Val = (64 - Op3Val) & 0x3f;
3809 uint64_t NewOp4Val = Op4Val - 1;
3811 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3812 return Error(Op4.getStartLoc(),
3813 "requested insert overflows register");
3815 const MCExpr *NewOp3 =
3816 MCConstantExpr::create(NewOp3Val, getContext());
3817 const MCExpr *NewOp4 =
3818 MCConstantExpr::create(NewOp4Val, getContext());
3819 Operands[3] = AArch64Operand::CreateImm(
3820 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3821 Operands[4] = AArch64Operand::CreateImm(
3822 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// Rewrite the mnemonic token to the canonical *BFM form.
3824 Operands[0] = AArch64Operand::CreateToken(
3825 "bfm", false, Op.getStartLoc(), getContext());
3826 else if (Tok == "sbfiz")
3827 Operands[0] = AArch64Operand::CreateToken(
3828 "sbfm", false, Op.getStartLoc(), getContext());
3829 else if (Tok == "ubfiz")
3830 Operands[0] = AArch64Operand::CreateToken(
3831 "ubfm", false, Op.getStartLoc(), getContext());
3833 llvm_unreachable("No valid mnemonic for alias?");
3837 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3838 // UBFX -> UBFM aliases.
3839 } else if (NumOperands == 5 &&
3840 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3841 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3842 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3843 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3845 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3846 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3847 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3849 if (Op3CE && Op4CE) {
3850 uint64_t Op3Val = Op3CE->getValue();
3851 uint64_t Op4Val = Op4CE->getValue();
3853 uint64_t RegWidth = 0;
3854 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3860 if (Op3Val >= RegWidth)
3861 return Error(Op3.getStartLoc(),
3862 "expected integer in range [0, 31]");
3863 if (Op4Val < 1 || Op4Val > RegWidth)
3864 return Error(Op4.getStartLoc(),
3865 "expected integer in range [1, 32]");
// For extract forms, imms = lsb + width - 1; check it stays in-register.
3867 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3869 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3870 return Error(Op4.getStartLoc(),
3871 "requested extract overflows register");
3873 const MCExpr *NewOp4 =
3874 MCConstantExpr::create(NewOp4Val, getContext());
3875 Operands[4] = AArch64Operand::CreateImm(
3876 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3878 Operands[0] = AArch64Operand::CreateToken(
3879 "bfm", false, Op.getStartLoc(), getContext());
3880 else if (Tok == "sbfx")
3881 Operands[0] = AArch64Operand::CreateToken(
3882 "sbfm", false, Op.getStartLoc(), getContext());
3883 else if (Tok == "ubfx")
3884 Operands[0] = AArch64Operand::CreateToken(
3885 "ubfm", false, Op.getStartLoc(), getContext());
3887 llvm_unreachable("No valid mnemonic for alias?");
3892 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3893 // InstAlias can't quite handle this since the reg classes aren't
3895 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3896 // The source register can be Wn here, but the matcher expects a
3897 // GPR64. Twiddle it here if necessary.
3898 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3900 unsigned Reg = getXRegFromWReg(Op.getReg());
3901 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3902 Op.getEndLoc(), getContext());
3905 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3906 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3907 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3909 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3911 // The source register can be Wn here, but the matcher expects a
3912 // GPR64. Twiddle it here if necessary.
3913 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3915 unsigned Reg = getXRegFromWReg(Op.getReg());
3916 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3917 Op.getEndLoc(), getContext());
3921 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3922 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3923 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3925 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3927 // The source register can be Wn here, but the matcher expects a
3928 // GPR32. Twiddle it here if necessary.
3929 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3931 unsigned Reg = getWRegFromXReg(Op.getReg());
3932 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3933 Op.getEndLoc(), getContext());
3938 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3939 if (NumOperands == 3 && Tok == "fmov") {
3940 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3941 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// (unsigned)-1 is the sentinel FPImm value used for #0.0 — confirm against
// the CreateFPImm/parse path that produces it.
3942 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3944 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3948 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3949 Op.getEndLoc(), getContext());
3954 // First try to match against the secondary set of tables containing the
3955 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3956 unsigned MatchResult =
3957 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3959 // If that fails, try against the alternate table containing long-form NEON:
3960 // "fadd v0.2s, v1.2s, v2.2s"
3961 if (MatchResult != Match_Success) {
3962 // But first, save the short-form match result: we can use it in case the
3963 // long-form match also fails.
3964 auto ShortFormNEONErrorInfo = ErrorInfo;
3965 auto ShortFormNEONMatchResult = MatchResult;
3968 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3970 // Now, both matches failed, and the long-form match failed on the mnemonic
3971 // suffix token operand. The short-form match failure is probably more
3972 // relevant: use it instead.
3973 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3974 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3975 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3976 MatchResult = ShortFormNEONMatchResult;
3977 ErrorInfo = ShortFormNEONErrorInfo;
// Dispatch on the final match result: emit, or diagnose.
3982 switch (MatchResult) {
3983 case Match_Success: {
3984 // Perform range checking and other semantic validations
3985 SmallVector<SMLoc, 8> OperandLocs;
3986 NumOperands = Operands.size();
3987 for (unsigned i = 1; i < NumOperands; ++i)
3988 OperandLocs.push_back(Operands[i]->getStartLoc());
3989 if (validateInstruction(Inst, OperandLocs))
3993 Out.EmitInstruction(Inst, getSTI());
3996 case Match_MissingFeature: {
3997 assert(ErrorInfo && "Unknown missing feature!");
3998 // Special case the error message for the very common case where only
3999 // a single subtarget feature is missing (neon, e.g.).
4000 std::string Msg = "instruction requires:";
// Walk each set bit of ErrorInfo and append the named missing feature.
4002 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4003 if (ErrorInfo & Mask) {
4005 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4009 return Error(IDLoc, Msg);
4011 case Match_MnemonicFail:
4012 return showMatchError(IDLoc, MatchResult);
4013 case Match_InvalidOperand: {
4014 SMLoc ErrorLoc = IDLoc;
// ~0ULL means "no specific operand"; otherwise point at the offender.
4016 if (ErrorInfo != ~0ULL) {
4017 if (ErrorInfo >= Operands.size())
4018 return Error(IDLoc, "too few operands for instruction");
4020 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4021 if (ErrorLoc == SMLoc())
4024 // If the match failed on a suffix token operand, tweak the diagnostic
4026 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4027 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4028 MatchResult = Match_InvalidSuffix;
4030 return showMatchError(ErrorLoc, MatchResult);
// All range/shape diagnostics share the same "locate operand, show error"
// handling below.
4032 case Match_InvalidMemoryIndexed1:
4033 case Match_InvalidMemoryIndexed2:
4034 case Match_InvalidMemoryIndexed4:
4035 case Match_InvalidMemoryIndexed8:
4036 case Match_InvalidMemoryIndexed16:
4037 case Match_InvalidCondCode:
4038 case Match_AddSubRegExtendSmall:
4039 case Match_AddSubRegExtendLarge:
4040 case Match_AddSubSecondSource:
4041 case Match_LogicalSecondSource:
4042 case Match_AddSubRegShift32:
4043 case Match_AddSubRegShift64:
4044 case Match_InvalidMovImm32Shift:
4045 case Match_InvalidMovImm64Shift:
4046 case Match_InvalidFPImm:
4047 case Match_InvalidMemoryWExtend8:
4048 case Match_InvalidMemoryWExtend16:
4049 case Match_InvalidMemoryWExtend32:
4050 case Match_InvalidMemoryWExtend64:
4051 case Match_InvalidMemoryWExtend128:
4052 case Match_InvalidMemoryXExtend8:
4053 case Match_InvalidMemoryXExtend16:
4054 case Match_InvalidMemoryXExtend32:
4055 case Match_InvalidMemoryXExtend64:
4056 case Match_InvalidMemoryXExtend128:
4057 case Match_InvalidMemoryIndexed4SImm7:
4058 case Match_InvalidMemoryIndexed8SImm7:
4059 case Match_InvalidMemoryIndexed16SImm7:
4060 case Match_InvalidMemoryIndexedSImm9:
4061 case Match_InvalidImm0_1:
4062 case Match_InvalidImm0_7:
4063 case Match_InvalidImm0_15:
4064 case Match_InvalidImm0_31:
4065 case Match_InvalidImm0_63:
4066 case Match_InvalidImm0_127:
4067 case Match_InvalidImm0_65535:
4068 case Match_InvalidImm1_8:
4069 case Match_InvalidImm1_16:
4070 case Match_InvalidImm1_32:
4071 case Match_InvalidImm1_64:
4072 case Match_InvalidIndex1:
4073 case Match_InvalidIndexB:
4074 case Match_InvalidIndexH:
4075 case Match_InvalidIndexS:
4076 case Match_InvalidIndexD:
4077 case Match_InvalidLabel:
4080 if (ErrorInfo >= Operands.size())
4081 return Error(IDLoc, "too few operands for instruction");
4082 // Any time we get here, there's nothing fancy to do. Just get the
4083 // operand SMLoc and display the diagnostic.
4084 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4085 if (ErrorLoc == SMLoc())
4087 return showMatchError(ErrorLoc, MatchResult);
4091 llvm_unreachable("Implement any new match types added!");
4094 /// ParseDirective parses the arm specific directives
// Dispatches target-specific assembler directives by name. Returns the result
// of the chosen handler; the .inst directive is only available for non-MachO,
// non-COFF (i.e. ELF) output, and anything unmatched falls through to the
// .loh handler.
4095 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4096 const MCObjectFileInfo::Environment Format =
4097 getContext().getObjectFileInfo()->getObjectFileType();
4098 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4099 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4101 StringRef IDVal = DirectiveID.getIdentifier();
4102 SMLoc Loc = DirectiveID.getLoc();
4103 if (IDVal == ".hword")
4104 return parseDirectiveWord(2, Loc);
4105 if (IDVal == ".word")
4106 return parseDirectiveWord(4, Loc);
4107 if (IDVal == ".xword")
4108 return parseDirectiveWord(8, Loc);
4109 if (IDVal == ".tlsdesccall")
4110 return parseDirectiveTLSDescCall(Loc);
4111 if (IDVal == ".ltorg" || IDVal == ".pool")
4112 return parseDirectiveLtorg(Loc);
4113 if (IDVal == ".unreq")
4114 return parseDirectiveUnreq(Loc);
4116 if (!IsMachO && !IsCOFF) {
4117 if (IDVal == ".inst")
4118 return parseDirectiveInst(Loc);
4121 return parseDirectiveLOH(IDVal, Loc);
4124 /// parseDirectiveWord
4125 ///  ::= .word [ expression (, expression)* ]
// Emits each comma-separated expression as a Size-byte value via the
// streamer. Shared by .hword (2), .word (4), and .xword (8).
4126 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4127 MCAsmParser &Parser = getParser();
// An empty operand list (bare directive) is accepted and emits nothing.
4128 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4130 const MCExpr *Value;
4131 if (getParser().parseExpression(Value))
4134 getParser().getStreamer().EmitValue(Value, Size, L);
4136 if (getLexer().is(AsmToken::EndOfStatement))
4139 // FIXME: Improve diagnostic.
4140 if (getLexer().isNot(AsmToken::Comma))
4141 return Error(L, "unexpected token in directive");
4150 /// parseDirectiveInst
4151 ///  ::= .inst opcode [, ...]
// Emits raw 32-bit instruction words. Each operand must fold to a constant
// expression; errors are reported but the statement is consumed so parsing
// can continue.
4152 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4153 MCAsmParser &Parser = getParser();
// At least one operand is required.
4154 if (getLexer().is(AsmToken::EndOfStatement)) {
4155 Parser.eatToEndOfStatement();
4156 Error(Loc, "expected expression following directive");
4163 if (getParser().parseExpression(Expr)) {
4164 Error(Loc, "expected expression");
4168 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4170 Error(Loc, "expected constant expression");
4174 getTargetStreamer().emitInst(Value->getValue());
4176 if (getLexer().is(AsmToken::EndOfStatement))
4179 if (getLexer().isNot(AsmToken::Comma)) {
4180 Error(Loc, "unexpected token in directive");
4184 Parser.Lex(); // Eat comma.
4191 // parseDirectiveTLSDescCall:
4192 //   ::= .tlsdesccall symbol
// Emits a TLSDESCCALL marker instruction carrying a VK_TLSDESC reference to
// the named symbol, so the TLS descriptor relocation is attached at this
// point in the stream.
4193 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4195 if (getParser().parseIdentifier(Name))
4196 return Error(L, "expected symbol after directive");
4198 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4199 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4200 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4203 Inst.setOpcode(AArch64::TLSDESCCALL);
4204 Inst.addOperand(MCOperand::createExpr(Expr));
4206 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4210 /// ::= .loh <lohName | lohId> label1, ..., labelN
4211 /// The number of arguments depends on the loh identifier.
// Parses a Mach-O Linker Optimization Hint directive: an LOH kind (by name or
// numeric id) followed by the exact number of label arguments that kind
// requires, then forwards everything to the streamer.
4212 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4213 if (IDVal != MCLOHDirectiveName())
4216 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4217 if (getParser().getTok().isNot(AsmToken::Integer))
4218 return TokError("expected an identifier or a number in directive");
4219 // We successfully get a numeric value for the identifier.
4220 // Check if it is valid.
4221 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): 'Id <= -1U' compares int64_t against unsigned; -1U converts
// to 4294967295, so this reads as 'Id <= 4294967295' — almost always true,
// which likely isn't the intended range guard. Confirm and consider checking
// isValidMCLOHType(Id) alone.
4222 if (Id <= -1U && !isValidMCLOHType(Id))
4223 return TokError("invalid numeric identifier in directive");
4224 Kind = (MCLOHType)Id;
4226 StringRef Name = getTok().getIdentifier();
4227 // We successfully parse an identifier.
4228 // Check if it is a recognized one.
4229 int Id = MCLOHNameToId(Name);
4232 return TokError("invalid identifier in directive");
4233 Kind = (MCLOHType)Id;
4235 // Consume the identifier.
4237 // Get the number of arguments of this LOH.
4238 int NbArgs = MCLOHIdToNbArgs(Kind);
4240 assert(NbArgs != -1 && "Invalid number of arguments");
4242 SmallVector<MCSymbol *, 3> Args;
4243 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4245 if (getParser().parseIdentifier(Name))
4246 return TokError("expected identifier in directive");
4247 Args.push_back(getContext().getOrCreateSymbol(Name));
// No trailing comma after the last argument.
4249 if (Idx + 1 == NbArgs)
4251 if (getLexer().isNot(AsmToken::Comma))
4252 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4255 if (getLexer().isNot(AsmToken::EndOfStatement))
4256 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4258 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4262 /// parseDirectiveLtorg
4263 /// ::= .ltorg | .pool
4264 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4265 getTargetStreamer().emitCurrentConstantPool();
4269 /// parseDirectiveReq
4270 /// ::= name .req registername
4271 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4272 MCAsmParser &Parser = getParser();
4273 Parser.Lex(); // Eat the '.req' token.
4274 SMLoc SRegLoc = getLoc();
4275 unsigned RegNum = tryParseRegister();
4276 bool IsVector = false;
4278 if (RegNum == static_cast<unsigned>(-1)) {
4280 RegNum = tryMatchVectorRegister(Kind, false);
4281 if (!Kind.empty()) {
4282 Error(SRegLoc, "vector register without type specifier expected");
4288 if (RegNum == static_cast<unsigned>(-1)) {
4289 Parser.eatToEndOfStatement();
4290 Error(SRegLoc, "register name or alias expected");
4294 // Shouldn't be anything else.
4295 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4296 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4297 Parser.eatToEndOfStatement();
4301 Parser.Lex(); // Consume the EndOfStatement
4303 auto pair = std::make_pair(IsVector, RegNum);
4304 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4305 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4310 /// parseDirectiveUneq
4311 /// ::= .unreq registername
4312 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4313 MCAsmParser &Parser = getParser();
4314 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4315 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4316 Parser.eatToEndOfStatement();
4319 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4320 Parser.Lex(); // Eat the identifier.
4325 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4326 AArch64MCExpr::VariantKind &ELFRefKind,
4327 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4329 ELFRefKind = AArch64MCExpr::VK_INVALID;
4330 DarwinRefKind = MCSymbolRefExpr::VK_None;
4333 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4334 ELFRefKind = AE->getKind();
4335 Expr = AE->getSubExpr();
4338 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4340 // It's a simple symbol reference with no addend.
4341 DarwinRefKind = SE->getKind();
4345 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4349 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4352 DarwinRefKind = SE->getKind();
4354 if (BE->getOpcode() != MCBinaryExpr::Add &&
4355 BE->getOpcode() != MCBinaryExpr::Sub)
4358 // See if the addend is is a constant, otherwise there's more going
4359 // on here than we can deal with.
4360 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4364 Addend = AddendExpr->getValue();
4365 if (BE->getOpcode() == MCBinaryExpr::Sub)
4368 // It's some symbol reference + a constant addend, but really
4369 // shouldn't use both Darwin and ELF syntax.
4370 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4371 DarwinRefKind == MCSymbolRefExpr::VK_None;
4374 /// Force static initialization.
4375 extern "C" void LLVMInitializeAArch64AsmParser() {
4376 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4377 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4378 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4381 #define GET_REGISTER_MATCHER
4382 #define GET_SUBTARGET_FEATURE_NAME
4383 #define GET_MATCHER_IMPLEMENTATION
4384 #include "AArch64GenAsmMatcher.inc"
4386 // Define this matcher function after the auto-generated include so we
4387 // have the match class enum definitions.
// NOTE(review): the leading "43xx" tokens on each line are line numbers from
// the original file that leaked into this extraction, and original lines
// 4389 (the rest of the parameter list — presumably the match-class Kind
// parameter; confirm against the original) and 4395-4438 (the switch that
// maps Kind to ExpectedVal) are missing here.  Restore them from the
// original file before this can compile.
4388 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4390   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4391   // If the kind is a token for a literal immediate, check if our asm
4392   // operand matches. This is for InstAliases which have a fixed-value
4393   // immediate in the syntax.
4394   int64_t ExpectedVal;
// (gap: original lines 4395-4396 — start of the Kind switch; unknown kinds
// fall through to the rejection below)
4397     return Match_InvalidOperand;
// (gap: original lines 4398-4438 — remaining switch cases setting
// ExpectedVal, and a guard rejecting non-immediate operands)
4439     return Match_InvalidOperand;
4440   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
// (gap: original lines 4441 — null check on CE) A non-constant immediate
// cannot equal a fixed alias immediate, so it is rejected.
4442     return Match_InvalidOperand;
4443   if (CE->getValue() == ExpectedVal)
4444     return Match_Success;
4445   return Match_InvalidOperand;
// Parses a consecutive even/odd GPR pair operand (both W or both X
// registers, first even-encoded, second = first + 1) and pushes the matching
// sequential-pair super-register onto Operands.
//
// NOTE(review): the leading "44xx" tokens are original-file line numbers
// leaked by extraction; several original lines are missing from this chunk
// (gaps in those numbers), including the declarations of S, M, E and Pair,
// the closing braces of the early-error blocks, the branch selecting the
// X vs W pair class, and the function's final closing brace.  Restore them
// from the original file before this can compile.
4449 AArch64AsmParser::OperandMatchResultTy
4450 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
// (gap: declaration of SMLoc S — the operand's start location)
4454 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4455 Error(S, "expected register");
4456 return MatchOperand_ParseFail;
4459 int FirstReg = tryParseRegister();
4460 if (FirstReg == -1) {
4461 return MatchOperand_ParseFail;
4463 const MCRegisterClass &WRegClass =
4464 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4465 const MCRegisterClass &XRegClass =
4466 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
// Both registers of the pair must come from the same class (W or X).
4468 bool isXReg = XRegClass.contains(FirstReg),
4469 isWReg = WRegClass.contains(FirstReg);
4470 if (!isXReg && !isWReg) {
4471 Error(S, "expected first even register of a "
4472 "consecutive same-size even/odd register pair");
4473 return MatchOperand_ParseFail;
// The hardware encoding, not the register name, determines even/odd.
4476 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4477 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4479 if (FirstEncoding & 0x1) {
4480 Error(S, "expected first even register of a "
4481 "consecutive same-size even/odd register pair");
4482 return MatchOperand_ParseFail;
// (gap: declaration of SMLoc M — location used for the comma diagnostic)
4486 if (getParser().getTok().isNot(AsmToken::Comma)) {
4487 Error(M, "expected comma");
4488 return MatchOperand_ParseFail;
// (gap: consuming the comma; declaration of SMLoc E for the next diagnostic)
4494 int SecondReg = tryParseRegister();
4495 if (SecondReg ==-1) {
4496 return MatchOperand_ParseFail;
// Second register must be the odd partner: encoding FirstEncoding + 1 and
// the same register class as the first.
4499 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4500 (isXReg && !XRegClass.contains(SecondReg)) ||
4501 (isWReg && !WRegClass.contains(SecondReg))) {
4502 Error(E,"expected second odd register of a "
4503 "consecutive same-size even/odd register pair");
4504 return MatchOperand_ParseFail;
// (gap: declaration of Pair and the if/else choosing X vs W below)
4509 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4510 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4512 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4513 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
// (gap: remainder of the CreateReg call's argument list)
4516 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4519 return MatchOperand_Success;