1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
109 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
112 enum AArch64MatchResultTy {
113 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
114 #define GET_OPERAND_DIAGNOSTIC_TYPES
115 #include "AArch64GenAsmMatcher.inc"
117 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
118 const MCInstrInfo &MII, const MCTargetOptions &Options)
119 : MCTargetAsmParser(Options, STI) {
120 MCAsmParserExtension::Initialize(Parser);
121 MCStreamer &S = getParser().getStreamer();
122 if (S.getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(S);
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 class AArch64Operand : public MCParsedAsmOperand {
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
217 uint32_t PStateField;
230 struct ShiftExtendOp {
231 AArch64_AM::ShiftExtendType Type;
233 bool HasExplicitAmount;
243 struct VectorListOp VectorList;
244 struct VectorIndexOp VectorIndex;
246 struct ShiftedImmOp ShiftedImm;
247 struct CondCodeOp CondCode;
248 struct FPImmOp FPImm;
249 struct BarrierOp Barrier;
250 struct SysRegOp SysReg;
251 struct SysCRImmOp SysCRImm;
252 struct PrefetchOp Prefetch;
253 struct ShiftExtendOp ShiftExtend;
256 // Keep the MCContext around as the MCExprs may need manipulated during
257 // the add<>Operands() calls.
261 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
263 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
265 StartLoc = o.StartLoc;
275 ShiftedImm = o.ShiftedImm;
278 CondCode = o.CondCode;
290 VectorList = o.VectorList;
293 VectorIndex = o.VectorIndex;
299 SysCRImm = o.SysCRImm;
302 Prefetch = o.Prefetch;
305 ShiftExtend = o.ShiftExtend;
310 /// getStartLoc - Get the location of the first token of this operand.
311 SMLoc getStartLoc() const override { return StartLoc; }
312 /// getEndLoc - Get the location of the last token of this operand.
313 SMLoc getEndLoc() const override { return EndLoc; }
315 StringRef getToken() const {
316 assert(Kind == k_Token && "Invalid access!");
317 return StringRef(Tok.Data, Tok.Length);
320 bool isTokenSuffix() const {
321 assert(Kind == k_Token && "Invalid access!");
325 const MCExpr *getImm() const {
326 assert(Kind == k_Immediate && "Invalid access!");
330 const MCExpr *getShiftedImmVal() const {
331 assert(Kind == k_ShiftedImm && "Invalid access!");
332 return ShiftedImm.Val;
335 unsigned getShiftedImmShift() const {
336 assert(Kind == k_ShiftedImm && "Invalid access!");
337 return ShiftedImm.ShiftAmount;
340 AArch64CC::CondCode getCondCode() const {
341 assert(Kind == k_CondCode && "Invalid access!");
342 return CondCode.Code;
345 unsigned getFPImm() const {
346 assert(Kind == k_FPImm && "Invalid access!");
350 unsigned getBarrier() const {
351 assert(Kind == k_Barrier && "Invalid access!");
355 StringRef getBarrierName() const {
356 assert(Kind == k_Barrier && "Invalid access!");
357 return StringRef(Barrier.Data, Barrier.Length);
360 unsigned getReg() const override {
361 assert(Kind == k_Register && "Invalid access!");
365 unsigned getVectorListStart() const {
366 assert(Kind == k_VectorList && "Invalid access!");
367 return VectorList.RegNum;
370 unsigned getVectorListCount() const {
371 assert(Kind == k_VectorList && "Invalid access!");
372 return VectorList.Count;
375 unsigned getVectorIndex() const {
376 assert(Kind == k_VectorIndex && "Invalid access!");
377 return VectorIndex.Val;
380 StringRef getSysReg() const {
381 assert(Kind == k_SysReg && "Invalid access!");
382 return StringRef(SysReg.Data, SysReg.Length);
385 unsigned getSysCR() const {
386 assert(Kind == k_SysCR && "Invalid access!");
390 unsigned getPrefetch() const {
391 assert(Kind == k_Prefetch && "Invalid access!");
395 StringRef getPrefetchName() const {
396 assert(Kind == k_Prefetch && "Invalid access!");
397 return StringRef(Prefetch.Data, Prefetch.Length);
400 AArch64_AM::ShiftExtendType getShiftExtendType() const {
401 assert(Kind == k_ShiftExtend && "Invalid access!");
402 return ShiftExtend.Type;
405 unsigned getShiftExtendAmount() const {
406 assert(Kind == k_ShiftExtend && "Invalid access!");
407 return ShiftExtend.Amount;
410 bool hasShiftExtendAmount() const {
411 assert(Kind == k_ShiftExtend && "Invalid access!");
412 return ShiftExtend.HasExplicitAmount;
415 bool isImm() const override { return Kind == k_Immediate; }
416 bool isMem() const override { return false; }
417 bool isSImm9() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val < 256);
426 bool isSImm7s4() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
435 bool isSImm7s8() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
444 bool isSImm7s16() const {
447 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
450 int64_t Val = MCE->getValue();
451 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
454 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
455 AArch64MCExpr::VariantKind ELFRefKind;
456 MCSymbolRefExpr::VariantKind DarwinRefKind;
458 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
460 // If we don't understand the expression, assume the best and
461 // let the fixup and relocation code deal with it.
465 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
466 ELFRefKind == AArch64MCExpr::VK_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
474 // Note that we don't range-check the addend. It's adjusted modulo page
475 // size when converted, so there is no "out of range" condition when using
477 return Addend >= 0 && (Addend % Scale) == 0;
478 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
479 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
480 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
487 template <int Scale> bool isUImm12Offset() const {
491 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493 return isSymbolicUImm12Offset(getImm(), Scale);
495 int64_t Val = MCE->getValue();
496 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
499 bool isImm0_1() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val >= 0 && Val < 2);
508 bool isImm0_7() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val >= 0 && Val < 8);
517 bool isImm1_8() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val > 0 && Val < 9);
526 bool isImm0_15() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val >= 0 && Val < 16);
535 bool isImm1_16() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val > 0 && Val < 17);
544 bool isImm0_31() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 0 && Val < 32);
553 bool isImm1_31() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 1 && Val < 32);
562 bool isImm1_32() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 1 && Val < 33);
571 bool isImm0_63() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 0 && Val < 64);
580 bool isImm1_63() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 1 && Val < 64);
589 bool isImm1_64() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 1 && Val < 65);
598 bool isImm0_127() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 128);
607 bool isImm0_255() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 0 && Val < 256);
616 bool isImm0_65535() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
623 return (Val >= 0 && Val < 65536);
625 bool isImm32_63() const {
628 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
631 int64_t Val = MCE->getValue();
632 return (Val >= 32 && Val < 64);
634 bool isLogicalImm32() const {
637 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
640 int64_t Val = MCE->getValue();
641 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
644 return AArch64_AM::isLogicalImmediate(Val, 32);
646 bool isLogicalImm64() const {
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
654 bool isLogicalImm32Not() const {
657 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
660 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
661 return AArch64_AM::isLogicalImmediate(Val, 32);
663 bool isLogicalImm64Not() const {
666 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
669 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
671 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
672 bool isAddSubImm() const {
673 if (!isShiftedImm() && !isImm())
678 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
679 if (isShiftedImm()) {
680 unsigned Shift = ShiftedImm.ShiftAmount;
681 Expr = ShiftedImm.Val;
682 if (Shift != 0 && Shift != 12)
688 AArch64MCExpr::VariantKind ELFRefKind;
689 MCSymbolRefExpr::VariantKind DarwinRefKind;
691 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
692 DarwinRefKind, Addend)) {
693 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
694 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
695 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
696 || ELFRefKind == AArch64MCExpr::VK_LO12
697 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
698 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
699 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
700 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
701 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
702 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
703 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
706 // Otherwise it should be a real immediate in range:
707 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
708 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
710 bool isAddSubImmNeg() const {
711 if (!isShiftedImm() && !isImm())
716 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
717 if (isShiftedImm()) {
718 unsigned Shift = ShiftedImm.ShiftAmount;
719 Expr = ShiftedImm.Val;
720 if (Shift != 0 && Shift != 12)
725 // Otherwise it should be a real negative immediate in range:
726 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
727 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
729 bool isCondCode() const { return Kind == k_CondCode; }
730 bool isSIMDImmType10() const {
733 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
736 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
738 bool isBranchTarget26() const {
741 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
744 int64_t Val = MCE->getValue();
747 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
749 bool isPCRelLabel19() const {
752 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
755 int64_t Val = MCE->getValue();
758 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
760 bool isBranchTarget14() const {
763 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
766 int64_t Val = MCE->getValue();
769 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
773 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
777 AArch64MCExpr::VariantKind ELFRefKind;
778 MCSymbolRefExpr::VariantKind DarwinRefKind;
780 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
781 DarwinRefKind, Addend)) {
784 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
787 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
788 if (ELFRefKind == AllowedModifiers[i])
795 bool isMovZSymbolG3() const {
796 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
799 bool isMovZSymbolG2() const {
800 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
801 AArch64MCExpr::VK_TPREL_G2,
802 AArch64MCExpr::VK_DTPREL_G2});
805 bool isMovZSymbolG1() const {
806 return isMovWSymbol({
807 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
808 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
809 AArch64MCExpr::VK_DTPREL_G1,
813 bool isMovZSymbolG0() const {
814 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
815 AArch64MCExpr::VK_TPREL_G0,
816 AArch64MCExpr::VK_DTPREL_G0});
819 bool isMovKSymbolG3() const {
820 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
823 bool isMovKSymbolG2() const {
824 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
827 bool isMovKSymbolG1() const {
828 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
829 AArch64MCExpr::VK_TPREL_G1_NC,
830 AArch64MCExpr::VK_DTPREL_G1_NC});
833 bool isMovKSymbolG0() const {
835 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
836 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
839 template<int RegWidth, int Shift>
840 bool isMOVZMovAlias() const {
841 if (!isImm()) return false;
843 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
844 if (!CE) return false;
845 uint64_t Value = CE->getValue();
848 Value &= 0xffffffffULL;
850 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
851 if (Value == 0 && Shift != 0)
854 return (Value & ~(0xffffULL << Shift)) == 0;
857 template<int RegWidth, int Shift>
858 bool isMOVNMovAlias() const {
859 if (!isImm()) return false;
861 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
862 if (!CE) return false;
863 uint64_t Value = CE->getValue();
865 // MOVZ takes precedence over MOVN.
866 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
867 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
872 Value &= 0xffffffffULL;
874 return (Value & ~(0xffffULL << Shift)) == 0;
877 bool isFPImm() const { return Kind == k_FPImm; }
878 bool isBarrier() const { return Kind == k_Barrier; }
879 bool isSysReg() const { return Kind == k_SysReg; }
880 bool isMRSSystemRegister() const {
881 if (!isSysReg()) return false;
883 return SysReg.MRSReg != -1U;
885 bool isMSRSystemRegister() const {
886 if (!isSysReg()) return false;
887 return SysReg.MSRReg != -1U;
889 bool isSystemPStateFieldWithImm0_1() const {
890 if (!isSysReg()) return false;
891 return SysReg.PStateField == AArch64PState::PAN;
893 bool isSystemPStateFieldWithImm0_15() const {
894 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
895 return SysReg.PStateField != -1U;
897 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
898 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
899 bool isVectorRegLo() const {
900 return Kind == k_Register && Reg.isVector &&
901 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
904 bool isGPR32as64() const {
905 return Kind == k_Register && !Reg.isVector &&
906 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
908 bool isWSeqPair() const {
909 return Kind == k_Register && !Reg.isVector &&
910 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
913 bool isXSeqPair() const {
914 return Kind == k_Register && !Reg.isVector &&
915 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
919 bool isGPR64sp0() const {
920 return Kind == k_Register && !Reg.isVector &&
921 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
924 /// Is this a vector list with the type implicit (presumably attached to the
925 /// instruction itself)?
926 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
927 return Kind == k_VectorList && VectorList.Count == NumRegs &&
928 !VectorList.ElementKind;
931 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
932 bool isTypedVectorList() const {
933 if (Kind != k_VectorList)
935 if (VectorList.Count != NumRegs)
937 if (VectorList.ElementKind != ElementKind)
939 return VectorList.NumElements == NumElements;
942 bool isVectorIndex1() const {
943 return Kind == k_VectorIndex && VectorIndex.Val == 1;
945 bool isVectorIndexB() const {
946 return Kind == k_VectorIndex && VectorIndex.Val < 16;
948 bool isVectorIndexH() const {
949 return Kind == k_VectorIndex && VectorIndex.Val < 8;
951 bool isVectorIndexS() const {
952 return Kind == k_VectorIndex && VectorIndex.Val < 4;
954 bool isVectorIndexD() const {
955 return Kind == k_VectorIndex && VectorIndex.Val < 2;
957 bool isToken() const override { return Kind == k_Token; }
958 bool isTokenEqual(StringRef Str) const {
959 return Kind == k_Token && getToken() == Str;
961 bool isSysCR() const { return Kind == k_SysCR; }
962 bool isPrefetch() const { return Kind == k_Prefetch; }
963 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
964 bool isShifter() const {
965 if (!isShiftExtend())
968 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
969 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
970 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
971 ST == AArch64_AM::MSL);
973 bool isExtend() const {
974 if (!isShiftExtend())
977 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
978 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
979 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
980 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
981 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
982 ET == AArch64_AM::LSL) &&
983 getShiftExtendAmount() <= 4;
986 bool isExtend64() const {
989 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
990 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
991 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
993 bool isExtendLSL64() const {
996 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
997 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
998 ET == AArch64_AM::LSL) &&
999 getShiftExtendAmount() <= 4;
1002 template<int Width> bool isMemXExtend() const {
1005 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1006 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1007 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1008 getShiftExtendAmount() == 0);
1011 template<int Width> bool isMemWExtend() const {
1014 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1015 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1016 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1017 getShiftExtendAmount() == 0);
1020 template <unsigned width>
1021 bool isArithmeticShifter() const {
1025 // An arithmetic shifter is LSL, LSR, or ASR.
1026 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1027 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1028 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1031 template <unsigned width>
1032 bool isLogicalShifter() const {
1036 // A logical shifter is LSL, LSR, ASR or ROR.
1037 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1038 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1039 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1040 getShiftExtendAmount() < width;
1043 bool isMovImm32Shifter() const {
1047 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1048 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1049 if (ST != AArch64_AM::LSL)
1051 uint64_t Val = getShiftExtendAmount();
1052 return (Val == 0 || Val == 16);
1055 bool isMovImm64Shifter() const {
1059 // A MOVi shifter is LSL of 0 or 16.
1060 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1061 if (ST != AArch64_AM::LSL)
1063 uint64_t Val = getShiftExtendAmount();
1064 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1067 bool isLogicalVecShifter() const {
1071 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1072 unsigned Shift = getShiftExtendAmount();
1073 return getShiftExtendType() == AArch64_AM::LSL &&
1074 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1077 bool isLogicalVecHalfWordShifter() const {
1078 if (!isLogicalVecShifter())
1081 // A logical vector shifter is a left shift by 0 or 8.
1082 unsigned Shift = getShiftExtendAmount();
1083 return getShiftExtendType() == AArch64_AM::LSL &&
1084 (Shift == 0 || Shift == 8);
1087 bool isMoveVecShifter() const {
1088 if (!isShiftExtend())
1091 // A logical vector shifter is a left shift by 8 or 16.
1092 unsigned Shift = getShiftExtendAmount();
1093 return getShiftExtendType() == AArch64_AM::MSL &&
1094 (Shift == 8 || Shift == 16);
1097 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1098 // to LDUR/STUR when the offset is not legal for the former but is for
1099 // the latter. As such, in addition to checking for being a legal unscaled
1100 // address, also check that it is not a legal scaled address. This avoids
1101 // ambiguity in the matcher.
1103 bool isSImm9OffsetFB() const {
1104 return isSImm9() && !isUImm12Offset<Width / 8>();
1107 bool isAdrpLabel() const {
1108 // Validation was handled during parsing, so we just sanity check that
1109 // something didn't go haywire.
1113 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1114 int64_t Val = CE->getValue();
1115 int64_t Min = - (4096 * (1LL << (21 - 1)));
1116 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1117 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1123 bool isAdrLabel() const {
1124 // Validation was handled during parsing, so we just sanity check that
1125 // something didn't go haywire.
1129 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1130 int64_t Val = CE->getValue();
1131 int64_t Min = - (1LL << (21 - 1));
1132 int64_t Max = ((1LL << (21 - 1)) - 1);
1133 return Val >= Min && Val <= Max;
1139 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1140 // Add as immediates when possible. Null MCExpr = 0.
1142 Inst.addOperand(MCOperand::createImm(0));
1143 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1144 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1146 Inst.addOperand(MCOperand::createExpr(Expr));
1149 void addRegOperands(MCInst &Inst, unsigned N) const {
1150 assert(N == 1 && "Invalid number of operands!");
1151 Inst.addOperand(MCOperand::createReg(getReg()));
1154 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1157 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1159 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1160 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1161 RI->getEncodingValue(getReg()));
1163 Inst.addOperand(MCOperand::createReg(Reg));
1166 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1169 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1170 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1173 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1174 assert(N == 1 && "Invalid number of operands!");
1176 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1177 Inst.addOperand(MCOperand::createReg(getReg()));
1180 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 Inst.addOperand(MCOperand::createReg(getReg()));
1185 template <unsigned NumRegs>
1186 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 static const unsigned FirstRegs[] = { AArch64::D0,
1191 AArch64::D0_D1_D2_D3 };
1192 unsigned FirstReg = FirstRegs[NumRegs - 1];
1195 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1198 template <unsigned NumRegs>
1199 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1200 assert(N == 1 && "Invalid number of operands!");
1201 static const unsigned FirstRegs[] = { AArch64::Q0,
1204 AArch64::Q0_Q1_Q2_Q3 };
1205 unsigned FirstReg = FirstRegs[NumRegs - 1];
1208 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1211 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1212 assert(N == 1 && "Invalid number of operands!");
1213 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1216 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1217 assert(N == 1 && "Invalid number of operands!");
1218 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1221 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1222 assert(N == 1 && "Invalid number of operands!");
1223 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1226 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1227 assert(N == 1 && "Invalid number of operands!");
1228 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1231 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1232 assert(N == 1 && "Invalid number of operands!");
1233 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1236 void addImmOperands(MCInst &Inst, unsigned N) const {
1237 assert(N == 1 && "Invalid number of operands!");
1238 // If this is a pageoff symrefexpr with an addend, adjust the addend
1239 // to be only the page-offset portion. Otherwise, just add the expr
1241 addExpr(Inst, getImm());
1244 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1245 assert(N == 2 && "Invalid number of operands!");
1246 if (isShiftedImm()) {
1247 addExpr(Inst, getShiftedImmVal());
1248 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1250 addExpr(Inst, getImm());
1251 Inst.addOperand(MCOperand::createImm(0));
1255 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1256 assert(N == 2 && "Invalid number of operands!");
1258 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1259 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1260 int64_t Val = -CE->getValue();
1261 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1263 Inst.addOperand(MCOperand::createImm(Val));
1264 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1267 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1268 assert(N == 1 && "Invalid number of operands!");
1269 Inst.addOperand(MCOperand::createImm(getCondCode()));
1272 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1276 addExpr(Inst, getImm());
1278 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1281 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1282 addImmOperands(Inst, N);
1286 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1287 assert(N == 1 && "Invalid number of operands!");
1288 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1291 Inst.addOperand(MCOperand::createExpr(getImm()));
1294 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1297 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1298 assert(N == 1 && "Invalid number of operands!");
1299 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1300 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1303 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1306 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1309 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1312 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1315 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1316 assert(N == 1 && "Invalid number of operands!");
1317 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1318 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1321 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1324 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1327 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
1329 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1330 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1333 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1339 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1341 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1345 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348 assert(MCE && "Invalid constant immediate operand!");
1349 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1352 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1355 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1358 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1359 assert(N == 1 && "Invalid number of operands!");
1360 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1361 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1364 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1370 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1371 assert(N == 1 && "Invalid number of operands!");
1372 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1373 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1376 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1377 assert(N == 1 && "Invalid number of operands!");
1378 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1379 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1382 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1383 assert(N == 1 && "Invalid number of operands!");
1384 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1385 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1388 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1389 assert(N == 1 && "Invalid number of operands!");
1390 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1391 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1394 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1395 assert(N == 1 && "Invalid number of operands!");
1396 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1397 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1400 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1401 assert(N == 1 && "Invalid number of operands!");
1402 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1403 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1406 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1407 assert(N == 1 && "Invalid number of operands!");
1408 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1412 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1413 assert(N == 1 && "Invalid number of operands!");
1414 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1416 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1417 Inst.addOperand(MCOperand::createImm(encoding));
1420 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1421 assert(N == 1 && "Invalid number of operands!");
1422 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1423 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1424 Inst.addOperand(MCOperand::createImm(encoding));
1427 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1428 assert(N == 1 && "Invalid number of operands!");
1429 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1430 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1431 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1432 Inst.addOperand(MCOperand::createImm(encoding));
1435 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1436 assert(N == 1 && "Invalid number of operands!");
1437 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1439 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1440 Inst.addOperand(MCOperand::createImm(encoding));
1443 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1444 assert(N == 1 && "Invalid number of operands!");
1445 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1446 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1447 Inst.addOperand(MCOperand::createImm(encoding));
1450 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1451 // Branch operands don't encode the low bits, so shift them off
1452 // here. If it's a label, however, just put it on directly as there's
1453 // not enough information now to do anything.
1454 assert(N == 1 && "Invalid number of operands!");
1455 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1457 addExpr(Inst, getImm());
1460 assert(MCE && "Invalid constant immediate operand!");
1461 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1464 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1465 // Branch operands don't encode the low bits, so shift them off
1466 // here. If it's a label, however, just put it on directly as there's
1467 // not enough information now to do anything.
1468 assert(N == 1 && "Invalid number of operands!");
1469 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1471 addExpr(Inst, getImm());
1474 assert(MCE && "Invalid constant immediate operand!");
1475 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1478 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1479 // Branch operands don't encode the low bits, so shift them off
1480 // here. If it's a label, however, just put it on directly as there's
1481 // not enough information now to do anything.
1482 assert(N == 1 && "Invalid number of operands!");
1483 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1485 addExpr(Inst, getImm());
1488 assert(MCE && "Invalid constant immediate operand!");
1489 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1492 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1493 assert(N == 1 && "Invalid number of operands!");
1494 Inst.addOperand(MCOperand::createImm(getFPImm()));
1497 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1498 assert(N == 1 && "Invalid number of operands!");
1499 Inst.addOperand(MCOperand::createImm(getBarrier()));
1502 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1503 assert(N == 1 && "Invalid number of operands!");
1505 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1508 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1509 assert(N == 1 && "Invalid number of operands!");
1511 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1514 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1515 assert(N == 1 && "Invalid number of operands!");
1517 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1520 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1521 assert(N == 1 && "Invalid number of operands!");
1523 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1526 void addSysCROperands(MCInst &Inst, unsigned N) const {
1527 assert(N == 1 && "Invalid number of operands!");
1528 Inst.addOperand(MCOperand::createImm(getSysCR()));
1531 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1532 assert(N == 1 && "Invalid number of operands!");
1533 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1536 void addShifterOperands(MCInst &Inst, unsigned N) const {
1537 assert(N == 1 && "Invalid number of operands!");
1539 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1540 Inst.addOperand(MCOperand::createImm(Imm));
1543 void addExtendOperands(MCInst &Inst, unsigned N) const {
1544 assert(N == 1 && "Invalid number of operands!");
1545 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1546 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1547 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1548 Inst.addOperand(MCOperand::createImm(Imm));
1551 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1552 assert(N == 1 && "Invalid number of operands!");
1553 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1554 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1555 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1556 Inst.addOperand(MCOperand::createImm(Imm));
1559 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1560 assert(N == 2 && "Invalid number of operands!");
1561 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1562 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1563 Inst.addOperand(MCOperand::createImm(IsSigned));
1564 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1567 // For 8-bit load/store instructions with a register offset, both the
1568 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1569 // they're disambiguated by whether the shift was explicit or implicit rather
1571 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1572 assert(N == 2 && "Invalid number of operands!");
1573 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1574 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1575 Inst.addOperand(MCOperand::createImm(IsSigned));
1576 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1580 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1581 assert(N == 1 && "Invalid number of operands!");
1583 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1584 uint64_t Value = CE->getValue();
1585 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1589 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1590 assert(N == 1 && "Invalid number of operands!");
1592 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1593 uint64_t Value = CE->getValue();
1594 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1597 void print(raw_ostream &OS) const override;
1599 static std::unique_ptr<AArch64Operand>
1600 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1601 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1602 Op->Tok.Data = Str.data();
1603 Op->Tok.Length = Str.size();
1604 Op->Tok.IsSuffix = IsSuffix;
1610 static std::unique_ptr<AArch64Operand>
1611 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1612 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1613 Op->Reg.RegNum = RegNum;
1614 Op->Reg.isVector = isVector;
1620 static std::unique_ptr<AArch64Operand>
1621 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1622 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1623 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1624 Op->VectorList.RegNum = RegNum;
1625 Op->VectorList.Count = Count;
1626 Op->VectorList.NumElements = NumElements;
1627 Op->VectorList.ElementKind = ElementKind;
1633 static std::unique_ptr<AArch64Operand>
1634 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1635 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1636 Op->VectorIndex.Val = Idx;
1642 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1643 SMLoc E, MCContext &Ctx) {
1644 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1651 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1652 unsigned ShiftAmount,
1655 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1656 Op->ShiftedImm .Val = Val;
1657 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1663 static std::unique_ptr<AArch64Operand>
1664 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1665 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1666 Op->CondCode.Code = Code;
1672 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1674 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1675 Op->FPImm.Val = Val;
1681 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1685 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1686 Op->Barrier.Val = Val;
1687 Op->Barrier.Data = Str.data();
1688 Op->Barrier.Length = Str.size();
1694 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1697 uint32_t PStateField,
1699 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1700 Op->SysReg.Data = Str.data();
1701 Op->SysReg.Length = Str.size();
1702 Op->SysReg.MRSReg = MRSReg;
1703 Op->SysReg.MSRReg = MSRReg;
1704 Op->SysReg.PStateField = PStateField;
1710 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1711 SMLoc E, MCContext &Ctx) {
1712 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1713 Op->SysCRImm.Val = Val;
1719 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1723 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1724 Op->Prefetch.Val = Val;
1725 Op->Barrier.Data = Str.data();
1726 Op->Barrier.Length = Str.size();
1732 static std::unique_ptr<AArch64Operand>
1733 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1734 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1735 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1736 Op->ShiftExtend.Type = ShOp;
1737 Op->ShiftExtend.Amount = Val;
1738 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1745 } // end anonymous namespace.
1747 void AArch64Operand::print(raw_ostream &OS) const {
1750 OS << "<fpimm " << getFPImm() << "("
1751 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1754 StringRef Name = getBarrierName();
1756 OS << "<barrier " << Name << ">";
1758 OS << "<barrier invalid #" << getBarrier() << ">";
1764 case k_ShiftedImm: {
1765 unsigned Shift = getShiftedImmShift();
1766 OS << "<shiftedimm ";
1767 OS << *getShiftedImmVal();
1768 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1772 OS << "<condcode " << getCondCode() << ">";
1775 OS << "<register " << getReg() << ">";
1777 case k_VectorList: {
1778 OS << "<vectorlist ";
1779 unsigned Reg = getVectorListStart();
1780 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1781 OS << Reg + i << " ";
1786 OS << "<vectorindex " << getVectorIndex() << ">";
1789 OS << "<sysreg: " << getSysReg() << '>';
1792 OS << "'" << getToken() << "'";
1795 OS << "c" << getSysCR();
1798 StringRef Name = getPrefetchName();
1800 OS << "<prfop " << Name << ">";
1802 OS << "<prfop invalid #" << getPrefetch() << ">";
1805 case k_ShiftExtend: {
1806 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1807 << getShiftExtendAmount();
1808 if (!hasShiftExtendAmount())
1816 /// @name Auto-generated Match Functions
1819 static unsigned MatchRegisterName(StringRef Name);
1823 static unsigned matchVectorRegName(StringRef Name) {
1824 return StringSwitch<unsigned>(Name.lower())
1825 .Case("v0", AArch64::Q0)
1826 .Case("v1", AArch64::Q1)
1827 .Case("v2", AArch64::Q2)
1828 .Case("v3", AArch64::Q3)
1829 .Case("v4", AArch64::Q4)
1830 .Case("v5", AArch64::Q5)
1831 .Case("v6", AArch64::Q6)
1832 .Case("v7", AArch64::Q7)
1833 .Case("v8", AArch64::Q8)
1834 .Case("v9", AArch64::Q9)
1835 .Case("v10", AArch64::Q10)
1836 .Case("v11", AArch64::Q11)
1837 .Case("v12", AArch64::Q12)
1838 .Case("v13", AArch64::Q13)
1839 .Case("v14", AArch64::Q14)
1840 .Case("v15", AArch64::Q15)
1841 .Case("v16", AArch64::Q16)
1842 .Case("v17", AArch64::Q17)
1843 .Case("v18", AArch64::Q18)
1844 .Case("v19", AArch64::Q19)
1845 .Case("v20", AArch64::Q20)
1846 .Case("v21", AArch64::Q21)
1847 .Case("v22", AArch64::Q22)
1848 .Case("v23", AArch64::Q23)
1849 .Case("v24", AArch64::Q24)
1850 .Case("v25", AArch64::Q25)
1851 .Case("v26", AArch64::Q26)
1852 .Case("v27", AArch64::Q27)
1853 .Case("v28", AArch64::Q28)
1854 .Case("v29", AArch64::Q29)
1855 .Case("v30", AArch64::Q30)
1856 .Case("v31", AArch64::Q31)
1860 static bool isValidVectorKind(StringRef Name) {
1861 return StringSwitch<bool>(Name.lower())
1871 // Accept the width neutral ones, too, for verbose syntax. If those
1872 // aren't used in the right places, the token operand won't match so
1873 // all will work out.
1881 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1882 char &ElementKind) {
1883 assert(isValidVectorKind(Name));
1885 ElementKind = Name.lower()[Name.size() - 1];
1888 if (Name.size() == 2)
1891 // Parse the lane count
1892 Name = Name.drop_front();
1893 while (isdigit(Name.front())) {
1894 NumElements = 10 * NumElements + (Name.front() - '0');
1895 Name = Name.drop_front();
1899 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1901 StartLoc = getLoc();
1902 RegNo = tryParseRegister();
1903 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1904 return (RegNo == (unsigned)-1);
1907 // Matches a register name or register alias previously defined by '.req'
1908 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1910 unsigned RegNum = isVector ? matchVectorRegName(Name)
1911 : MatchRegisterName(Name);
1914 // Check for aliases registered via .req. Canonicalize to lower case.
1915 // That's more consistent since register names are case insensitive, and
1916 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1917 auto Entry = RegisterReqs.find(Name.lower());
1918 if (Entry == RegisterReqs.end())
1920 // set RegNum if the match is the right kind of register
1921 if (isVector == Entry->getValue().first)
1922 RegNum = Entry->getValue().second;
1927 /// tryParseRegister - Try to parse a register name. The token must be an
1928 /// Identifier when called, and if it is a register name the token is eaten and
1929 /// the register is added to the operand list.
1930 int AArch64AsmParser::tryParseRegister() {
1931 MCAsmParser &Parser = getParser();
1932 const AsmToken &Tok = Parser.getTok();
1933 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1935 std::string lowerCase = Tok.getString().lower();
1936 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1937 // Also handle a few aliases of registers.
1939 RegNum = StringSwitch<unsigned>(lowerCase)
1940 .Case("fp", AArch64::FP)
1941 .Case("lr", AArch64::LR)
1942 .Case("x31", AArch64::XZR)
1943 .Case("w31", AArch64::WZR)
1949 Parser.Lex(); // Eat identifier token.
1953 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1954 /// kind specifier. If it is a register specifier, eat the token and return it.
1955 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1956 MCAsmParser &Parser = getParser();
1957 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1958 TokError("vector register expected");
1962 StringRef Name = Parser.getTok().getString();
1963 // If there is a kind specifier, it's separated from the register name by
1965 size_t Start = 0, Next = Name.find('.');
1966 StringRef Head = Name.slice(Start, Next);
1967 unsigned RegNum = matchRegisterNameAlias(Head, true);
1970 if (Next != StringRef::npos) {
1971 Kind = Name.slice(Next, StringRef::npos);
1972 if (!isValidVectorKind(Kind)) {
1973 TokError("invalid vector kind qualifier");
1977 Parser.Lex(); // Eat the register token.
1982 TokError("vector register expected");
1986 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1987 AArch64AsmParser::OperandMatchResultTy
1988 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1989 MCAsmParser &Parser = getParser();
1992 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1993 Error(S, "Expected cN operand where 0 <= N <= 15");
1994 return MatchOperand_ParseFail;
1997 StringRef Tok = Parser.getTok().getIdentifier();
1998 if (Tok[0] != 'c' && Tok[0] != 'C') {
1999 Error(S, "Expected cN operand where 0 <= N <= 15");
2000 return MatchOperand_ParseFail;
2004 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2005 if (BadNum || CRNum > 15) {
2006 Error(S, "Expected cN operand where 0 <= N <= 15");
2007 return MatchOperand_ParseFail;
2010 Parser.Lex(); // Eat identifier token.
2012 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2013 return MatchOperand_Success;
2016 /// tryParsePrefetch - Try to parse a prefetch operand.
2017 AArch64AsmParser::OperandMatchResultTy
2018 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2019 MCAsmParser &Parser = getParser();
2021 const AsmToken &Tok = Parser.getTok();
2022 // Either an identifier for named values or a 5-bit immediate.
2023 bool Hash = Tok.is(AsmToken::Hash);
2024 if (Hash || Tok.is(AsmToken::Integer)) {
2026 Parser.Lex(); // Eat hash token.
2027 const MCExpr *ImmVal;
2028 if (getParser().parseExpression(ImmVal))
2029 return MatchOperand_ParseFail;
2031 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2033 TokError("immediate value expected for prefetch operand");
2034 return MatchOperand_ParseFail;
2036 unsigned prfop = MCE->getValue();
2038 TokError("prefetch operand out of range, [0,31] expected");
2039 return MatchOperand_ParseFail;
2043 auto Mapper = AArch64PRFM::PRFMMapper();
2045 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2046 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2048 return MatchOperand_Success;
2051 if (Tok.isNot(AsmToken::Identifier)) {
2052 TokError("pre-fetch hint expected");
2053 return MatchOperand_ParseFail;
2057 auto Mapper = AArch64PRFM::PRFMMapper();
2059 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2061 TokError("pre-fetch hint expected");
2062 return MatchOperand_ParseFail;
2065 Parser.Lex(); // Eat identifier token.
2066 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2068 return MatchOperand_Success;
2071 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2073 AArch64AsmParser::OperandMatchResultTy
2074 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2075 MCAsmParser &Parser = getParser();
2079 if (Parser.getTok().is(AsmToken::Hash)) {
2080 Parser.Lex(); // Eat hash token.
2083 if (parseSymbolicImmVal(Expr))
2084 return MatchOperand_ParseFail;
2086 AArch64MCExpr::VariantKind ELFRefKind;
2087 MCSymbolRefExpr::VariantKind DarwinRefKind;
2089 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2090 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2091 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2092 // No modifier was specified at all; this is the syntax for an ELF basic
2093 // ADRP relocation (unfortunately).
2095 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2096 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2097 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2099 Error(S, "gotpage label reference not allowed an addend");
2100 return MatchOperand_ParseFail;
2101 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2102 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2103 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2104 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2105 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2106 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2107 // The operand must be an @page or @gotpage qualified symbolref.
2108 Error(S, "page or gotpage label reference expected");
2109 return MatchOperand_ParseFail;
2113 // We have either a label reference possibly with addend or an immediate. The
2114 // addend is a raw value here. The linker will adjust it to only reference the
2116 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2117 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2119 return MatchOperand_Success;
2122 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2124 AArch64AsmParser::OperandMatchResultTy
2125 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2126 MCAsmParser &Parser = getParser();
2130 if (Parser.getTok().is(AsmToken::Hash)) {
2131 Parser.Lex(); // Eat hash token.
2134 if (getParser().parseExpression(Expr))
2135 return MatchOperand_ParseFail;
2137 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2138 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2140 return MatchOperand_Success;
2143 /// tryParseFPImm - A floating point immediate expression operand.
2144 AArch64AsmParser::OperandMatchResultTy
2145 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2146 MCAsmParser &Parser = getParser();
2150 if (Parser.getTok().is(AsmToken::Hash)) {
2151 Parser.Lex(); // Eat '#'
2155 // Handle negation, as that still comes through as a separate token.
2156 bool isNegative = false;
2157 if (Parser.getTok().is(AsmToken::Minus)) {
2161 const AsmToken &Tok = Parser.getTok();
2162 if (Tok.is(AsmToken::Real)) {
2163 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2165 RealVal.changeSign();
2167 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2168 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2169 Parser.Lex(); // Eat the token.
2170 // Check for out of range values. As an exception, we let Zero through,
2171 // as we handle that special case in post-processing before matching in
2172 // order to use the zero register for it.
2173 if (Val == -1 && !RealVal.isPosZero()) {
2174 TokError("expected compatible register or floating-point constant");
2175 return MatchOperand_ParseFail;
2177 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2178 return MatchOperand_Success;
2180 if (Tok.is(AsmToken::Integer)) {
2182 if (!isNegative && Tok.getString().startswith("0x")) {
2183 Val = Tok.getIntVal();
2184 if (Val > 255 || Val < 0) {
2185 TokError("encoded floating point value out of range");
2186 return MatchOperand_ParseFail;
2189 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2190 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2191 // If we had a '-' in front, toggle the sign bit.
2192 IntVal ^= (uint64_t)isNegative << 63;
2193 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2195 Parser.Lex(); // Eat the token.
2196 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2197 return MatchOperand_Success;
2201 return MatchOperand_NoMatch;
2203 TokError("invalid floating point immediate");
2204 return MatchOperand_ParseFail;
2207 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2208 AArch64AsmParser::OperandMatchResultTy
2209 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2210 MCAsmParser &Parser = getParser();
2213 if (Parser.getTok().is(AsmToken::Hash))
2214 Parser.Lex(); // Eat '#'
2215 else if (Parser.getTok().isNot(AsmToken::Integer))
2216 // Operand should start from # or should be integer, emit error otherwise.
2217 return MatchOperand_NoMatch;
2220 if (parseSymbolicImmVal(Imm))
2221 return MatchOperand_ParseFail;
2222 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2223 uint64_t ShiftAmount = 0;
2224 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2226 int64_t Val = MCE->getValue();
2227 if (Val > 0xfff && (Val & 0xfff) == 0) {
2228 Imm = MCConstantExpr::create(Val >> 12, getContext());
2232 SMLoc E = Parser.getTok().getLoc();
2233 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2235 return MatchOperand_Success;
2241 // The optional operand must be "lsl #N" where N is non-negative.
2242 if (!Parser.getTok().is(AsmToken::Identifier) ||
2243 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2244 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2245 return MatchOperand_ParseFail;
2251 if (Parser.getTok().is(AsmToken::Hash)) {
2255 if (Parser.getTok().isNot(AsmToken::Integer)) {
2256 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2257 return MatchOperand_ParseFail;
2260 int64_t ShiftAmount = Parser.getTok().getIntVal();
2262 if (ShiftAmount < 0) {
2263 Error(Parser.getTok().getLoc(), "positive shift amount required");
2264 return MatchOperand_ParseFail;
2266 Parser.Lex(); // Eat the number
2268 SMLoc E = Parser.getTok().getLoc();
2269 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2270 S, E, getContext()));
2271 return MatchOperand_Success;
2274 /// parseCondCodeString - Parse a Condition Code string.
2275 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2276 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2277 .Case("eq", AArch64CC::EQ)
2278 .Case("ne", AArch64CC::NE)
2279 .Case("cs", AArch64CC::HS)
2280 .Case("hs", AArch64CC::HS)
2281 .Case("cc", AArch64CC::LO)
2282 .Case("lo", AArch64CC::LO)
2283 .Case("mi", AArch64CC::MI)
2284 .Case("pl", AArch64CC::PL)
2285 .Case("vs", AArch64CC::VS)
2286 .Case("vc", AArch64CC::VC)
2287 .Case("hi", AArch64CC::HI)
2288 .Case("ls", AArch64CC::LS)
2289 .Case("ge", AArch64CC::GE)
2290 .Case("lt", AArch64CC::LT)
2291 .Case("gt", AArch64CC::GT)
2292 .Case("le", AArch64CC::LE)
2293 .Case("al", AArch64CC::AL)
2294 .Case("nv", AArch64CC::NV)
2295 .Default(AArch64CC::Invalid);
2299 /// parseCondCode - Parse a Condition Code operand.
2300 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2301 bool invertCondCode) {
2302 MCAsmParser &Parser = getParser();
2304 const AsmToken &Tok = Parser.getTok();
2305 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2307 StringRef Cond = Tok.getString();
2308 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2309 if (CC == AArch64CC::Invalid)
2310 return TokError("invalid condition code");
2311 Parser.Lex(); // Eat identifier token.
2313 if (invertCondCode) {
2314 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2315 return TokError("condition codes AL and NV are invalid for this instruction");
2316 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2320 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2324 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2325 /// them if present.
2326 AArch64AsmParser::OperandMatchResultTy
2327 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2328 MCAsmParser &Parser = getParser();
2329 const AsmToken &Tok = Parser.getTok();
2330 std::string LowerID = Tok.getString().lower();
2331 AArch64_AM::ShiftExtendType ShOp =
2332 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2333 .Case("lsl", AArch64_AM::LSL)
2334 .Case("lsr", AArch64_AM::LSR)
2335 .Case("asr", AArch64_AM::ASR)
2336 .Case("ror", AArch64_AM::ROR)
2337 .Case("msl", AArch64_AM::MSL)
2338 .Case("uxtb", AArch64_AM::UXTB)
2339 .Case("uxth", AArch64_AM::UXTH)
2340 .Case("uxtw", AArch64_AM::UXTW)
2341 .Case("uxtx", AArch64_AM::UXTX)
2342 .Case("sxtb", AArch64_AM::SXTB)
2343 .Case("sxth", AArch64_AM::SXTH)
2344 .Case("sxtw", AArch64_AM::SXTW)
2345 .Case("sxtx", AArch64_AM::SXTX)
2346 .Default(AArch64_AM::InvalidShiftExtend);
2348 if (ShOp == AArch64_AM::InvalidShiftExtend)
2349 return MatchOperand_NoMatch;
2351 SMLoc S = Tok.getLoc();
2354 bool Hash = getLexer().is(AsmToken::Hash);
2355 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2356 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2357 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2358 ShOp == AArch64_AM::MSL) {
2359 // We expect a number here.
2360 TokError("expected #imm after shift specifier");
2361 return MatchOperand_ParseFail;
2364 // "extend" type operatoins don't need an immediate, #0 is implicit.
2365 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2367 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2368 return MatchOperand_Success;
2372 Parser.Lex(); // Eat the '#'.
2374 // Make sure we do actually have a number or a parenthesized expression.
2375 SMLoc E = Parser.getTok().getLoc();
2376 if (!Parser.getTok().is(AsmToken::Integer) &&
2377 !Parser.getTok().is(AsmToken::LParen)) {
2378 Error(E, "expected integer shift amount");
2379 return MatchOperand_ParseFail;
2382 const MCExpr *ImmVal;
2383 if (getParser().parseExpression(ImmVal))
2384 return MatchOperand_ParseFail;
2386 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2388 Error(E, "expected constant '#imm' after shift specifier");
2389 return MatchOperand_ParseFail;
2392 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2393 Operands.push_back(AArch64Operand::CreateShiftExtend(
2394 ShOp, MCE->getValue(), true, S, E, getContext()));
2395 return MatchOperand_Success;
2398 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2399 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2400 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2401 OperandVector &Operands) {
2402 if (Name.find('.') != StringRef::npos)
2403 return TokError("invalid operand");
2407 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2409 MCAsmParser &Parser = getParser();
2410 const AsmToken &Tok = Parser.getTok();
2411 StringRef Op = Tok.getString();
2412 SMLoc S = Tok.getLoc();
2414 const MCExpr *Expr = nullptr;
2416 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2418 Expr = MCConstantExpr::create(op1, getContext()); \
2419 Operands.push_back( \
2420 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2421 Operands.push_back( \
2422 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2423 Operands.push_back( \
2424 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2425 Expr = MCConstantExpr::create(op2, getContext()); \
2426 Operands.push_back( \
2427 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2430 if (Mnemonic == "ic") {
2431 if (!Op.compare_lower("ialluis")) {
2432 // SYS #0, C7, C1, #0
2433 SYS_ALIAS(0, 7, 1, 0);
2434 } else if (!Op.compare_lower("iallu")) {
2435 // SYS #0, C7, C5, #0
2436 SYS_ALIAS(0, 7, 5, 0);
2437 } else if (!Op.compare_lower("ivau")) {
2438 // SYS #3, C7, C5, #1
2439 SYS_ALIAS(3, 7, 5, 1);
2441 return TokError("invalid operand for IC instruction");
2443 } else if (Mnemonic == "dc") {
2444 if (!Op.compare_lower("zva")) {
2445 // SYS #3, C7, C4, #1
2446 SYS_ALIAS(3, 7, 4, 1);
2447 } else if (!Op.compare_lower("ivac")) {
2448 // SYS #3, C7, C6, #1
2449 SYS_ALIAS(0, 7, 6, 1);
2450 } else if (!Op.compare_lower("isw")) {
2451 // SYS #0, C7, C6, #2
2452 SYS_ALIAS(0, 7, 6, 2);
2453 } else if (!Op.compare_lower("cvac")) {
2454 // SYS #3, C7, C10, #1
2455 SYS_ALIAS(3, 7, 10, 1);
2456 } else if (!Op.compare_lower("csw")) {
2457 // SYS #0, C7, C10, #2
2458 SYS_ALIAS(0, 7, 10, 2);
2459 } else if (!Op.compare_lower("cvau")) {
2460 // SYS #3, C7, C11, #1
2461 SYS_ALIAS(3, 7, 11, 1);
2462 } else if (!Op.compare_lower("civac")) {
2463 // SYS #3, C7, C14, #1
2464 SYS_ALIAS(3, 7, 14, 1);
2465 } else if (!Op.compare_lower("cisw")) {
2466 // SYS #0, C7, C14, #2
2467 SYS_ALIAS(0, 7, 14, 2);
2468 } else if (!Op.compare_lower("cvap")) {
2469 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2470 // SYS #3, C7, C12, #1
2471 SYS_ALIAS(3, 7, 12, 1);
2473 return TokError("DC CVAP requires ARMv8.2a");
2476 return TokError("invalid operand for DC instruction");
2478 } else if (Mnemonic == "at") {
2479 if (!Op.compare_lower("s1e1r")) {
2480 // SYS #0, C7, C8, #0
2481 SYS_ALIAS(0, 7, 8, 0);
2482 } else if (!Op.compare_lower("s1e2r")) {
2483 // SYS #4, C7, C8, #0
2484 SYS_ALIAS(4, 7, 8, 0);
2485 } else if (!Op.compare_lower("s1e3r")) {
2486 // SYS #6, C7, C8, #0
2487 SYS_ALIAS(6, 7, 8, 0);
2488 } else if (!Op.compare_lower("s1e1w")) {
2489 // SYS #0, C7, C8, #1
2490 SYS_ALIAS(0, 7, 8, 1);
2491 } else if (!Op.compare_lower("s1e2w")) {
2492 // SYS #4, C7, C8, #1
2493 SYS_ALIAS(4, 7, 8, 1);
2494 } else if (!Op.compare_lower("s1e3w")) {
2495 // SYS #6, C7, C8, #1
2496 SYS_ALIAS(6, 7, 8, 1);
2497 } else if (!Op.compare_lower("s1e0r")) {
2498 // SYS #0, C7, C8, #3
2499 SYS_ALIAS(0, 7, 8, 2);
2500 } else if (!Op.compare_lower("s1e0w")) {
2501 // SYS #0, C7, C8, #3
2502 SYS_ALIAS(0, 7, 8, 3);
2503 } else if (!Op.compare_lower("s12e1r")) {
2504 // SYS #4, C7, C8, #4
2505 SYS_ALIAS(4, 7, 8, 4);
2506 } else if (!Op.compare_lower("s12e1w")) {
2507 // SYS #4, C7, C8, #5
2508 SYS_ALIAS(4, 7, 8, 5);
2509 } else if (!Op.compare_lower("s12e0r")) {
2510 // SYS #4, C7, C8, #6
2511 SYS_ALIAS(4, 7, 8, 6);
2512 } else if (!Op.compare_lower("s12e0w")) {
2513 // SYS #4, C7, C8, #7
2514 SYS_ALIAS(4, 7, 8, 7);
2516 return TokError("invalid operand for AT instruction");
2518 } else if (Mnemonic == "tlbi") {
2519 if (!Op.compare_lower("vmalle1is")) {
2520 // SYS #0, C8, C3, #0
2521 SYS_ALIAS(0, 8, 3, 0);
2522 } else if (!Op.compare_lower("alle2is")) {
2523 // SYS #4, C8, C3, #0
2524 SYS_ALIAS(4, 8, 3, 0);
2525 } else if (!Op.compare_lower("alle3is")) {
2526 // SYS #6, C8, C3, #0
2527 SYS_ALIAS(6, 8, 3, 0);
2528 } else if (!Op.compare_lower("vae1is")) {
2529 // SYS #0, C8, C3, #1
2530 SYS_ALIAS(0, 8, 3, 1);
2531 } else if (!Op.compare_lower("vae2is")) {
2532 // SYS #4, C8, C3, #1
2533 SYS_ALIAS(4, 8, 3, 1);
2534 } else if (!Op.compare_lower("vae3is")) {
2535 // SYS #6, C8, C3, #1
2536 SYS_ALIAS(6, 8, 3, 1);
2537 } else if (!Op.compare_lower("aside1is")) {
2538 // SYS #0, C8, C3, #2
2539 SYS_ALIAS(0, 8, 3, 2);
2540 } else if (!Op.compare_lower("vaae1is")) {
2541 // SYS #0, C8, C3, #3
2542 SYS_ALIAS(0, 8, 3, 3);
2543 } else if (!Op.compare_lower("alle1is")) {
2544 // SYS #4, C8, C3, #4
2545 SYS_ALIAS(4, 8, 3, 4);
2546 } else if (!Op.compare_lower("vale1is")) {
2547 // SYS #0, C8, C3, #5
2548 SYS_ALIAS(0, 8, 3, 5);
2549 } else if (!Op.compare_lower("vaale1is")) {
2550 // SYS #0, C8, C3, #7
2551 SYS_ALIAS(0, 8, 3, 7);
2552 } else if (!Op.compare_lower("vmalle1")) {
2553 // SYS #0, C8, C7, #0
2554 SYS_ALIAS(0, 8, 7, 0);
2555 } else if (!Op.compare_lower("alle2")) {
2556 // SYS #4, C8, C7, #0
2557 SYS_ALIAS(4, 8, 7, 0);
2558 } else if (!Op.compare_lower("vale2is")) {
2559 // SYS #4, C8, C3, #5
2560 SYS_ALIAS(4, 8, 3, 5);
2561 } else if (!Op.compare_lower("vale3is")) {
2562 // SYS #6, C8, C3, #5
2563 SYS_ALIAS(6, 8, 3, 5);
2564 } else if (!Op.compare_lower("alle3")) {
2565 // SYS #6, C8, C7, #0
2566 SYS_ALIAS(6, 8, 7, 0);
2567 } else if (!Op.compare_lower("vae1")) {
2568 // SYS #0, C8, C7, #1
2569 SYS_ALIAS(0, 8, 7, 1);
2570 } else if (!Op.compare_lower("vae2")) {
2571 // SYS #4, C8, C7, #1
2572 SYS_ALIAS(4, 8, 7, 1);
2573 } else if (!Op.compare_lower("vae3")) {
2574 // SYS #6, C8, C7, #1
2575 SYS_ALIAS(6, 8, 7, 1);
2576 } else if (!Op.compare_lower("aside1")) {
2577 // SYS #0, C8, C7, #2
2578 SYS_ALIAS(0, 8, 7, 2);
2579 } else if (!Op.compare_lower("vaae1")) {
2580 // SYS #0, C8, C7, #3
2581 SYS_ALIAS(0, 8, 7, 3);
2582 } else if (!Op.compare_lower("alle1")) {
2583 // SYS #4, C8, C7, #4
2584 SYS_ALIAS(4, 8, 7, 4);
2585 } else if (!Op.compare_lower("vale1")) {
2586 // SYS #0, C8, C7, #5
2587 SYS_ALIAS(0, 8, 7, 5);
2588 } else if (!Op.compare_lower("vale2")) {
2589 // SYS #4, C8, C7, #5
2590 SYS_ALIAS(4, 8, 7, 5);
2591 } else if (!Op.compare_lower("vale3")) {
2592 // SYS #6, C8, C7, #5
2593 SYS_ALIAS(6, 8, 7, 5);
2594 } else if (!Op.compare_lower("vaale1")) {
2595 // SYS #0, C8, C7, #7
2596 SYS_ALIAS(0, 8, 7, 7);
2597 } else if (!Op.compare_lower("ipas2e1")) {
2598 // SYS #4, C8, C4, #1
2599 SYS_ALIAS(4, 8, 4, 1);
2600 } else if (!Op.compare_lower("ipas2le1")) {
2601 // SYS #4, C8, C4, #5
2602 SYS_ALIAS(4, 8, 4, 5);
2603 } else if (!Op.compare_lower("ipas2e1is")) {
2604 // SYS #4, C8, C4, #1
2605 SYS_ALIAS(4, 8, 0, 1);
2606 } else if (!Op.compare_lower("ipas2le1is")) {
2607 // SYS #4, C8, C4, #5
2608 SYS_ALIAS(4, 8, 0, 5);
2609 } else if (!Op.compare_lower("vmalls12e1")) {
2610 // SYS #4, C8, C7, #6
2611 SYS_ALIAS(4, 8, 7, 6);
2612 } else if (!Op.compare_lower("vmalls12e1is")) {
2613 // SYS #4, C8, C3, #6
2614 SYS_ALIAS(4, 8, 3, 6);
2616 return TokError("invalid operand for TLBI instruction");
2622 Parser.Lex(); // Eat operand.
2624 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2625 bool HasRegister = false;
2627 // Check for the optional register operand.
2628 if (getLexer().is(AsmToken::Comma)) {
2629 Parser.Lex(); // Eat comma.
2631 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2632 return TokError("expected register operand");
2637 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2638 Parser.eatToEndOfStatement();
2639 return TokError("unexpected token in argument list");
2642 if (ExpectRegister && !HasRegister) {
2643 return TokError("specified " + Mnemonic + " op requires a register");
2645 else if (!ExpectRegister && HasRegister) {
2646 return TokError("specified " + Mnemonic + " op does not use a register");
2649 Parser.Lex(); // Consume the EndOfStatement
2653 AArch64AsmParser::OperandMatchResultTy
2654 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2655 MCAsmParser &Parser = getParser();
2656 const AsmToken &Tok = Parser.getTok();
2658 // Can be either a #imm style literal or an option name
2659 bool Hash = Tok.is(AsmToken::Hash);
2660 if (Hash || Tok.is(AsmToken::Integer)) {
2661 // Immediate operand.
2663 Parser.Lex(); // Eat the '#'
2664 const MCExpr *ImmVal;
2665 SMLoc ExprLoc = getLoc();
2666 if (getParser().parseExpression(ImmVal))
2667 return MatchOperand_ParseFail;
2668 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2670 Error(ExprLoc, "immediate value expected for barrier operand");
2671 return MatchOperand_ParseFail;
2673 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2674 Error(ExprLoc, "barrier operand out of range");
2675 return MatchOperand_ParseFail;
2678 auto Mapper = AArch64DB::DBarrierMapper();
2680 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2681 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2682 ExprLoc, getContext()));
2683 return MatchOperand_Success;
2686 if (Tok.isNot(AsmToken::Identifier)) {
2687 TokError("invalid operand for instruction");
2688 return MatchOperand_ParseFail;
2692 auto Mapper = AArch64DB::DBarrierMapper();
2694 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2696 TokError("invalid barrier option name");
2697 return MatchOperand_ParseFail;
2700 // The only valid named option for ISB is 'sy'
2701 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2702 TokError("'sy' or #imm operand expected");
2703 return MatchOperand_ParseFail;
2706 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2707 getLoc(), getContext()));
2708 Parser.Lex(); // Consume the option
2710 return MatchOperand_Success;
2713 AArch64AsmParser::OperandMatchResultTy
2714 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2715 MCAsmParser &Parser = getParser();
2716 const AsmToken &Tok = Parser.getTok();
2718 if (Tok.isNot(AsmToken::Identifier))
2719 return MatchOperand_NoMatch;
2722 auto MRSMapper = AArch64SysReg::MRSMapper();
2723 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2724 getSTI().getFeatureBits(), IsKnown);
2725 assert(IsKnown == (MRSReg != -1U) &&
2726 "register should be -1 if and only if it's unknown");
2728 auto MSRMapper = AArch64SysReg::MSRMapper();
2729 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2730 getSTI().getFeatureBits(), IsKnown);
2731 assert(IsKnown == (MSRReg != -1U) &&
2732 "register should be -1 if and only if it's unknown");
2734 auto PStateMapper = AArch64PState::PStateMapper();
2735 uint32_t PStateField =
2736 PStateMapper.fromString(Tok.getString(),
2737 getSTI().getFeatureBits(), IsKnown);
2738 assert(IsKnown == (PStateField != -1U) &&
2739 "register should be -1 if and only if it's unknown");
2741 Operands.push_back(AArch64Operand::CreateSysReg(
2742 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2743 Parser.Lex(); // Eat identifier
2745 return MatchOperand_Success;
2748 /// tryParseVectorRegister - Parse a vector register operand.
2749 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2750 MCAsmParser &Parser = getParser();
2751 if (Parser.getTok().isNot(AsmToken::Identifier))
2755 // Check for a vector register specifier first.
2757 int64_t Reg = tryMatchVectorRegister(Kind, false);
2761 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2762 // If there was an explicit qualifier, that goes on as a literal text
2766 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2768 // If there is an index specifier following the register, parse that too.
2769 if (Parser.getTok().is(AsmToken::LBrac)) {
2770 SMLoc SIdx = getLoc();
2771 Parser.Lex(); // Eat left bracket token.
2773 const MCExpr *ImmVal;
2774 if (getParser().parseExpression(ImmVal))
2776 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2778 TokError("immediate value expected for vector index");
2783 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2784 Error(E, "']' expected");
2788 Parser.Lex(); // Eat right bracket token.
2790 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2797 /// parseRegister - Parse a non-vector register operand.
2798 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2799 MCAsmParser &Parser = getParser();
2801 // Try for a vector register.
2802 if (!tryParseVectorRegister(Operands))
2805 // Try for a scalar register.
2806 int64_t Reg = tryParseRegister();
2810 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2812 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2813 // as a string token in the instruction itself.
2814 if (getLexer().getKind() == AsmToken::LBrac) {
2815 SMLoc LBracS = getLoc();
2817 const AsmToken &Tok = Parser.getTok();
2818 if (Tok.is(AsmToken::Integer)) {
2819 SMLoc IntS = getLoc();
2820 int64_t Val = Tok.getIntVal();
2823 if (getLexer().getKind() == AsmToken::RBrac) {
2824 SMLoc RBracS = getLoc();
2827 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2829 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2831 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2841 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2842 MCAsmParser &Parser = getParser();
2843 bool HasELFModifier = false;
2844 AArch64MCExpr::VariantKind RefKind;
2846 if (Parser.getTok().is(AsmToken::Colon)) {
2847 Parser.Lex(); // Eat ':"
2848 HasELFModifier = true;
2850 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2851 Error(Parser.getTok().getLoc(),
2852 "expect relocation specifier in operand after ':'");
2856 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2857 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2858 .Case("lo12", AArch64MCExpr::VK_LO12)
2859 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2860 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2861 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2862 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2863 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2864 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2865 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2866 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2867 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2868 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2869 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2870 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2871 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2872 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2873 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2874 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2875 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2876 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2877 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2878 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2879 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2880 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2881 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2882 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2883 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2884 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2885 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2886 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2887 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2888 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2889 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2890 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2891 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2892 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2893 .Default(AArch64MCExpr::VK_INVALID);
2895 if (RefKind == AArch64MCExpr::VK_INVALID) {
2896 Error(Parser.getTok().getLoc(),
2897 "expect relocation specifier in operand after ':'");
2901 Parser.Lex(); // Eat identifier
2903 if (Parser.getTok().isNot(AsmToken::Colon)) {
2904 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2907 Parser.Lex(); // Eat ':'
2910 if (getParser().parseExpression(ImmVal))
2914 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2919 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2920 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2921 MCAsmParser &Parser = getParser();
2922 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2924 Parser.Lex(); // Eat left bracket token.
2926 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2929 int64_t PrevReg = FirstReg;
2932 if (Parser.getTok().is(AsmToken::Minus)) {
2933 Parser.Lex(); // Eat the minus.
2935 SMLoc Loc = getLoc();
2937 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2940 // Any Kind suffices must match on all regs in the list.
2941 if (Kind != NextKind)
2942 return Error(Loc, "mismatched register size suffix");
2944 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2946 if (Space == 0 || Space > 3) {
2947 return Error(Loc, "invalid number of vectors");
2953 while (Parser.getTok().is(AsmToken::Comma)) {
2954 Parser.Lex(); // Eat the comma token.
2956 SMLoc Loc = getLoc();
2958 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2961 // Any Kind suffices must match on all regs in the list.
2962 if (Kind != NextKind)
2963 return Error(Loc, "mismatched register size suffix");
2965 // Registers must be incremental (with wraparound at 31)
2966 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2967 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2968 return Error(Loc, "registers must be sequential");
2975 if (Parser.getTok().isNot(AsmToken::RCurly))
2976 return Error(getLoc(), "'}' expected");
2977 Parser.Lex(); // Eat the '}' token.
2980 return Error(S, "invalid number of vectors");
2982 unsigned NumElements = 0;
2983 char ElementKind = 0;
2985 parseValidVectorKind(Kind, NumElements, ElementKind);
2987 Operands.push_back(AArch64Operand::CreateVectorList(
2988 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2990 // If there is an index specifier following the list, parse that too.
2991 if (Parser.getTok().is(AsmToken::LBrac)) {
2992 SMLoc SIdx = getLoc();
2993 Parser.Lex(); // Eat left bracket token.
2995 const MCExpr *ImmVal;
2996 if (getParser().parseExpression(ImmVal))
2998 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3000 TokError("immediate value expected for vector index");
3005 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3006 Error(E, "']' expected");
3010 Parser.Lex(); // Eat right bracket token.
3012 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3018 AArch64AsmParser::OperandMatchResultTy
3019 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3020 MCAsmParser &Parser = getParser();
3021 const AsmToken &Tok = Parser.getTok();
3022 if (!Tok.is(AsmToken::Identifier))
3023 return MatchOperand_NoMatch;
3025 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3027 MCContext &Ctx = getContext();
3028 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3029 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3030 return MatchOperand_NoMatch;
3033 Parser.Lex(); // Eat register
3035 if (Parser.getTok().isNot(AsmToken::Comma)) {
3037 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3038 return MatchOperand_Success;
3040 Parser.Lex(); // Eat comma.
3042 if (Parser.getTok().is(AsmToken::Hash))
3043 Parser.Lex(); // Eat hash
3045 if (Parser.getTok().isNot(AsmToken::Integer)) {
3046 Error(getLoc(), "index must be absent or #0");
3047 return MatchOperand_ParseFail;
3050 const MCExpr *ImmVal;
3051 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3052 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3053 Error(getLoc(), "index must be absent or #0");
3054 return MatchOperand_ParseFail;
3058 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3059 return MatchOperand_Success;
3062 /// parseOperand - Parse a arm instruction operand. For now this parses the
3063 /// operand regardless of the mnemonic.
3064 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3065 bool invertCondCode) {
3066 MCAsmParser &Parser = getParser();
3067 // Check if the current operand has a custom associated parser, if so, try to
3068 // custom parse the operand, or fallback to the general approach.
3069 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3070 if (ResTy == MatchOperand_Success)
3072 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3073 // there was a match, but an error occurred, in which case, just return that
3074 // the operand parsing failed.
3075 if (ResTy == MatchOperand_ParseFail)
3078 // Nothing custom, so do general case parsing.
3080 switch (getLexer().getKind()) {
3084 if (parseSymbolicImmVal(Expr))
3085 return Error(S, "invalid operand");
3087 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3088 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3091 case AsmToken::LBrac: {
3092 SMLoc Loc = Parser.getTok().getLoc();
3093 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3095 Parser.Lex(); // Eat '['
3097 // There's no comma after a '[', so we can parse the next operand
3099 return parseOperand(Operands, false, false);
3101 case AsmToken::LCurly:
3102 return parseVectorList(Operands);
3103 case AsmToken::Identifier: {
3104 // If we're expecting a Condition Code operand, then just parse that.
3106 return parseCondCode(Operands, invertCondCode);
3108 // If it's a register name, parse it.
3109 if (!parseRegister(Operands))
3112 // This could be an optional "shift" or "extend" operand.
3113 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3114 // We can only continue if no tokens were eaten.
3115 if (GotShift != MatchOperand_NoMatch)
3118 // This was not a register so parse other operands that start with an
3119 // identifier (like labels) as expressions and create them as immediates.
3120 const MCExpr *IdVal;
3122 if (getParser().parseExpression(IdVal))
3125 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3126 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3129 case AsmToken::Integer:
3130 case AsmToken::Real:
3131 case AsmToken::Hash: {
3132 // #42 -> immediate.
3134 if (getLexer().is(AsmToken::Hash))
3137 // Parse a negative sign
3138 bool isNegative = false;
3139 if (Parser.getTok().is(AsmToken::Minus)) {
3141 // We need to consume this token only when we have a Real, otherwise
3142 // we let parseSymbolicImmVal take care of it
3143 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3147 // The only Real that should come through here is a literal #0.0 for
3148 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3149 // so convert the value.
3150 const AsmToken &Tok = Parser.getTok();
3151 if (Tok.is(AsmToken::Real)) {
3152 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3153 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3154 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3155 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3156 Mnemonic != "fcmlt")
3157 return TokError("unexpected floating point literal");
3158 else if (IntVal != 0 || isNegative)
3159 return TokError("expected floating-point constant #0.0");
3160 Parser.Lex(); // Eat the token.
3163 AArch64Operand::CreateToken("#0", false, S, getContext()));
3165 AArch64Operand::CreateToken(".0", false, S, getContext()));
3169 const MCExpr *ImmVal;
3170 if (parseSymbolicImmVal(ImmVal))
3173 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3174 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3177 case AsmToken::Equal: {
3178 SMLoc Loc = Parser.getTok().getLoc();
3179 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3180 return Error(Loc, "unexpected token in operand");
3181 Parser.Lex(); // Eat '='
3182 const MCExpr *SubExprVal;
3183 if (getParser().parseExpression(SubExprVal))
3186 if (Operands.size() < 2 ||
3187 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3188 return Error(Loc, "Only valid when first operand is register");
3191 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3192 Operands[1]->getReg());
3194 MCContext& Ctx = getContext();
3195 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3196 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3197 if (isa<MCConstantExpr>(SubExprVal)) {
3198 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3199 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3200 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3204 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3205 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3206 Operands.push_back(AArch64Operand::CreateImm(
3207 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3209 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3210 ShiftAmt, true, S, E, Ctx));
3213 APInt Simm = APInt(64, Imm << ShiftAmt);
3214 // check if the immediate is an unsigned or signed 32-bit int for W regs
3215 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3216 return Error(Loc, "Immediate too large for register");
3218 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3219 const MCExpr *CPLoc =
3220 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3221 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3227 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3229 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3230 StringRef Name, SMLoc NameLoc,
3231 OperandVector &Operands) {
3232 MCAsmParser &Parser = getParser();
3233 Name = StringSwitch<StringRef>(Name.lower())
3234 .Case("beq", "b.eq")
3235 .Case("bne", "b.ne")
3236 .Case("bhs", "b.hs")
3237 .Case("bcs", "b.cs")
3238 .Case("blo", "b.lo")
3239 .Case("bcc", "b.cc")
3240 .Case("bmi", "b.mi")
3241 .Case("bpl", "b.pl")
3242 .Case("bvs", "b.vs")
3243 .Case("bvc", "b.vc")
3244 .Case("bhi", "b.hi")
3245 .Case("bls", "b.ls")
3246 .Case("bge", "b.ge")
3247 .Case("blt", "b.lt")
3248 .Case("bgt", "b.gt")
3249 .Case("ble", "b.le")
3250 .Case("bal", "b.al")
3251 .Case("bnv", "b.nv")
3254 // First check for the AArch64-specific .req directive.
3255 if (Parser.getTok().is(AsmToken::Identifier) &&
3256 Parser.getTok().getIdentifier() == ".req") {
3257 parseDirectiveReq(Name, NameLoc);
3258 // We always return 'error' for this, as we're done with this
3259 // statement and don't need to match the 'instruction."
3263 // Create the leading tokens for the mnemonic, split by '.' characters.
3264 size_t Start = 0, Next = Name.find('.');
3265 StringRef Head = Name.slice(Start, Next);
3267 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3268 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3269 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3270 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3271 Parser.eatToEndOfStatement();
3276 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3279 // Handle condition codes for a branch mnemonic
3280 if (Head == "b" && Next != StringRef::npos) {
3282 Next = Name.find('.', Start + 1);
3283 Head = Name.slice(Start + 1, Next);
3285 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3286 (Head.data() - Name.data()));
3287 AArch64CC::CondCode CC = parseCondCodeString(Head);
3288 if (CC == AArch64CC::Invalid)
3289 return Error(SuffixLoc, "invalid condition code");
3291 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3293 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3296 // Add the remaining tokens in the mnemonic.
3297 while (Next != StringRef::npos) {
3299 Next = Name.find('.', Start + 1);
3300 Head = Name.slice(Start, Next);
3301 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3302 (Head.data() - Name.data()) + 1);
3304 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3307 // Conditional compare instructions have a Condition Code operand, which needs
3308 // to be parsed and an immediate operand created.
3309 bool condCodeFourthOperand =
3310 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3311 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3312 Head == "csinc" || Head == "csinv" || Head == "csneg");
3314 // These instructions are aliases to some of the conditional select
3315 // instructions. However, the condition code is inverted in the aliased
3318 // FIXME: Is this the correct way to handle these? Or should the parser
3319 // generate the aliased instructions directly?
3320 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3321 bool condCodeThirdOperand =
3322 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3324 // Read the remaining operands.
3325 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3326 // Read the first operand.
3327 if (parseOperand(Operands, false, false)) {
3328 Parser.eatToEndOfStatement();
3333 while (getLexer().is(AsmToken::Comma)) {
3334 Parser.Lex(); // Eat the comma.
3336 // Parse and remember the operand.
3337 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3338 (N == 3 && condCodeThirdOperand) ||
3339 (N == 2 && condCodeSecondOperand),
3340 condCodeSecondOperand || condCodeThirdOperand)) {
3341 Parser.eatToEndOfStatement();
3345 // After successfully parsing some operands there are two special cases to
3346 // consider (i.e. notional operands not separated by commas). Both are due
3347 // to memory specifiers:
3348 // + An RBrac will end an address for load/store/prefetch
3349 // + An '!' will indicate a pre-indexed operation.
3351 // It's someone else's responsibility to make sure these tokens are sane
3352 // in the given context!
3353 if (Parser.getTok().is(AsmToken::RBrac)) {
3354 SMLoc Loc = Parser.getTok().getLoc();
3355 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3360 if (Parser.getTok().is(AsmToken::Exclaim)) {
3361 SMLoc Loc = Parser.getTok().getLoc();
3362 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3371 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3372 SMLoc Loc = Parser.getTok().getLoc();
3373 Parser.eatToEndOfStatement();
3374 return Error(Loc, "unexpected token in argument list");
3377 Parser.Lex(); // Consume the EndOfStatement
3381 // FIXME: This entire function is a giant hack to provide us with decent
3382 // operand range validation/diagnostics until TableGen/MC can be extended
3383 // to support autogeneration of this kind of validation.
3384 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3385 SmallVectorImpl<SMLoc> &Loc) {
3386 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3387 // Check for indexed addressing modes w/ the base register being the
3388 // same as a destination/source register or pair load where
3389 // the Rt == Rt2. All of those are undefined behaviour.
3390 switch (Inst.getOpcode()) {
3391 case AArch64::LDPSWpre:
3392 case AArch64::LDPWpost:
3393 case AArch64::LDPWpre:
3394 case AArch64::LDPXpost:
3395 case AArch64::LDPXpre: {
3396 unsigned Rt = Inst.getOperand(1).getReg();
3397 unsigned Rt2 = Inst.getOperand(2).getReg();
3398 unsigned Rn = Inst.getOperand(3).getReg();
3399 if (RI->isSubRegisterEq(Rn, Rt))
3400 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3401 "is also a destination");
3402 if (RI->isSubRegisterEq(Rn, Rt2))
3403 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3404 "is also a destination");
3407 case AArch64::LDPDi:
3408 case AArch64::LDPQi:
3409 case AArch64::LDPSi:
3410 case AArch64::LDPSWi:
3411 case AArch64::LDPWi:
3412 case AArch64::LDPXi: {
3413 unsigned Rt = Inst.getOperand(0).getReg();
3414 unsigned Rt2 = Inst.getOperand(1).getReg();
3416 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3419 case AArch64::LDPDpost:
3420 case AArch64::LDPDpre:
3421 case AArch64::LDPQpost:
3422 case AArch64::LDPQpre:
3423 case AArch64::LDPSpost:
3424 case AArch64::LDPSpre:
3425 case AArch64::LDPSWpost: {
3426 unsigned Rt = Inst.getOperand(1).getReg();
3427 unsigned Rt2 = Inst.getOperand(2).getReg();
3429 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3432 case AArch64::STPDpost:
3433 case AArch64::STPDpre:
3434 case AArch64::STPQpost:
3435 case AArch64::STPQpre:
3436 case AArch64::STPSpost:
3437 case AArch64::STPSpre:
3438 case AArch64::STPWpost:
3439 case AArch64::STPWpre:
3440 case AArch64::STPXpost:
3441 case AArch64::STPXpre: {
3442 unsigned Rt = Inst.getOperand(1).getReg();
3443 unsigned Rt2 = Inst.getOperand(2).getReg();
3444 unsigned Rn = Inst.getOperand(3).getReg();
3445 if (RI->isSubRegisterEq(Rn, Rt))
3446 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3447 "is also a source");
3448 if (RI->isSubRegisterEq(Rn, Rt2))
3449 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3450 "is also a source");
3453 case AArch64::LDRBBpre:
3454 case AArch64::LDRBpre:
3455 case AArch64::LDRHHpre:
3456 case AArch64::LDRHpre:
3457 case AArch64::LDRSBWpre:
3458 case AArch64::LDRSBXpre:
3459 case AArch64::LDRSHWpre:
3460 case AArch64::LDRSHXpre:
3461 case AArch64::LDRSWpre:
3462 case AArch64::LDRWpre:
3463 case AArch64::LDRXpre:
3464 case AArch64::LDRBBpost:
3465 case AArch64::LDRBpost:
3466 case AArch64::LDRHHpost:
3467 case AArch64::LDRHpost:
3468 case AArch64::LDRSBWpost:
3469 case AArch64::LDRSBXpost:
3470 case AArch64::LDRSHWpost:
3471 case AArch64::LDRSHXpost:
3472 case AArch64::LDRSWpost:
3473 case AArch64::LDRWpost:
3474 case AArch64::LDRXpost: {
3475 unsigned Rt = Inst.getOperand(1).getReg();
3476 unsigned Rn = Inst.getOperand(2).getReg();
3477 if (RI->isSubRegisterEq(Rn, Rt))
3478 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3479 "is also a source");
3482 case AArch64::STRBBpost:
3483 case AArch64::STRBpost:
3484 case AArch64::STRHHpost:
3485 case AArch64::STRHpost:
3486 case AArch64::STRWpost:
3487 case AArch64::STRXpost:
3488 case AArch64::STRBBpre:
3489 case AArch64::STRBpre:
3490 case AArch64::STRHHpre:
3491 case AArch64::STRHpre:
3492 case AArch64::STRWpre:
3493 case AArch64::STRXpre: {
3494 unsigned Rt = Inst.getOperand(1).getReg();
3495 unsigned Rn = Inst.getOperand(2).getReg();
3496 if (RI->isSubRegisterEq(Rn, Rt))
3497 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3498 "is also a source");
3503 // Now check immediate ranges. Separate from the above as there is overlap
3504 // in the instructions being checked and this keeps the nested conditionals
3506 switch (Inst.getOpcode()) {
3507 case AArch64::ADDSWri:
3508 case AArch64::ADDSXri:
3509 case AArch64::ADDWri:
3510 case AArch64::ADDXri:
3511 case AArch64::SUBSWri:
3512 case AArch64::SUBSXri:
3513 case AArch64::SUBWri:
3514 case AArch64::SUBXri: {
3515 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3516 // some slight duplication here.
3517 if (Inst.getOperand(2).isExpr()) {
3518 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3519 AArch64MCExpr::VariantKind ELFRefKind;
3520 MCSymbolRefExpr::VariantKind DarwinRefKind;
3522 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3523 return Error(Loc[2], "invalid immediate expression");
3526 // Only allow these with ADDXri.
3527 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3528 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3529 Inst.getOpcode() == AArch64::ADDXri)
3532 // Only allow these with ADDXri/ADDWri
3533 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3534 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3535 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3536 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3537 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3538 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3539 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3540 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3541 (Inst.getOpcode() == AArch64::ADDXri ||
3542 Inst.getOpcode() == AArch64::ADDWri))
3545 // Don't allow expressions in the immediate field otherwise
3546 return Error(Loc[2], "invalid immediate expression");
3555 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3557 case Match_MissingFeature:
3559 "instruction requires a CPU feature not currently enabled");
3560 case Match_InvalidOperand:
3561 return Error(Loc, "invalid operand for instruction");
3562 case Match_InvalidSuffix:
3563 return Error(Loc, "invalid type suffix for instruction");
3564 case Match_InvalidCondCode:
3565 return Error(Loc, "expected AArch64 condition code");
3566 case Match_AddSubRegExtendSmall:
3568 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3569 case Match_AddSubRegExtendLarge:
3571 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3572 case Match_AddSubSecondSource:
3574 "expected compatible register, symbol or integer in range [0, 4095]");
3575 case Match_LogicalSecondSource:
3576 return Error(Loc, "expected compatible register or logical immediate");
3577 case Match_InvalidMovImm32Shift:
3578 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3579 case Match_InvalidMovImm64Shift:
3580 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3581 case Match_AddSubRegShift32:
3583 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3584 case Match_AddSubRegShift64:
3586 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3587 case Match_InvalidFPImm:
3589 "expected compatible register or floating-point constant");
3590 case Match_InvalidMemoryIndexedSImm9:
3591 return Error(Loc, "index must be an integer in range [-256, 255].");
3592 case Match_InvalidMemoryIndexed4SImm7:
3593 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3594 case Match_InvalidMemoryIndexed8SImm7:
3595 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3596 case Match_InvalidMemoryIndexed16SImm7:
3597 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3598 case Match_InvalidMemoryWExtend8:
3600 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3601 case Match_InvalidMemoryWExtend16:
3603 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3604 case Match_InvalidMemoryWExtend32:
3606 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3607 case Match_InvalidMemoryWExtend64:
3609 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3610 case Match_InvalidMemoryWExtend128:
3612 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3613 case Match_InvalidMemoryXExtend8:
3615 "expected 'lsl' or 'sxtx' with optional shift of #0");
3616 case Match_InvalidMemoryXExtend16:
3618 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3619 case Match_InvalidMemoryXExtend32:
3621 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3622 case Match_InvalidMemoryXExtend64:
3624 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3625 case Match_InvalidMemoryXExtend128:
3627 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3628 case Match_InvalidMemoryIndexed1:
3629 return Error(Loc, "index must be an integer in range [0, 4095].");
3630 case Match_InvalidMemoryIndexed2:
3631 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3632 case Match_InvalidMemoryIndexed4:
3633 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3634 case Match_InvalidMemoryIndexed8:
3635 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3636 case Match_InvalidMemoryIndexed16:
3637 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3638 case Match_InvalidImm0_1:
3639 return Error(Loc, "immediate must be an integer in range [0, 1].");
3640 case Match_InvalidImm0_7:
3641 return Error(Loc, "immediate must be an integer in range [0, 7].");
3642 case Match_InvalidImm0_15:
3643 return Error(Loc, "immediate must be an integer in range [0, 15].");
3644 case Match_InvalidImm0_31:
3645 return Error(Loc, "immediate must be an integer in range [0, 31].");
3646 case Match_InvalidImm0_63:
3647 return Error(Loc, "immediate must be an integer in range [0, 63].");
3648 case Match_InvalidImm0_127:
3649 return Error(Loc, "immediate must be an integer in range [0, 127].");
3650 case Match_InvalidImm0_65535:
3651 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3652 case Match_InvalidImm1_8:
3653 return Error(Loc, "immediate must be an integer in range [1, 8].");
3654 case Match_InvalidImm1_16:
3655 return Error(Loc, "immediate must be an integer in range [1, 16].");
3656 case Match_InvalidImm1_32:
3657 return Error(Loc, "immediate must be an integer in range [1, 32].");
3658 case Match_InvalidImm1_64:
3659 return Error(Loc, "immediate must be an integer in range [1, 64].");
3660 case Match_InvalidIndex1:
3661 return Error(Loc, "expected lane specifier '[1]'");
3662 case Match_InvalidIndexB:
3663 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3664 case Match_InvalidIndexH:
3665 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3666 case Match_InvalidIndexS:
3667 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3668 case Match_InvalidIndexD:
3669 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3670 case Match_InvalidLabel:
3671 return Error(Loc, "expected label or encodable integer pc offset");
3673 return Error(Loc, "expected readable system register");
3675 return Error(Loc, "expected writable system register or pstate");
3676 case Match_MnemonicFail:
3677 return Error(Loc, "unrecognized instruction mnemonic");
3679 llvm_unreachable("unexpected error code!");
3683 static const char *getSubtargetFeatureName(uint64_t Val);
3685 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3686 OperandVector &Operands,
3688 uint64_t &ErrorInfo,
3689 bool MatchingInlineAsm) {
3690 assert(!Operands.empty() && "Unexpect empty operand list!");
3691 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3692 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3694 StringRef Tok = Op.getToken();
3695 unsigned NumOperands = Operands.size();
3697 if (NumOperands == 4 && Tok == "lsl") {
3698 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3699 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3700 if (Op2.isReg() && Op3.isImm()) {
3701 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3703 uint64_t Op3Val = Op3CE->getValue();
3704 uint64_t NewOp3Val = 0;
3705 uint64_t NewOp4Val = 0;
3706 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3708 NewOp3Val = (32 - Op3Val) & 0x1f;
3709 NewOp4Val = 31 - Op3Val;
3711 NewOp3Val = (64 - Op3Val) & 0x3f;
3712 NewOp4Val = 63 - Op3Val;
3715 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3716 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3718 Operands[0] = AArch64Operand::CreateToken(
3719 "ubfm", false, Op.getStartLoc(), getContext());
3720 Operands.push_back(AArch64Operand::CreateImm(
3721 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3722 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3723 Op3.getEndLoc(), getContext());
3726 } else if (NumOperands == 4 && Tok == "bfc") {
3727 // FIXME: Horrible hack to handle BFC->BFM alias.
3728 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3729 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3730 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3732 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3733 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3734 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3736 if (LSBCE && WidthCE) {
3737 uint64_t LSB = LSBCE->getValue();
3738 uint64_t Width = WidthCE->getValue();
3740 uint64_t RegWidth = 0;
3741 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3747 if (LSB >= RegWidth)
3748 return Error(LSBOp.getStartLoc(),
3749 "expected integer in range [0, 31]");
3750 if (Width < 1 || Width > RegWidth)
3751 return Error(WidthOp.getStartLoc(),
3752 "expected integer in range [1, 32]");
3756 ImmR = (32 - LSB) & 0x1f;
3758 ImmR = (64 - LSB) & 0x3f;
3760 uint64_t ImmS = Width - 1;
3762 if (ImmR != 0 && ImmS >= ImmR)
3763 return Error(WidthOp.getStartLoc(),
3764 "requested insert overflows register");
3766 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3767 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3768 Operands[0] = AArch64Operand::CreateToken(
3769 "bfm", false, Op.getStartLoc(), getContext());
3770 Operands[2] = AArch64Operand::CreateReg(
3771 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3772 SMLoc(), getContext());
3773 Operands[3] = AArch64Operand::CreateImm(
3774 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3775 Operands.emplace_back(
3776 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3777 WidthOp.getEndLoc(), getContext()));
3780 } else if (NumOperands == 5) {
3781 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3782 // UBFIZ -> UBFM aliases.
3783 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3784 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3785 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3786 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3788 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3789 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3790 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3792 if (Op3CE && Op4CE) {
3793 uint64_t Op3Val = Op3CE->getValue();
3794 uint64_t Op4Val = Op4CE->getValue();
3796 uint64_t RegWidth = 0;
3797 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3803 if (Op3Val >= RegWidth)
3804 return Error(Op3.getStartLoc(),
3805 "expected integer in range [0, 31]");
3806 if (Op4Val < 1 || Op4Val > RegWidth)
3807 return Error(Op4.getStartLoc(),
3808 "expected integer in range [1, 32]");
3810 uint64_t NewOp3Val = 0;
3812 NewOp3Val = (32 - Op3Val) & 0x1f;
3814 NewOp3Val = (64 - Op3Val) & 0x3f;
3816 uint64_t NewOp4Val = Op4Val - 1;
3818 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3819 return Error(Op4.getStartLoc(),
3820 "requested insert overflows register");
3822 const MCExpr *NewOp3 =
3823 MCConstantExpr::create(NewOp3Val, getContext());
3824 const MCExpr *NewOp4 =
3825 MCConstantExpr::create(NewOp4Val, getContext());
3826 Operands[3] = AArch64Operand::CreateImm(
3827 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3828 Operands[4] = AArch64Operand::CreateImm(
3829 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3831 Operands[0] = AArch64Operand::CreateToken(
3832 "bfm", false, Op.getStartLoc(), getContext());
3833 else if (Tok == "sbfiz")
3834 Operands[0] = AArch64Operand::CreateToken(
3835 "sbfm", false, Op.getStartLoc(), getContext());
3836 else if (Tok == "ubfiz")
3837 Operands[0] = AArch64Operand::CreateToken(
3838 "ubfm", false, Op.getStartLoc(), getContext());
3840 llvm_unreachable("No valid mnemonic for alias?");
3844 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3845 // UBFX -> UBFM aliases.
3846 } else if (NumOperands == 5 &&
3847 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3848 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3849 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3850 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3852 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3853 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3854 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3856 if (Op3CE && Op4CE) {
3857 uint64_t Op3Val = Op3CE->getValue();
3858 uint64_t Op4Val = Op4CE->getValue();
3860 uint64_t RegWidth = 0;
3861 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3867 if (Op3Val >= RegWidth)
3868 return Error(Op3.getStartLoc(),
3869 "expected integer in range [0, 31]");
3870 if (Op4Val < 1 || Op4Val > RegWidth)
3871 return Error(Op4.getStartLoc(),
3872 "expected integer in range [1, 32]");
3874 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3876 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3877 return Error(Op4.getStartLoc(),
3878 "requested extract overflows register");
3880 const MCExpr *NewOp4 =
3881 MCConstantExpr::create(NewOp4Val, getContext());
3882 Operands[4] = AArch64Operand::CreateImm(
3883 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3885 Operands[0] = AArch64Operand::CreateToken(
3886 "bfm", false, Op.getStartLoc(), getContext());
3887 else if (Tok == "sbfx")
3888 Operands[0] = AArch64Operand::CreateToken(
3889 "sbfm", false, Op.getStartLoc(), getContext());
3890 else if (Tok == "ubfx")
3891 Operands[0] = AArch64Operand::CreateToken(
3892 "ubfm", false, Op.getStartLoc(), getContext());
3894 llvm_unreachable("No valid mnemonic for alias?");
3899 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3900 // InstAlias can't quite handle this since the reg classes aren't
3902 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3903 // The source register can be Wn here, but the matcher expects a
3904 // GPR64. Twiddle it here if necessary.
3905 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3907 unsigned Reg = getXRegFromWReg(Op.getReg());
3908 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3909 Op.getEndLoc(), getContext());
3912 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3913 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3914 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3916 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3918 // The source register can be Wn here, but the matcher expects a
3919 // GPR64. Twiddle it here if necessary.
3920 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3922 unsigned Reg = getXRegFromWReg(Op.getReg());
3923 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3924 Op.getEndLoc(), getContext());
3928 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3929 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3930 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3932 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3934 // The source register can be Wn here, but the matcher expects a
3935 // GPR32. Twiddle it here if necessary.
3936 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3938 unsigned Reg = getWRegFromXReg(Op.getReg());
3939 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3940 Op.getEndLoc(), getContext());
3945 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3946 if (NumOperands == 3 && Tok == "fmov") {
3947 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3948 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3949 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3951 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3955 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3956 Op.getEndLoc(), getContext());
3961 // First try to match against the secondary set of tables containing the
3962 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3963 unsigned MatchResult =
3964 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3966 // If that fails, try against the alternate table containing long-form NEON:
3967 // "fadd v0.2s, v1.2s, v2.2s"
3968 if (MatchResult != Match_Success) {
3969 // But first, save the short-form match result: we can use it in case the
3970 // long-form match also fails.
3971 auto ShortFormNEONErrorInfo = ErrorInfo;
3972 auto ShortFormNEONMatchResult = MatchResult;
3975 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3977 // Now, both matches failed, and the long-form match failed on the mnemonic
3978 // suffix token operand. The short-form match failure is probably more
3979 // relevant: use it instead.
3980 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3981 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3982 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3983 MatchResult = ShortFormNEONMatchResult;
3984 ErrorInfo = ShortFormNEONErrorInfo;
3989 switch (MatchResult) {
3990 case Match_Success: {
3991 // Perform range checking and other semantic validations
3992 SmallVector<SMLoc, 8> OperandLocs;
3993 NumOperands = Operands.size();
3994 for (unsigned i = 1; i < NumOperands; ++i)
3995 OperandLocs.push_back(Operands[i]->getStartLoc());
3996 if (validateInstruction(Inst, OperandLocs))
4000 Out.EmitInstruction(Inst, getSTI());
4003 case Match_MissingFeature: {
4004 assert(ErrorInfo && "Unknown missing feature!");
4005 // Special case the error message for the very common case where only
4006 // a single subtarget feature is missing (neon, e.g.).
4007 std::string Msg = "instruction requires:";
4009 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4010 if (ErrorInfo & Mask) {
4012 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4016 return Error(IDLoc, Msg);
4018 case Match_MnemonicFail:
4019 return showMatchError(IDLoc, MatchResult);
4020 case Match_InvalidOperand: {
4021 SMLoc ErrorLoc = IDLoc;
4023 if (ErrorInfo != ~0ULL) {
4024 if (ErrorInfo >= Operands.size())
4025 return Error(IDLoc, "too few operands for instruction");
4027 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4028 if (ErrorLoc == SMLoc())
4031 // If the match failed on a suffix token operand, tweak the diagnostic
4033 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4034 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4035 MatchResult = Match_InvalidSuffix;
4037 return showMatchError(ErrorLoc, MatchResult);
4039 case Match_InvalidMemoryIndexed1:
4040 case Match_InvalidMemoryIndexed2:
4041 case Match_InvalidMemoryIndexed4:
4042 case Match_InvalidMemoryIndexed8:
4043 case Match_InvalidMemoryIndexed16:
4044 case Match_InvalidCondCode:
4045 case Match_AddSubRegExtendSmall:
4046 case Match_AddSubRegExtendLarge:
4047 case Match_AddSubSecondSource:
4048 case Match_LogicalSecondSource:
4049 case Match_AddSubRegShift32:
4050 case Match_AddSubRegShift64:
4051 case Match_InvalidMovImm32Shift:
4052 case Match_InvalidMovImm64Shift:
4053 case Match_InvalidFPImm:
4054 case Match_InvalidMemoryWExtend8:
4055 case Match_InvalidMemoryWExtend16:
4056 case Match_InvalidMemoryWExtend32:
4057 case Match_InvalidMemoryWExtend64:
4058 case Match_InvalidMemoryWExtend128:
4059 case Match_InvalidMemoryXExtend8:
4060 case Match_InvalidMemoryXExtend16:
4061 case Match_InvalidMemoryXExtend32:
4062 case Match_InvalidMemoryXExtend64:
4063 case Match_InvalidMemoryXExtend128:
4064 case Match_InvalidMemoryIndexed4SImm7:
4065 case Match_InvalidMemoryIndexed8SImm7:
4066 case Match_InvalidMemoryIndexed16SImm7:
4067 case Match_InvalidMemoryIndexedSImm9:
4068 case Match_InvalidImm0_1:
4069 case Match_InvalidImm0_7:
4070 case Match_InvalidImm0_15:
4071 case Match_InvalidImm0_31:
4072 case Match_InvalidImm0_63:
4073 case Match_InvalidImm0_127:
4074 case Match_InvalidImm0_65535:
4075 case Match_InvalidImm1_8:
4076 case Match_InvalidImm1_16:
4077 case Match_InvalidImm1_32:
4078 case Match_InvalidImm1_64:
4079 case Match_InvalidIndex1:
4080 case Match_InvalidIndexB:
4081 case Match_InvalidIndexH:
4082 case Match_InvalidIndexS:
4083 case Match_InvalidIndexD:
4084 case Match_InvalidLabel:
4087 if (ErrorInfo >= Operands.size())
4088 return Error(IDLoc, "too few operands for instruction");
4089 // Any time we get here, there's nothing fancy to do. Just get the
4090 // operand SMLoc and display the diagnostic.
4091 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4092 if (ErrorLoc == SMLoc())
4094 return showMatchError(ErrorLoc, MatchResult);
4098 llvm_unreachable("Implement any new match types added!");
4101 /// ParseDirective parses the arm specific directives
4102 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4103 const MCObjectFileInfo::Environment Format =
4104 getContext().getObjectFileInfo()->getObjectFileType();
4105 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4106 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4108 StringRef IDVal = DirectiveID.getIdentifier();
4109 SMLoc Loc = DirectiveID.getLoc();
4110 if (IDVal == ".hword")
4111 return parseDirectiveWord(2, Loc);
4112 if (IDVal == ".word")
4113 return parseDirectiveWord(4, Loc);
4114 if (IDVal == ".xword")
4115 return parseDirectiveWord(8, Loc);
4116 if (IDVal == ".tlsdesccall")
4117 return parseDirectiveTLSDescCall(Loc);
4118 if (IDVal == ".ltorg" || IDVal == ".pool")
4119 return parseDirectiveLtorg(Loc);
4120 if (IDVal == ".unreq")
4121 return parseDirectiveUnreq(Loc);
4123 if (!IsMachO && !IsCOFF) {
4124 if (IDVal == ".inst")
4125 return parseDirectiveInst(Loc);
4128 return parseDirectiveLOH(IDVal, Loc);
4131 /// parseDirectiveWord
4132 /// ::= .word [ expression (, expression)* ]
4133 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4134 MCAsmParser &Parser = getParser();
4135 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4137 const MCExpr *Value;
4138 if (getParser().parseExpression(Value))
4141 getParser().getStreamer().EmitValue(Value, Size, L);
4143 if (getLexer().is(AsmToken::EndOfStatement))
4146 // FIXME: Improve diagnostic.
4147 if (getLexer().isNot(AsmToken::Comma))
4148 return Error(L, "unexpected token in directive");
4157 /// parseDirectiveInst
4158 /// ::= .inst opcode [, ...]
4159 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4160 MCAsmParser &Parser = getParser();
4161 if (getLexer().is(AsmToken::EndOfStatement)) {
4162 Parser.eatToEndOfStatement();
4163 Error(Loc, "expected expression following directive");
4170 if (getParser().parseExpression(Expr)) {
4171 Error(Loc, "expected expression");
4175 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4177 Error(Loc, "expected constant expression");
4181 getTargetStreamer().emitInst(Value->getValue());
4183 if (getLexer().is(AsmToken::EndOfStatement))
4186 if (getLexer().isNot(AsmToken::Comma)) {
4187 Error(Loc, "unexpected token in directive");
4191 Parser.Lex(); // Eat comma.
4198 // parseDirectiveTLSDescCall:
4199 // ::= .tlsdesccall symbol
4200 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4202 if (getParser().parseIdentifier(Name))
4203 return Error(L, "expected symbol after directive");
4205 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4206 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4207 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4210 Inst.setOpcode(AArch64::TLSDESCCALL);
4211 Inst.addOperand(MCOperand::createExpr(Expr));
4213 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4217 /// ::= .loh <lohName | lohId> label1, ..., labelN
4218 /// The number of arguments depends on the loh identifier.
4219 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4220 if (IDVal != MCLOHDirectiveName())
4223 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4224 if (getParser().getTok().isNot(AsmToken::Integer))
4225 return TokError("expected an identifier or a number in directive");
4226 // We successfully get a numeric value for the identifier.
4227 // Check if it is valid.
4228 int64_t Id = getParser().getTok().getIntVal();
4229 if (Id <= -1U && !isValidMCLOHType(Id))
4230 return TokError("invalid numeric identifier in directive");
4231 Kind = (MCLOHType)Id;
4233 StringRef Name = getTok().getIdentifier();
4234 // We successfully parse an identifier.
4235 // Check if it is a recognized one.
4236 int Id = MCLOHNameToId(Name);
4239 return TokError("invalid identifier in directive");
4240 Kind = (MCLOHType)Id;
4242 // Consume the identifier.
4244 // Get the number of arguments of this LOH.
4245 int NbArgs = MCLOHIdToNbArgs(Kind);
4247 assert(NbArgs != -1 && "Invalid number of arguments");
4249 SmallVector<MCSymbol *, 3> Args;
4250 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4252 if (getParser().parseIdentifier(Name))
4253 return TokError("expected identifier in directive");
4254 Args.push_back(getContext().getOrCreateSymbol(Name));
4256 if (Idx + 1 == NbArgs)
4258 if (getLexer().isNot(AsmToken::Comma))
4259 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4262 if (getLexer().isNot(AsmToken::EndOfStatement))
4263 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4265 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4269 /// parseDirectiveLtorg
4270 /// ::= .ltorg | .pool
4271 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4272 getTargetStreamer().emitCurrentConstantPool();
4276 /// parseDirectiveReq
4277 /// ::= name .req registername
4278 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4279 MCAsmParser &Parser = getParser();
4280 Parser.Lex(); // Eat the '.req' token.
4281 SMLoc SRegLoc = getLoc();
4282 unsigned RegNum = tryParseRegister();
4283 bool IsVector = false;
4285 if (RegNum == static_cast<unsigned>(-1)) {
4287 RegNum = tryMatchVectorRegister(Kind, false);
4288 if (!Kind.empty()) {
4289 Error(SRegLoc, "vector register without type specifier expected");
4295 if (RegNum == static_cast<unsigned>(-1)) {
4296 Parser.eatToEndOfStatement();
4297 Error(SRegLoc, "register name or alias expected");
4301 // Shouldn't be anything else.
4302 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4303 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4304 Parser.eatToEndOfStatement();
4308 Parser.Lex(); // Consume the EndOfStatement
4310 auto pair = std::make_pair(IsVector, RegNum);
4311 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4312 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4317 /// parseDirectiveUneq
4318 /// ::= .unreq registername
4319 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4320 MCAsmParser &Parser = getParser();
4321 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4322 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4323 Parser.eatToEndOfStatement();
4326 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4327 Parser.Lex(); // Eat the identifier.
4332 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4333 AArch64MCExpr::VariantKind &ELFRefKind,
4334 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4336 ELFRefKind = AArch64MCExpr::VK_INVALID;
4337 DarwinRefKind = MCSymbolRefExpr::VK_None;
4340 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4341 ELFRefKind = AE->getKind();
4342 Expr = AE->getSubExpr();
4345 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4347 // It's a simple symbol reference with no addend.
4348 DarwinRefKind = SE->getKind();
4352 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4356 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4359 DarwinRefKind = SE->getKind();
4361 if (BE->getOpcode() != MCBinaryExpr::Add &&
4362 BE->getOpcode() != MCBinaryExpr::Sub)
4365 // See if the addend is is a constant, otherwise there's more going
4366 // on here than we can deal with.
4367 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4371 Addend = AddendExpr->getValue();
4372 if (BE->getOpcode() == MCBinaryExpr::Sub)
4375 // It's some symbol reference + a constant addend, but really
4376 // shouldn't use both Darwin and ELF syntax.
4377 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4378 DarwinRefKind == MCSymbolRefExpr::VK_None;
4381 /// Force static initialization.
4382 extern "C" void LLVMInitializeAArch64AsmParser() {
4383 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4384 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4385 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4388 #define GET_REGISTER_MATCHER
4389 #define GET_SUBTARGET_FEATURE_NAME
4390 #define GET_MATCHER_IMPLEMENTATION
4391 #include "AArch64GenAsmMatcher.inc"
4393 // Define this matcher function after the auto-generated include so we
4394 // have the match class enum definitions.
4395 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4397 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4398 // If the kind is a token for a literal immediate, check if our asm
4399 // operand matches. This is for InstAliases which have a fixed-value
4400 // immediate in the syntax.
4401 int64_t ExpectedVal;
4404 return Match_InvalidOperand;
4446 return Match_InvalidOperand;
4447 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4449 return Match_InvalidOperand;
4450 if (CE->getValue() == ExpectedVal)
4451 return Match_Success;
4452 return Match_InvalidOperand;
4456 AArch64AsmParser::OperandMatchResultTy
4457 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4461 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4462 Error(S, "expected register");
4463 return MatchOperand_ParseFail;
4466 int FirstReg = tryParseRegister();
4467 if (FirstReg == -1) {
4468 return MatchOperand_ParseFail;
4470 const MCRegisterClass &WRegClass =
4471 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4472 const MCRegisterClass &XRegClass =
4473 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4475 bool isXReg = XRegClass.contains(FirstReg),
4476 isWReg = WRegClass.contains(FirstReg);
4477 if (!isXReg && !isWReg) {
4478 Error(S, "expected first even register of a "
4479 "consecutive same-size even/odd register pair");
4480 return MatchOperand_ParseFail;
4483 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4484 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4486 if (FirstEncoding & 0x1) {
4487 Error(S, "expected first even register of a "
4488 "consecutive same-size even/odd register pair");
4489 return MatchOperand_ParseFail;
4493 if (getParser().getTok().isNot(AsmToken::Comma)) {
4494 Error(M, "expected comma");
4495 return MatchOperand_ParseFail;
4501 int SecondReg = tryParseRegister();
4502 if (SecondReg ==-1) {
4503 return MatchOperand_ParseFail;
4506 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4507 (isXReg && !XRegClass.contains(SecondReg)) ||
4508 (isWReg && !WRegClass.contains(SecondReg))) {
4509 Error(E,"expected second odd register of a "
4510 "consecutive same-size even/odd register pair");
4511 return MatchOperand_ParseFail;
4516 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4517 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4519 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4520 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4523 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4526 return MatchOperand_Success;