1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
42 class AArch64AsmParser : public MCTargetAsmParser {
44 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases created via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
117 const MCInstrInfo &MII, const MCTargetOptions &Options)
118 : MCTargetAsmParser(), STI(STI) {
119 MCAsmParserExtension::Initialize(Parser);
120 MCStreamer &S = getParser().getStreamer();
121 if (S.getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(S);
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
146 class AArch64Operand : public MCParsedAsmOperand {
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
184 struct VectorIndexOp {
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
214 uint32_t PStateField;
225 struct ShiftExtendOp {
226 AArch64_AM::ShiftExtendType Type;
228 bool HasExplicitAmount;
238 struct VectorListOp VectorList;
239 struct VectorIndexOp VectorIndex;
241 struct ShiftedImmOp ShiftedImm;
242 struct CondCodeOp CondCode;
243 struct FPImmOp FPImm;
244 struct BarrierOp Barrier;
245 struct SysRegOp SysReg;
246 struct SysCRImmOp SysCRImm;
247 struct PrefetchOp Prefetch;
248 struct ShiftExtendOp ShiftExtend;
251 // Keep the MCContext around as the MCExprs may need manipulated during
252 // the add<>Operands() calls.
256 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
258 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
260 StartLoc = o.StartLoc;
270 ShiftedImm = o.ShiftedImm;
273 CondCode = o.CondCode;
285 VectorList = o.VectorList;
288 VectorIndex = o.VectorIndex;
294 SysCRImm = o.SysCRImm;
297 Prefetch = o.Prefetch;
300 ShiftExtend = o.ShiftExtend;
305 /// getStartLoc - Get the location of the first token of this operand.
306 SMLoc getStartLoc() const override { return StartLoc; }
307 /// getEndLoc - Get the location of the last token of this operand.
308 SMLoc getEndLoc() const override { return EndLoc; }
310 StringRef getToken() const {
311 assert(Kind == k_Token && "Invalid access!");
312 return StringRef(Tok.Data, Tok.Length);
315 bool isTokenSuffix() const {
316 assert(Kind == k_Token && "Invalid access!");
320 const MCExpr *getImm() const {
321 assert(Kind == k_Immediate && "Invalid access!");
325 const MCExpr *getShiftedImmVal() const {
326 assert(Kind == k_ShiftedImm && "Invalid access!");
327 return ShiftedImm.Val;
330 unsigned getShiftedImmShift() const {
331 assert(Kind == k_ShiftedImm && "Invalid access!");
332 return ShiftedImm.ShiftAmount;
335 AArch64CC::CondCode getCondCode() const {
336 assert(Kind == k_CondCode && "Invalid access!");
337 return CondCode.Code;
340 unsigned getFPImm() const {
341 assert(Kind == k_FPImm && "Invalid access!");
345 unsigned getBarrier() const {
346 assert(Kind == k_Barrier && "Invalid access!");
350 unsigned getReg() const override {
351 assert(Kind == k_Register && "Invalid access!");
355 unsigned getVectorListStart() const {
356 assert(Kind == k_VectorList && "Invalid access!");
357 return VectorList.RegNum;
360 unsigned getVectorListCount() const {
361 assert(Kind == k_VectorList && "Invalid access!");
362 return VectorList.Count;
365 unsigned getVectorIndex() const {
366 assert(Kind == k_VectorIndex && "Invalid access!");
367 return VectorIndex.Val;
370 StringRef getSysReg() const {
371 assert(Kind == k_SysReg && "Invalid access!");
372 return StringRef(SysReg.Data, SysReg.Length);
375 unsigned getSysCR() const {
376 assert(Kind == k_SysCR && "Invalid access!");
380 unsigned getPrefetch() const {
381 assert(Kind == k_Prefetch && "Invalid access!");
385 AArch64_AM::ShiftExtendType getShiftExtendType() const {
386 assert(Kind == k_ShiftExtend && "Invalid access!");
387 return ShiftExtend.Type;
390 unsigned getShiftExtendAmount() const {
391 assert(Kind == k_ShiftExtend && "Invalid access!");
392 return ShiftExtend.Amount;
395 bool hasShiftExtendAmount() const {
396 assert(Kind == k_ShiftExtend && "Invalid access!");
397 return ShiftExtend.HasExplicitAmount;
400 bool isImm() const override { return Kind == k_Immediate; }
401 bool isMem() const override { return false; }
402 bool isSImm9() const {
405 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
408 int64_t Val = MCE->getValue();
409 return (Val >= -256 && Val < 256);
411 bool isSImm7s4() const {
414 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
417 int64_t Val = MCE->getValue();
418 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
420 bool isSImm7s8() const {
423 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
426 int64_t Val = MCE->getValue();
427 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
429 bool isSImm7s16() const {
432 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
435 int64_t Val = MCE->getValue();
436 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
439 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
440 AArch64MCExpr::VariantKind ELFRefKind;
441 MCSymbolRefExpr::VariantKind DarwinRefKind;
443 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
445 // If we don't understand the expression, assume the best and
446 // let the fixup and relocation code deal with it.
450 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
451 ELFRefKind == AArch64MCExpr::VK_LO12 ||
452 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
453 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
454 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
455 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
456 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
457 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
458 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
459 // Note that we don't range-check the addend. It's adjusted modulo page
460 // size when converted, so there is no "out of range" condition when using
462 return Addend >= 0 && (Addend % Scale) == 0;
463 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
464 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
465 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
472 template <int Scale> bool isUImm12Offset() const {
476 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
478 return isSymbolicUImm12Offset(getImm(), Scale);
480 int64_t Val = MCE->getValue();
481 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
484 bool isImm0_7() const {
487 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
490 int64_t Val = MCE->getValue();
491 return (Val >= 0 && Val < 8);
493 bool isImm1_8() const {
496 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
499 int64_t Val = MCE->getValue();
500 return (Val > 0 && Val < 9);
502 bool isImm0_15() const {
505 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
508 int64_t Val = MCE->getValue();
509 return (Val >= 0 && Val < 16);
511 bool isImm1_16() const {
514 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
517 int64_t Val = MCE->getValue();
518 return (Val > 0 && Val < 17);
520 bool isImm0_31() const {
523 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
526 int64_t Val = MCE->getValue();
527 return (Val >= 0 && Val < 32);
529 bool isImm1_31() const {
532 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
535 int64_t Val = MCE->getValue();
536 return (Val >= 1 && Val < 32);
538 bool isImm1_32() const {
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 int64_t Val = MCE->getValue();
545 return (Val >= 1 && Val < 33);
547 bool isImm0_63() const {
550 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
553 int64_t Val = MCE->getValue();
554 return (Val >= 0 && Val < 64);
556 bool isImm1_63() const {
559 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
562 int64_t Val = MCE->getValue();
563 return (Val >= 1 && Val < 64);
565 bool isImm1_64() const {
568 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
571 int64_t Val = MCE->getValue();
572 return (Val >= 1 && Val < 65);
574 bool isImm0_127() const {
577 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
580 int64_t Val = MCE->getValue();
581 return (Val >= 0 && Val < 128);
583 bool isImm0_255() const {
586 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
589 int64_t Val = MCE->getValue();
590 return (Val >= 0 && Val < 256);
592 bool isImm0_65535() const {
595 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
598 int64_t Val = MCE->getValue();
599 return (Val >= 0 && Val < 65536);
601 bool isImm32_63() const {
604 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
607 int64_t Val = MCE->getValue();
608 return (Val >= 32 && Val < 64);
610 bool isLogicalImm32() const {
613 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
616 int64_t Val = MCE->getValue();
617 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
620 return AArch64_AM::isLogicalImmediate(Val, 32);
622 bool isLogicalImm64() const {
625 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
628 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
630 bool isLogicalImm32Not() const {
633 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
636 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
637 return AArch64_AM::isLogicalImmediate(Val, 32);
639 bool isLogicalImm64Not() const {
642 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
645 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
647 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
648 bool isAddSubImm() const {
649 if (!isShiftedImm() && !isImm())
654 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
655 if (isShiftedImm()) {
656 unsigned Shift = ShiftedImm.ShiftAmount;
657 Expr = ShiftedImm.Val;
658 if (Shift != 0 && Shift != 12)
664 AArch64MCExpr::VariantKind ELFRefKind;
665 MCSymbolRefExpr::VariantKind DarwinRefKind;
667 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
668 DarwinRefKind, Addend)) {
669 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
670 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
671 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
672 || ELFRefKind == AArch64MCExpr::VK_LO12
673 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
674 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
675 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
676 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
677 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
678 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
679 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
682 // Otherwise it should be a real immediate in range:
683 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
684 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
686 bool isCondCode() const { return Kind == k_CondCode; }
687 bool isSIMDImmType10() const {
690 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
693 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
695 bool isBranchTarget26() const {
698 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
701 int64_t Val = MCE->getValue();
704 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
706 bool isPCRelLabel19() const {
709 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
712 int64_t Val = MCE->getValue();
715 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
717 bool isBranchTarget14() const {
720 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
723 int64_t Val = MCE->getValue();
726 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
730 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
734 AArch64MCExpr::VariantKind ELFRefKind;
735 MCSymbolRefExpr::VariantKind DarwinRefKind;
737 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
738 DarwinRefKind, Addend)) {
741 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
744 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
745 if (ELFRefKind == AllowedModifiers[i])
752 bool isMovZSymbolG3() const {
753 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
756 bool isMovZSymbolG2() const {
757 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
758 AArch64MCExpr::VK_TPREL_G2,
759 AArch64MCExpr::VK_DTPREL_G2});
762 bool isMovZSymbolG1() const {
763 return isMovWSymbol({
764 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
765 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
766 AArch64MCExpr::VK_DTPREL_G1,
770 bool isMovZSymbolG0() const {
771 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
772 AArch64MCExpr::VK_TPREL_G0,
773 AArch64MCExpr::VK_DTPREL_G0});
776 bool isMovKSymbolG3() const {
777 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
780 bool isMovKSymbolG2() const {
781 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
784 bool isMovKSymbolG1() const {
785 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
786 AArch64MCExpr::VK_TPREL_G1_NC,
787 AArch64MCExpr::VK_DTPREL_G1_NC});
790 bool isMovKSymbolG0() const {
792 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
793 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
796 template<int RegWidth, int Shift>
797 bool isMOVZMovAlias() const {
798 if (!isImm()) return false;
800 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
801 if (!CE) return false;
802 uint64_t Value = CE->getValue();
805 Value &= 0xffffffffULL;
807 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
808 if (Value == 0 && Shift != 0)
811 return (Value & ~(0xffffULL << Shift)) == 0;
814 template<int RegWidth, int Shift>
815 bool isMOVNMovAlias() const {
816 if (!isImm()) return false;
818 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
819 if (!CE) return false;
820 uint64_t Value = CE->getValue();
822 // MOVZ takes precedence over MOVN.
823 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
824 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
829 Value &= 0xffffffffULL;
831 return (Value & ~(0xffffULL << Shift)) == 0;
834 bool isFPImm() const { return Kind == k_FPImm; }
835 bool isBarrier() const { return Kind == k_Barrier; }
836 bool isSysReg() const { return Kind == k_SysReg; }
837 bool isMRSSystemRegister() const {
838 if (!isSysReg()) return false;
840 return SysReg.MRSReg != -1U;
842 bool isMSRSystemRegister() const {
843 if (!isSysReg()) return false;
845 return SysReg.MSRReg != -1U;
847 bool isSystemPStateField() const {
848 if (!isSysReg()) return false;
850 return SysReg.PStateField != -1U;
852 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
853 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
854 bool isVectorRegLo() const {
855 return Kind == k_Register && Reg.isVector &&
856 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
859 bool isGPR32as64() const {
860 return Kind == k_Register && !Reg.isVector &&
861 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
864 bool isGPR64sp0() const {
865 return Kind == k_Register && !Reg.isVector &&
866 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
869 /// Is this a vector list with the type implicit (presumably attached to the
870 /// instruction itself)?
871 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
872 return Kind == k_VectorList && VectorList.Count == NumRegs &&
873 !VectorList.ElementKind;
876 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
877 bool isTypedVectorList() const {
878 if (Kind != k_VectorList)
880 if (VectorList.Count != NumRegs)
882 if (VectorList.ElementKind != ElementKind)
884 return VectorList.NumElements == NumElements;
887 bool isVectorIndex1() const {
888 return Kind == k_VectorIndex && VectorIndex.Val == 1;
890 bool isVectorIndexB() const {
891 return Kind == k_VectorIndex && VectorIndex.Val < 16;
893 bool isVectorIndexH() const {
894 return Kind == k_VectorIndex && VectorIndex.Val < 8;
896 bool isVectorIndexS() const {
897 return Kind == k_VectorIndex && VectorIndex.Val < 4;
899 bool isVectorIndexD() const {
900 return Kind == k_VectorIndex && VectorIndex.Val < 2;
902 bool isToken() const override { return Kind == k_Token; }
903 bool isTokenEqual(StringRef Str) const {
904 return Kind == k_Token && getToken() == Str;
906 bool isSysCR() const { return Kind == k_SysCR; }
907 bool isPrefetch() const { return Kind == k_Prefetch; }
908 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
909 bool isShifter() const {
910 if (!isShiftExtend())
913 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
914 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
915 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
916 ST == AArch64_AM::MSL);
918 bool isExtend() const {
919 if (!isShiftExtend())
922 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
923 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
924 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
925 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
926 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
927 ET == AArch64_AM::LSL) &&
928 getShiftExtendAmount() <= 4;
931 bool isExtend64() const {
934 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
935 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
936 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
938 bool isExtendLSL64() const {
941 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
942 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
943 ET == AArch64_AM::LSL) &&
944 getShiftExtendAmount() <= 4;
947 template<int Width> bool isMemXExtend() const {
950 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
951 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
952 (getShiftExtendAmount() == Log2_32(Width / 8) ||
953 getShiftExtendAmount() == 0);
956 template<int Width> bool isMemWExtend() const {
959 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
960 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
961 (getShiftExtendAmount() == Log2_32(Width / 8) ||
962 getShiftExtendAmount() == 0);
965 template <unsigned width>
966 bool isArithmeticShifter() const {
970 // An arithmetic shifter is LSL, LSR, or ASR.
971 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
972 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
973 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
976 template <unsigned width>
977 bool isLogicalShifter() const {
981 // A logical shifter is LSL, LSR, ASR or ROR.
982 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
983 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
984 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
985 getShiftExtendAmount() < width;
988 bool isMovImm32Shifter() const {
992 // A 32-bit MOVi (MOVZ/MOVN/MOVK) shifter is LSL of 0 or 16.
993 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
994 if (ST != AArch64_AM::LSL)
996 uint64_t Val = getShiftExtendAmount();
997 return (Val == 0 || Val == 16);
1000 bool isMovImm64Shifter() const {
1004 // A 64-bit MOVi (MOVZ/MOVN/MOVK) shifter is LSL of 0, 16, 32, or 48.
1005 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1006 if (ST != AArch64_AM::LSL)
1008 uint64_t Val = getShiftExtendAmount();
1009 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1012 bool isLogicalVecShifter() const {
1016 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1017 unsigned Shift = getShiftExtendAmount();
1018 return getShiftExtendType() == AArch64_AM::LSL &&
1019 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1022 bool isLogicalVecHalfWordShifter() const {
1023 if (!isLogicalVecShifter())
1026 // A logical vector shifter is a left shift by 0 or 8.
1027 unsigned Shift = getShiftExtendAmount();
1028 return getShiftExtendType() == AArch64_AM::LSL &&
1029 (Shift == 0 || Shift == 8);
1032 bool isMoveVecShifter() const {
1033 if (!isShiftExtend())
1036 // A move vector shifter is an MSL left shift by 8 or 16.
1037 unsigned Shift = getShiftExtendAmount();
1038 return getShiftExtendType() == AArch64_AM::MSL &&
1039 (Shift == 8 || Shift == 16);
1042 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1043 // to LDUR/STUR when the offset is not legal for the former but is for
1044 // the latter. As such, in addition to checking for being a legal unscaled
1045 // address, also check that it is not a legal scaled address. This avoids
1046 // ambiguity in the matcher.
1048 bool isSImm9OffsetFB() const {
1049 return isSImm9() && !isUImm12Offset<Width / 8>();
1052 bool isAdrpLabel() const {
1053 // Validation was handled during parsing, so we just sanity check that
1054 // something didn't go haywire.
1058 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1059 int64_t Val = CE->getValue();
1060 int64_t Min = - (4096 * (1LL << (21 - 1)));
1061 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1062 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1068 bool isAdrLabel() const {
1069 // Validation was handled during parsing, so we just sanity check that
1070 // something didn't go haywire.
1074 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1075 int64_t Val = CE->getValue();
1076 int64_t Min = - (1LL << (21 - 1));
1077 int64_t Max = ((1LL << (21 - 1)) - 1);
1078 return Val >= Min && Val <= Max;
1084 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1085 // Add as immediates when possible. Null MCExpr = 0.
1087 Inst.addOperand(MCOperand::CreateImm(0));
1088 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1089 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1091 Inst.addOperand(MCOperand::CreateExpr(Expr));
1094 void addRegOperands(MCInst &Inst, unsigned N) const {
1095 assert(N == 1 && "Invalid number of operands!");
1096 Inst.addOperand(MCOperand::CreateReg(getReg()));
1099 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1100 assert(N == 1 && "Invalid number of operands!");
1102 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1104 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1105 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1106 RI->getEncodingValue(getReg()));
1108 Inst.addOperand(MCOperand::CreateReg(Reg));
1111 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1112 assert(N == 1 && "Invalid number of operands!");
1114 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1115 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1118 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1119 assert(N == 1 && "Invalid number of operands!");
1121 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1122 Inst.addOperand(MCOperand::CreateReg(getReg()));
1125 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1127 Inst.addOperand(MCOperand::CreateReg(getReg()));
1130 template <unsigned NumRegs>
1131 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1132 assert(N == 1 && "Invalid number of operands!");
1133 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1134 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1135 unsigned FirstReg = FirstRegs[NumRegs - 1];
1138 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1141 template <unsigned NumRegs>
1142 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1143 assert(N == 1 && "Invalid number of operands!");
1144 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1145 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1146 unsigned FirstReg = FirstRegs[NumRegs - 1];
1149 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1152 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1153 assert(N == 1 && "Invalid number of operands!");
1154 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1157 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1158 assert(N == 1 && "Invalid number of operands!");
1159 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1162 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1163 assert(N == 1 && "Invalid number of operands!");
1164 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1167 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1169 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1172 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1173 assert(N == 1 && "Invalid number of operands!");
1174 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1177 void addImmOperands(MCInst &Inst, unsigned N) const {
1178 assert(N == 1 && "Invalid number of operands!");
1179 // If this is a pageoff symrefexpr with an addend, adjust the addend
1180 // to be only the page-offset portion. Otherwise, just add the expr
1182 addExpr(Inst, getImm());
1185 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 2 && "Invalid number of operands!");
1187 if (isShiftedImm()) {
1188 addExpr(Inst, getShiftedImmVal());
1189 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1191 addExpr(Inst, getImm());
1192 Inst.addOperand(MCOperand::CreateImm(0));
1196 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1197 assert(N == 1 && "Invalid number of operands!");
1198 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1201 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1202 assert(N == 1 && "Invalid number of operands!");
1203 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1205 addExpr(Inst, getImm());
1207 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1210 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1211 addImmOperands(Inst, N);
1215 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1216 assert(N == 1 && "Invalid number of operands!");
1217 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1220 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1223 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
// Signed 9-bit immediate: emitted unscaled. cast<> asserts the operand is a
// constant at this point (the matcher has already validated the range).
1226 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1227 assert(N == 1 && "Invalid number of operands!");
1228 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1229 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Signed 7-bit immediates scaled by the access size (4/8/16 bytes): the
// written byte offset is divided down to the encoded field value.
1232 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1233 assert(N == 1 && "Invalid number of operands!");
1234 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1235 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1238 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1239 assert(N == 1 && "Invalid number of operands!");
1240 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1241 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1244 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1245 assert(N == 1 && "Invalid number of operands!");
1246 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1247 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// Family of ranged-immediate emitters. Each emits the constant value
// unchanged; the range named in the method (0_7, 1_8, ...) is presumably
// enforced by the corresponding is*() matcher predicate, not here -- confirm.
1250 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1251 assert(N == 1 && "Invalid number of operands!");
1252 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1253 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1256 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1257 assert(N == 1 && "Invalid number of operands!");
1258 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1259 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1262 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1263 assert(N == 1 && "Invalid number of operands!");
1264 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1265 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1268 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1269 assert(N == 1 && "Invalid number of operands!");
1270 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): this assert is redundant -- cast<> already aborts on a
// non-constant -- and no sibling in this family has it. Harmless, but
// inconsistent.
1271 assert(MCE && "Invalid constant immediate operand!");
1272 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1275 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1276 assert(N == 1 && "Invalid number of operands!");
1277 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1278 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1281 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1282 assert(N == 1 && "Invalid number of operands!");
1283 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1284 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1287 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1288 assert(N == 1 && "Invalid number of operands!");
1289 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1290 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1293 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1294 assert(N == 1 && "Invalid number of operands!");
1295 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1296 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1299 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1300 assert(N == 1 && "Invalid number of operands!");
1301 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1302 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1305 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1306 assert(N == 1 && "Invalid number of operands!");
1307 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1308 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1311 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1312 assert(N == 1 && "Invalid number of operands!");
1313 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1314 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1317 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1318 assert(N == 1 && "Invalid number of operands!");
1319 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1320 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1323 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1324 assert(N == 1 && "Invalid number of operands!");
1325 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1326 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1329 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1330 assert(N == 1 && "Invalid number of operands!");
1331 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1332 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Logical immediates are stored in the instruction as an (N, immr, imms)
// encoding; these emitters translate the raw value via
// AArch64_AM::encodeLogicalImmediate. The 32-bit variants mask to the low
// 32 bits first; the "Not" variants encode the bitwise complement (used by
// the BIC-style aliases).
1335 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1336 assert(N == 1 && "Invalid number of operands!");
1337 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): line 1338 (presumably 'uint64_t encoding =') is missing from
// this copy -- line 1339 below is its continuation.
1339 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1340 Inst.addOperand(MCOperand::CreateImm(encoding));
1343 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1346 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1347 Inst.addOperand(MCOperand::CreateImm(encoding));
1350 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1351 assert(N == 1 && "Invalid number of operands!");
1352 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1353 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1354 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1355 Inst.addOperand(MCOperand::CreateImm(encoding));
1358 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1359 assert(N == 1 && "Invalid number of operands!");
1360 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): line 1361 ('uint64_t encoding =') missing; 1362 continues it.
1362 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1363 Inst.addOperand(MCOperand::CreateImm(encoding));
// AdvSIMD modified-immediate type 10 (the 64-bit byte-mask form).
1366 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1367 assert(N == 1 && "Invalid number of operands!");
1368 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1369 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1370 Inst.addOperand(MCOperand::CreateImm(encoding));
// Branch/label targets: constants are emitted scaled down by 4 (>> 2, since
// A64 instructions are word-aligned); symbolic expressions are emitted as-is
// for the fixup machinery. NOTE(review): in each method below the inner
// numbering shows missing lines between the dyn_cast and the assert --
// presumably an 'if (!MCE) { ...; return; }' early-out around the addExpr
// call. Confirm against upstream.
1373 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1374 // Branch operands don't encode the low bits, so shift them off
1375 // here. If it's a label, however, just put it on directly as there's
1376 // not enough information now to do anything.
1377 assert(N == 1 && "Invalid number of operands!");
1378 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1380 addExpr(Inst, getImm());
1383 assert(MCE && "Invalid constant immediate operand!");
1384 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1387 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1388 // Branch operands don't encode the low bits, so shift them off
1389 // here. If it's a label, however, just put it on directly as there's
1390 // not enough information now to do anything.
1391 assert(N == 1 && "Invalid number of operands!");
1392 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1394 addExpr(Inst, getImm());
1397 assert(MCE && "Invalid constant immediate operand!");
1398 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1401 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 addExpr(Inst, getImm());
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Simple one-operand emitters: each forwards an already-validated value
// (FP immediate encoding, barrier option, system register number, CRn/CRm
// field, prefetch op) as a plain immediate.
1415 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1416 assert(N == 1 && "Invalid number of operands!");
1417 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1420 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1421 assert(N == 1 && "Invalid number of operands!");
1422 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
// The three system-register emitters pick different pre-resolved encodings
// (MRS read, MSR write, MSR-immediate PState field) from the SysReg data.
1425 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1426 assert(N == 1 && "Invalid number of operands!");
1428 Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
1431 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1432 assert(N == 1 && "Invalid number of operands!");
1434 Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
1437 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1438 assert(N == 1 && "Invalid number of operands!");
1440 Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
1443 void addSysCROperands(MCInst &Inst, unsigned N) const {
1444 assert(N == 1 && "Invalid number of operands!");
1445 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1448 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1449 assert(N == 1 && "Invalid number of operands!");
1450 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Shifter operand: packs shift type + amount into one immediate via
// getShifterImm. NOTE(review): line 1455 (presumably 'unsigned Imm =') is
// missing from this copy; 1456 continues it.
1453 void addShifterOperands(MCInst &Inst, unsigned N) const {
1454 assert(N == 1 && "Invalid number of operands!");
1456 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1457 Inst.addOperand(MCOperand::CreateImm(Imm));
// Arithmetic extend operands: a bare "lsl" written by the user is the alias
// for UXTW (32-bit GPR) / UXTX (64-bit GPR), so it is canonicalized here
// before encoding.
1460 void addExtendOperands(MCInst &Inst, unsigned N) const {
1461 assert(N == 1 && "Invalid number of operands!");
1462 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1463 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1464 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1465 Inst.addOperand(MCOperand::CreateImm(Imm));
1468 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1469 assert(N == 1 && "Invalid number of operands!");
1470 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1471 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1472 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1473 Inst.addOperand(MCOperand::CreateImm(Imm));
// Memory extend: two operands -- "is sign-extended" and "do shift"
// (amount != 0).
1476 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1477 assert(N == 2 && "Invalid number of operands!");
1478 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1479 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1480 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1481 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1484 // For 8-bit load/store instructions with a register offset, both the
1485 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1486 // they're disambiguated by whether the shift was explicit or implicit rather
// than by its value (hence hasShiftExtendAmount() below instead of != 0).
1488 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1489 assert(N == 2 && "Invalid number of operands!");
1490 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1491 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1492 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1493 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
// MOV-alias emitters: extract the 16-bit chunk of the value selected by
// 'Shift'. 'Shift' is not declared in the visible lines -- presumably a
// template parameter of these methods; confirm against upstream. The MOVN
// form complements the value first, matching MOVN's move-of-NOT semantics.
1497 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1498 assert(N == 1 && "Invalid number of operands!");
1500 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1501 uint64_t Value = CE->getValue();
1502 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1506 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1507 assert(N == 1 && "Invalid number of operands!");
1509 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1510 uint64_t Value = CE->getValue();
1511 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
// Debug dump; defined out-of-line below.
1514 void print(raw_ostream &OS) const override;
// Factory functions: each allocates an AArch64Operand of the matching kind,
// fills in its payload, and returns it by unique_ptr. NOTE(review): the
// inner numbering shows the trailing lines of each factory (setting
// Op->StartLoc/EndLoc and 'return Op;') are missing from this copy --
// confirm against upstream.
1516 static std::unique_ptr<AArch64Operand>
1517 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1518 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
// Stores the StringRef's pointer/length, not a copy -- the token text must
// outlive the operand (it points into the parser's buffer).
1519 Op->Tok.Data = Str.data();
1520 Op->Tok.Length = Str.size();
1521 Op->Tok.IsSuffix = IsSuffix;
1527 static std::unique_ptr<AArch64Operand>
1528 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1529 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1530 Op->Reg.RegNum = RegNum;
1531 Op->Reg.isVector = isVector;
1537 static std::unique_ptr<AArch64Operand>
1538 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1539 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1540 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1541 Op->VectorList.RegNum = RegNum;
1542 Op->VectorList.Count = Count;
1543 Op->VectorList.NumElements = NumElements;
1544 Op->VectorList.ElementKind = ElementKind;
1550 static std::unique_ptr<AArch64Operand>
1551 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1552 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1553 Op->VectorIndex.Val = Idx;
1559 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1560 SMLoc E, MCContext &Ctx) {
1561 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1568 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1569 unsigned ShiftAmount,
1572 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
// NOTE(review): stray space before '.Val' -- cosmetic only; left untouched.
1573 Op->ShiftedImm .Val = Val;
1574 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1580 static std::unique_ptr<AArch64Operand>
1581 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1582 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1583 Op->CondCode.Code = Code;
1589 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1591 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1592 Op->FPImm.Val = Val;
1598 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1600 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1601 Op->Barrier.Val = Val;
// System register: keeps the spelled name (pointer/length into the source
// buffer) plus all three pre-resolved encodings.
1607 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1610 uint32_t PStateField,
1612 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1613 Op->SysReg.Data = Str.data();
1614 Op->SysReg.Length = Str.size();
1615 Op->SysReg.MRSReg = MRSReg;
1616 Op->SysReg.MSRReg = MSRReg;
1617 Op->SysReg.PStateField = PStateField;
1623 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1624 SMLoc E, MCContext &Ctx) {
1625 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1626 Op->SysCRImm.Val = Val;
1632 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1634 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1635 Op->Prefetch.Val = Val;
1641 static std::unique_ptr<AArch64Operand>
1642 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1643 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1644 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1645 Op->ShiftExtend.Type = ShOp;
1646 Op->ShiftExtend.Amount = Val;
1647 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1654 } // end anonymous namespace.
// Debug printer: one human-readable form per operand kind.
// NOTE(review): the switch(...) scaffolding and several 'case'/'break' lines
// are missing from this copy (inner numbering has gaps) -- the surviving
// lines below are the per-kind bodies.
1656 void AArch64Operand::print(raw_ostream &OS) const {
1659 OS << "<fpimm " << getFPImm() << "("
1660 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
// Barrier: print the symbolic name when the value maps to one.
1664 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1666 OS << "<barrier " << Name << ">";
1668 OS << "<barrier invalid #" << getBarrier() << ">";
1672 getImm()->print(OS);
1674 case k_ShiftedImm: {
1675 unsigned Shift = getShiftedImmShift();
1676 OS << "<shiftedimm ";
1677 getShiftedImmVal()->print(OS);
1678 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1682 OS << "<condcode " << getCondCode() << ">";
1685 OS << "<register " << getReg() << ">";
1687 case k_VectorList: {
1688 OS << "<vectorlist ";
1689 unsigned Reg = getVectorListStart();
// Registers in a list are architecturally consecutive, so print Reg+i.
1690 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1691 OS << Reg + i << " ";
1696 OS << "<vectorindex " << getVectorIndex() << ">";
1699 OS << "<sysreg: " << getSysReg() << '>';
1702 OS << "'" << getToken() << "'";
1705 OS << "c" << getSysCR();
// Prefetch: symbolic name when valid, raw number otherwise.
1709 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1711 OS << "<prfop " << Name << ">";
1713 OS << "<prfop invalid #" << getPrefetch() << ">";
1716 case k_ShiftExtend: {
1717 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1718 << getShiftExtendAmount();
1719 if (!hasShiftExtendAmount())
1727 /// @name Auto-generated Match Functions
1730 static unsigned MatchRegisterName(StringRef Name);
// Map a vector register name "v0".."v31" to the corresponding Q-register
// encoding (vector registers are modeled as their full 128-bit Q form).
// NOTE(review): the '.Default(...)' terminator and closing brace are missing
// from this copy (inner numbering jumps past 1767) -- presumably .Default(0);
// confirm against upstream.
1734 static unsigned matchVectorRegName(StringRef Name) {
1735 return StringSwitch<unsigned>(Name)
1736 .Case("v0", AArch64::Q0)
1737 .Case("v1", AArch64::Q1)
1738 .Case("v2", AArch64::Q2)
1739 .Case("v3", AArch64::Q3)
1740 .Case("v4", AArch64::Q4)
1741 .Case("v5", AArch64::Q5)
1742 .Case("v6", AArch64::Q6)
1743 .Case("v7", AArch64::Q7)
1744 .Case("v8", AArch64::Q8)
1745 .Case("v9", AArch64::Q9)
1746 .Case("v10", AArch64::Q10)
1747 .Case("v11", AArch64::Q11)
1748 .Case("v12", AArch64::Q12)
1749 .Case("v13", AArch64::Q13)
1750 .Case("v14", AArch64::Q14)
1751 .Case("v15", AArch64::Q15)
1752 .Case("v16", AArch64::Q16)
1753 .Case("v17", AArch64::Q17)
1754 .Case("v18", AArch64::Q18)
1755 .Case("v19", AArch64::Q19)
1756 .Case("v20", AArch64::Q20)
1757 .Case("v21", AArch64::Q21)
1758 .Case("v22", AArch64::Q22)
1759 .Case("v23", AArch64::Q23)
1760 .Case("v24", AArch64::Q24)
1761 .Case("v25", AArch64::Q25)
1762 .Case("v26", AArch64::Q26)
1763 .Case("v27", AArch64::Q27)
1764 .Case("v28", AArch64::Q28)
1765 .Case("v29", AArch64::Q29)
1766 .Case("v30", AArch64::Q30)
1767 .Case("v31", AArch64::Q31)
// Validate a vector arrangement suffix (e.g. ".8b", ".4s"); comparison is
// case-insensitive. NOTE(review): the '.Case(...)' list and '.Default(...)'
// are missing from this copy -- only the prelude and a comment survive.
1771 static bool isValidVectorKind(StringRef Name) {
1772 return StringSwitch<bool>(Name.lower())
1782 // Accept the width neutral ones, too, for verbose syntax. If those
1783 // aren't used in the right places, the token operand won't match so
1784 // all will work out.
// Decompose a (pre-validated) vector kind like ".8b" into its lane count and
// element-kind character. A two-character kind (".b") has no lane count;
// NumElements is presumably initialized to 0 on a missing line -- confirm.
1792 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1793 char &ElementKind) {
1794 assert(isValidVectorKind(Name));
1795 
// The element kind is always the final character (b/h/s/d).
1796 ElementKind = Name.lower()[Name.size() - 1];
1799 if (Name.size() == 2)
1802 // Parse the lane count
1803 Name = Name.drop_front();
1804 while (isdigit(Name.front())) {
1805 NumElements = 10 * NumElements + (Name.front() - '0');
1806 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register and report its range; returns
// true (failure) when no register was recognized.
1810 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1812 StartLoc = getLoc();
1813 RegNo = tryParseRegister();
// EndLoc points at the last character consumed (one before the current loc).
1814 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1815 return (RegNo == (unsigned)-1);
1818 // Matches a register name or register alias previously defined by '.req'
1819 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1821 unsigned RegNum = isVector ? matchVectorRegName(Name)
1822 : MatchRegisterName(Name);
1825 // Check for aliases registered via .req. Canonicalize to lower case.
1826 // That's more consistent since register names are case insensitive, and
1827 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1828 auto Entry = RegisterReqs.find(Name.lower());
1829 if (Entry == RegisterReqs.end())
// set RegNum only when the alias kind (vector vs. scalar) matches the request.
1831 // set RegNum if the match is the right kind of register
1832 if (isVector == Entry->getValue().first)
1833 RegNum = Entry->getValue().second;
1838 /// tryParseRegister - Try to parse a register name. The token must be an
1839 /// Identifier when called, and if it is a register name the token is eaten and
1840 /// the register is added to the operand list.
1841 int AArch64AsmParser::tryParseRegister() {
1842 MCAsmParser &Parser = getParser();
1843 const AsmToken &Tok = Parser.getTok();
1844 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1845 
// Register names are case-insensitive; match on the lowercase form.
1846 std::string lowerCase = Tok.getString().lower();
1847 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1848 // Also handle a few aliases of registers.
// NOTE(review): the guard before this retry (presumably 'if (RegNum == 0)' or
// similar, line 1849) is missing from this copy -- confirm against upstream.
1850 RegNum = StringSwitch<unsigned>(lowerCase)
1851 .Case("fp", AArch64::FP)
1852 .Case("lr", AArch64::LR)
1853 .Case("x31", AArch64::XZR)
1854 .Case("w31", AArch64::WZR)
// Only consume the token on a successful match (the failure early-return is
// on a missing line).
1860 Parser.Lex(); // Eat identifier token.
1864 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1865 /// kind specifier. If it is a register specifier, eat the token and return it.
1866 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1867 MCAsmParser &Parser = getParser();
1868 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1869 TokError("vector register expected");
// (failure return on a missing line)
1873 StringRef Name = Parser.getTok().getString();
1874 // If there is a kind specifier, it's separated from the register name by
// a '.' -- e.g. "v0.8b" splits into Head "v0" and Kind ".8b".
1876 size_t Start = 0, Next = Name.find('.');
1877 StringRef Head = Name.slice(Start, Next);
1878 unsigned RegNum = matchRegisterNameAlias(Head, true);
1881 if (Next != StringRef::npos) {
1882 Kind = Name.slice(Next, StringRef::npos);
1883 if (!isValidVectorKind(Kind)) {
1884 TokError("invalid vector kind qualifier");
1888 Parser.Lex(); // Eat the register token.
// Reached only when no register matched; 'expected' presumably gates this
// error (guard line missing from this copy -- confirm).
1893 TokError("vector register expected");
1897 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1898 AArch64AsmParser::OperandMatchResultTy
1899 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1900 MCAsmParser &Parser = getParser();
// (S = getLoc() presumably captured on missing line 1901 -- confirm)
1903 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1904 Error(S, "Expected cN operand where 0 <= N <= 15");
1905 return MatchOperand_ParseFail;
// The operand must be the letter 'c'/'C' followed by a decimal 0..15.
1908 StringRef Tok = Parser.getTok().getIdentifier();
1909 if (Tok[0] != 'c' && Tok[0] != 'C') {
1910 Error(S, "Expected cN operand where 0 <= N <= 15");
1911 return MatchOperand_ParseFail;
1915 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1916 if (BadNum || CRNum > 15) {
1917 Error(S, "Expected cN operand where 0 <= N <= 15");
1918 return MatchOperand_ParseFail;
1921 Parser.Lex(); // Eat identifier token.
1923 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1924 return MatchOperand_Success;
1927 /// tryParsePrefetch - Try to parse a prefetch operand.
1928 AArch64AsmParser::OperandMatchResultTy
1929 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1930 MCAsmParser &Parser = getParser();
1932 const AsmToken &Tok = Parser.getTok();
1933 // Either an identifier for named values or a 5-bit immediate.
1934 bool Hash = Tok.is(AsmToken::Hash);
1935 if (Hash || Tok.is(AsmToken::Integer)) {
// The hash is optional; only eat it when present (guard on a missing line).
1937 Parser.Lex(); // Eat hash token.
1938 const MCExpr *ImmVal;
1939 if (getParser().parseExpression(ImmVal))
1940 return MatchOperand_ParseFail;
1942 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1944 TokError("immediate value expected for prefetch operand");
1945 return MatchOperand_ParseFail;
// prfop must fit the 5-bit field; the range check itself is on a missing
// line (1948), matching the error message below.
1947 unsigned prfop = MCE->getValue();
1949 TokError("prefetch operand out of range, [0,31] expected");
1950 return MatchOperand_ParseFail;
1953 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1954 return MatchOperand_Success;
// Otherwise: a named hint (e.g. pldl1keep), resolved via the PRFM mapper.
1957 if (Tok.isNot(AsmToken::Identifier)) {
1958 TokError("pre-fetch hint expected");
1959 return MatchOperand_ParseFail;
1963 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1965 TokError("pre-fetch hint expected");
1966 return MatchOperand_ParseFail;
1969 Parser.Lex(); // Eat identifier token.
1970 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1971 return MatchOperand_Success;
1974 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// instruction, classifying any @page/@gotpage-style modifier.
1976 AArch64AsmParser::OperandMatchResultTy
1977 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1978 MCAsmParser &Parser = getParser();
// Optional leading '#' before the expression.
1982 if (Parser.getTok().is(AsmToken::Hash)) {
1983 Parser.Lex(); // Eat hash token.
1986 if (parseSymbolicImmVal(Expr))
1987 return MatchOperand_ParseFail;
1989 AArch64MCExpr::VariantKind ELFRefKind;
1990 MCSymbolRefExpr::VariantKind DarwinRefKind;
1992 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
1993 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
1994 ELFRefKind == AArch64MCExpr::VK_INVALID) {
1995 // No modifier was specified at all; this is the syntax for an ELF basic
1996 // ADRP relocation (unfortunately).
// Wrap in VK_ABS_PAGE so the fixup targets the symbol's page.
1998 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
1999 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2000 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// (the 'Addend != 0' half of this condition is on a missing line)
2002 Error(S, "gotpage label reference not allowed an addend");
2003 return MatchOperand_ParseFail;
2004 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2005 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2006 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2007 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2008 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2009 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2010 // The operand must be an @page or @gotpage qualified symbolref.
2011 Error(S, "page or gotpage label reference expected");
2012 return MatchOperand_ParseFail;
2016 // We have either a label reference possibly with addend or an immediate. The
2017 // addend is a raw value here. The linker will adjust it to only reference the
// page of the target (continuation comment on a missing line).
2019 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2020 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2022 return MatchOperand_Success;
2025 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// instruction. Unlike ADRP, no modifier classification is required.
2027 AArch64AsmParser::OperandMatchResultTy
2028 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2029 MCAsmParser &Parser = getParser();
// Optional leading '#'.
2033 if (Parser.getTok().is(AsmToken::Hash)) {
2034 Parser.Lex(); // Eat hash token.
2037 if (getParser().parseExpression(Expr))
2038 return MatchOperand_ParseFail;
2040 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2041 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2043 return MatchOperand_Success;
2046 /// tryParseFPImm - A floating point immediate expression operand.
2047 AArch64AsmParser::OperandMatchResultTy
2048 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2049 MCAsmParser &Parser = getParser();
// Track whether a '#' was consumed; affects the NoMatch/error decision below.
2053 if (Parser.getTok().is(AsmToken::Hash)) {
2054 Parser.Lex(); // Eat '#'
2058 // Handle negation, as that still comes through as a separate token.
2059 bool isNegative = false;
2060 if (Parser.getTok().is(AsmToken::Minus)) {
// (isNegative = true; Lex() on missing lines)
2064 const AsmToken &Tok = Parser.getTok();
2065 if (Tok.is(AsmToken::Real)) {
2066 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2067 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2068 // If we had a '-' in front, toggle the sign bit.
2069 IntVal ^= (uint64_t)isNegative << 63;
// getFP64Imm returns the 8-bit FMOV encoding, or -1 if unrepresentable.
2070 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2071 Parser.Lex(); // Eat the token.
2072 // Check for out of range values. As an exception, we let Zero through,
2073 // as we handle that special case in post-processing before matching in
2074 // order to use the zero register for it.
2075 if (Val == -1 && !RealVal.isZero()) {
2076 TokError("expected compatible register or floating-point constant");
2077 return MatchOperand_ParseFail;
2079 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2080 return MatchOperand_Success;
2082 if (Tok.is(AsmToken::Integer)) {
// A hex literal is treated as the raw 8-bit encoded value...
2084 if (!isNegative && Tok.getString().startswith("0x")) {
2085 Val = Tok.getIntVal();
2086 if (Val > 255 || Val < 0) {
2087 TokError("encoded floating point value out of range");
2088 return MatchOperand_ParseFail;
// ...while a decimal integer is re-read as a floating-point value and
// encoded like the Real case above (the 'else' line is missing here).
2091 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2092 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2093 // If we had a '-' in front, toggle the sign bit.
2094 IntVal ^= (uint64_t)isNegative << 63;
2095 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2097 Parser.Lex(); // Eat the token.
2098 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2099 return MatchOperand_Success;
// No '#' seen and no FP token: not our operand at all.
2103 return MatchOperand_NoMatch;
2105 TokError("invalid floating point immediate");
2106 return MatchOperand_ParseFail;
2109 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2110 AArch64AsmParser::OperandMatchResultTy
2111 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2112 MCAsmParser &Parser = getParser();
2115 if (Parser.getTok().is(AsmToken::Hash))
2116 Parser.Lex(); // Eat '#'
2117 else if (Parser.getTok().isNot(AsmToken::Integer))
2118 // Operand should start from # or should be integer, emit error otherwise.
2119 return MatchOperand_NoMatch;
2122 if (parseSymbolicImmVal(Imm))
2123 return MatchOperand_ParseFail;
// No explicit shift follows: default to lsl #0, but canonicalize a constant
// of the form X << 12 (X <= 0xfff) into (X, lsl #12) so it matches.
2124 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2125 uint64_t ShiftAmount = 0;
2126 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2128 int64_t Val = MCE->getValue();
2129 if (Val > 0xfff && (Val & 0xfff) == 0) {
2130 Imm = MCConstantExpr::Create(Val >> 12, getContext());
// (ShiftAmount = 12 presumably set on a missing line -- confirm)
2134 SMLoc E = Parser.getTok().getLoc();
2135 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2137 return MatchOperand_Success;
2143 // The optional operand must be "lsl #N" where N is non-negative.
2144 if (!Parser.getTok().is(AsmToken::Identifier) ||
2145 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2146 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2147 return MatchOperand_ParseFail;
// Parse the shift amount; '#' is optional before the integer.
2153 if (Parser.getTok().is(AsmToken::Hash)) {
2157 if (Parser.getTok().isNot(AsmToken::Integer)) {
2158 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2159 return MatchOperand_ParseFail;
2162 int64_t ShiftAmount = Parser.getTok().getIntVal();
2164 if (ShiftAmount < 0) {
2165 Error(Parser.getTok().getLoc(), "positive shift amount required");
2166 return MatchOperand_ParseFail;
2168 Parser.Lex(); // Eat the number
2170 SMLoc E = Parser.getTok().getLoc();
2171 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2172 S, E, getContext()));
2173 return MatchOperand_Success;
2176 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of A64 condition mnemonics to AArch64CC values.
// Note the architectural aliases: cs==hs and cc==lo.
2177 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2178 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2179 .Case("eq", AArch64CC::EQ)
2180 .Case("ne", AArch64CC::NE)
2181 .Case("cs", AArch64CC::HS)
2182 .Case("hs", AArch64CC::HS)
2183 .Case("cc", AArch64CC::LO)
2184 .Case("lo", AArch64CC::LO)
2185 .Case("mi", AArch64CC::MI)
2186 .Case("pl", AArch64CC::PL)
2187 .Case("vs", AArch64CC::VS)
2188 .Case("vc", AArch64CC::VC)
2189 .Case("hi", AArch64CC::HI)
2190 .Case("ls", AArch64CC::LS)
2191 .Case("ge", AArch64CC::GE)
2192 .Case("lt", AArch64CC::LT)
2193 .Case("gt", AArch64CC::GT)
2194 .Case("le", AArch64CC::LE)
2195 .Case("al", AArch64CC::AL)
2196 .Case("nv", AArch64CC::NV)
2197 .Default(AArch64CC::Invalid);
2201 /// parseCondCode - Parse a Condition Code operand.
// Consumes the identifier token and pushes a CondCode operand; when
// invertCondCode is set (e.g. for csinc-style aliases) the code is inverted,
// which is why AL/NV are rejected (they have no meaningful inverse).
2202 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2203 bool invertCondCode) {
2204 MCAsmParser &Parser = getParser();
2206 const AsmToken &Tok = Parser.getTok();
2207 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2209 StringRef Cond = Tok.getString();
2210 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2211 if (CC == AArch64CC::Invalid)
2212 return TokError("invalid condition code");
2213 Parser.Lex(); // Eat identifier token.
2215 if (invertCondCode) {
2216 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2217 return TokError("condition codes AL and NV are invalid for this instruction")
2218 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2222 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2226 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2227 /// them if present.
2228 AArch64AsmParser::OperandMatchResultTy
2229 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2230 MCAsmParser &Parser = getParser();
2231 const AsmToken &Tok = Parser.getTok();
// Recognize the shift/extend operator keyword, case-insensitively.
2232 std::string LowerID = Tok.getString().lower();
2233 AArch64_AM::ShiftExtendType ShOp =
2234 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2235 .Case("lsl", AArch64_AM::LSL)
2236 .Case("lsr", AArch64_AM::LSR)
2237 .Case("asr", AArch64_AM::ASR)
2238 .Case("ror", AArch64_AM::ROR)
2239 .Case("msl", AArch64_AM::MSL)
2240 .Case("uxtb", AArch64_AM::UXTB)
2241 .Case("uxth", AArch64_AM::UXTH)
2242 .Case("uxtw", AArch64_AM::UXTW)
2243 .Case("uxtx", AArch64_AM::UXTX)
2244 .Case("sxtb", AArch64_AM::SXTB)
2245 .Case("sxth", AArch64_AM::SXTH)
2246 .Case("sxtw", AArch64_AM::SXTW)
2247 .Case("sxtx", AArch64_AM::SXTX)
2248 .Default(AArch64_AM::InvalidShiftExtend)
2250 if (ShOp == AArch64_AM::InvalidShiftExtend)
2251 return MatchOperand_NoMatch;
2253 SMLoc S = Tok.getLoc();
// (Parser.Lex() eating the operator keyword is on a missing line)
2256 bool Hash = getLexer().is(AsmToken::Hash);
2257 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
// True shifts require an amount; extends default to an implicit #0.
2258 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2259 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2260 ShOp == AArch64_AM::MSL) {
2261 // We expect a number here.
2262 TokError("expected #imm after shift specifier");
2263 return MatchOperand_ParseFail;
2266 // "extend" type operatoins don't need an immediate, #0 is implicit.
2267 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
// HasExplicitAmount = false: the amount was implied, not written.
2269 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2270 return MatchOperand_Success;
// The '#' is optional before the amount.
2274 Parser.Lex(); // Eat the '#'.
2276 // Make sure we do actually have a number or a parenthesized expression.
2277 SMLoc E = Parser.getTok().getLoc();
2278 if (!Parser.getTok().is(AsmToken::Integer) &&
2279 !Parser.getTok().is(AsmToken::LParen)) {
2280 Error(E, "expected integer shift amount");
2281 return MatchOperand_ParseFail;
2284 const MCExpr *ImmVal;
2285 if (getParser().parseExpression(ImmVal))
2286 return MatchOperand_ParseFail;
2288 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2290 Error(E, "expected constant '#imm' after shift specifier");
2291 return MatchOperand_ParseFail;
2294 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2295 Operands.push_back(AArch64Operand::CreateShiftExtend(
2296 ShOp, MCE->getValue(), true, S, E, getContext()));
2297 return MatchOperand_Success;
2300 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2301 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2302 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2303 OperandVector &Operands) {
2304 if (Name.find('.') != StringRef::npos)
2305 return TokError("invalid operand")
// Rewrite the mnemonic token itself to "sys"; the alias-specific operands are
// appended below.
2309 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2311 MCAsmParser &Parser = getParser();
2312 const AsmToken &Tok = Parser.getTok();
2313 StringRef Op = Tok.getString();
2314 SMLoc S = Tok.getLoc();
2316 const MCExpr *Expr = nullptr;
// Emit the four SYS operands (#op1, Cn, Cm, #op2) for one alias. Encodings
// below follow the ARMv8 ARM system-instruction tables.
2318 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2320 Expr = MCConstantExpr::Create(op1, getContext()); \
2321 Operands.push_back( \
2322 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2323 Operands.push_back( \
2324 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2325 Operands.push_back( \
2326 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2327 Expr = MCConstantExpr::Create(op2, getContext()); \
2328 Operands.push_back( \
2329 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2332 if (Mnemonic == "ic") {
2333 if (!Op.compare_lower("ialluis")) {
2334 // SYS #0, C7, C1, #0
2335 SYS_ALIAS(0, 7, 1, 0);
2336 } else if (!Op.compare_lower("iallu")) {
2337 // SYS #0, C7, C5, #0
2338 SYS_ALIAS(0, 7, 5, 0);
2339 } else if (!Op.compare_lower("ivau")) {
2340 // SYS #3, C7, C5, #1
2341 SYS_ALIAS(3, 7, 5, 1);
2343 return TokError("invalid operand for IC instruction");
2345 } else if (Mnemonic == "dc") {
2346 if (!Op.compare_lower("zva")) {
2347 // SYS #3, C7, C4, #1
2348 SYS_ALIAS(3, 7, 4, 1);
2349 } else if (!Op.compare_lower("ivac")) {
2350 // SYS #0, C7, C6, #1
2351 SYS_ALIAS(0, 7, 6, 1);
2352 } else if (!Op.compare_lower("isw")) {
2353 // SYS #0, C7, C6, #2
2354 SYS_ALIAS(0, 7, 6, 2);
2355 } else if (!Op.compare_lower("cvac")) {
2356 // SYS #3, C7, C10, #1
2357 SYS_ALIAS(3, 7, 10, 1);
2358 } else if (!Op.compare_lower("csw")) {
2359 // SYS #0, C7, C10, #2
2360 SYS_ALIAS(0, 7, 10, 2);
2361 } else if (!Op.compare_lower("cvau")) {
2362 // SYS #3, C7, C11, #1
2363 SYS_ALIAS(3, 7, 11, 1);
2364 } else if (!Op.compare_lower("civac")) {
2365 // SYS #3, C7, C14, #1
2366 SYS_ALIAS(3, 7, 14, 1);
2367 } else if (!Op.compare_lower("cisw")) {
2368 // SYS #0, C7, C14, #2
2369 SYS_ALIAS(0, 7, 14, 2);
2371 return TokError("invalid operand for DC instruction");
2373 } else if (Mnemonic == "at") {
2374 if (!Op.compare_lower("s1e1r")) {
2375 // SYS #0, C7, C8, #0
2376 SYS_ALIAS(0, 7, 8, 0);
2377 } else if (!Op.compare_lower("s1e2r")) {
2378 // SYS #4, C7, C8, #0
2379 SYS_ALIAS(4, 7, 8, 0);
2380 } else if (!Op.compare_lower("s1e3r")) {
2381 // SYS #6, C7, C8, #0
2382 SYS_ALIAS(6, 7, 8, 0);
2383 } else if (!Op.compare_lower("s1e1w")) {
2384 // SYS #0, C7, C8, #1
2385 SYS_ALIAS(0, 7, 8, 1);
2386 } else if (!Op.compare_lower("s1e2w")) {
2387 // SYS #4, C7, C8, #1
2388 SYS_ALIAS(4, 7, 8, 1);
2389 } else if (!Op.compare_lower("s1e3w")) {
2390 // SYS #6, C7, C8, #1
2391 SYS_ALIAS(6, 7, 8, 1);
2392 } else if (!Op.compare_lower("s1e0r")) {
2393 // SYS #0, C7, C8, #2
2394 SYS_ALIAS(0, 7, 8, 2);
2395 } else if (!Op.compare_lower("s1e0w")) {
2396 // SYS #0, C7, C8, #3
2397 SYS_ALIAS(0, 7, 8, 3);
2398 } else if (!Op.compare_lower("s12e1r")) {
2399 // SYS #4, C7, C8, #4
2400 SYS_ALIAS(4, 7, 8, 4);
2401 } else if (!Op.compare_lower("s12e1w")) {
2402 // SYS #4, C7, C8, #5
2403 SYS_ALIAS(4, 7, 8, 5);
2404 } else if (!Op.compare_lower("s12e0r")) {
2405 // SYS #4, C7, C8, #6
2406 SYS_ALIAS(4, 7, 8, 6);
2407 } else if (!Op.compare_lower("s12e0w")) {
2408 // SYS #4, C7, C8, #7
2409 SYS_ALIAS(4, 7, 8, 7);
2411 return TokError("invalid operand for AT instruction");
2413 } else if (Mnemonic == "tlbi") {
2414 if (!Op.compare_lower("vmalle1is")) {
2415 // SYS #0, C8, C3, #0
2416 SYS_ALIAS(0, 8, 3, 0);
2417 } else if (!Op.compare_lower("alle2is")) {
2418 // SYS #4, C8, C3, #0
2419 SYS_ALIAS(4, 8, 3, 0);
2420 } else if (!Op.compare_lower("alle3is")) {
2421 // SYS #6, C8, C3, #0
2422 SYS_ALIAS(6, 8, 3, 0);
2423 } else if (!Op.compare_lower("vae1is")) {
2424 // SYS #0, C8, C3, #1
2425 SYS_ALIAS(0, 8, 3, 1);
2426 } else if (!Op.compare_lower("vae2is")) {
2427 // SYS #4, C8, C3, #1
2428 SYS_ALIAS(4, 8, 3, 1);
2429 } else if (!Op.compare_lower("vae3is")) {
2430 // SYS #6, C8, C3, #1
2431 SYS_ALIAS(6, 8, 3, 1);
2432 } else if (!Op.compare_lower("aside1is")) {
2433 // SYS #0, C8, C3, #2
2434 SYS_ALIAS(0, 8, 3, 2);
2435 } else if (!Op.compare_lower("vaae1is")) {
2436 // SYS #0, C8, C3, #3
2437 SYS_ALIAS(0, 8, 3, 3);
2438 } else if (!Op.compare_lower("alle1is")) {
2439 // SYS #4, C8, C3, #4
2440 SYS_ALIAS(4, 8, 3, 4);
2441 } else if (!Op.compare_lower("vale1is")) {
2442 // SYS #0, C8, C3, #5
2443 SYS_ALIAS(0, 8, 3, 5);
2444 } else if (!Op.compare_lower("vaale1is")) {
2445 // SYS #0, C8, C3, #7
2446 SYS_ALIAS(0, 8, 3, 7);
2447 } else if (!Op.compare_lower("vmalle1")) {
2448 // SYS #0, C8, C7, #0
2449 SYS_ALIAS(0, 8, 7, 0);
2450 } else if (!Op.compare_lower("alle2")) {
2451 // SYS #4, C8, C7, #0
2452 SYS_ALIAS(4, 8, 7, 0);
2453 } else if (!Op.compare_lower("vale2is")) {
2454 // SYS #4, C8, C3, #5
2455 SYS_ALIAS(4, 8, 3, 5);
2456 } else if (!Op.compare_lower("vale3is")) {
2457 // SYS #6, C8, C3, #5
2458 SYS_ALIAS(6, 8, 3, 5);
2459 } else if (!Op.compare_lower("alle3")) {
2460 // SYS #6, C8, C7, #0
2461 SYS_ALIAS(6, 8, 7, 0);
2462 } else if (!Op.compare_lower("vae1")) {
2463 // SYS #0, C8, C7, #1
2464 SYS_ALIAS(0, 8, 7, 1);
2465 } else if (!Op.compare_lower("vae2")) {
2466 // SYS #4, C8, C7, #1
2467 SYS_ALIAS(4, 8, 7, 1);
2468 } else if (!Op.compare_lower("vae3")) {
2469 // SYS #6, C8, C7, #1
2470 SYS_ALIAS(6, 8, 7, 1);
2471 } else if (!Op.compare_lower("aside1")) {
2472 // SYS #0, C8, C7, #2
2473 SYS_ALIAS(0, 8, 7, 2);
2474 } else if (!Op.compare_lower("vaae1")) {
2475 // SYS #0, C8, C7, #3
2476 SYS_ALIAS(0, 8, 7, 3);
2477 } else if (!Op.compare_lower("alle1")) {
2478 // SYS #4, C8, C7, #4
2479 SYS_ALIAS(4, 8, 7, 4);
2480 } else if (!Op.compare_lower("vale1")) {
2481 // SYS #0, C8, C7, #5
2482 SYS_ALIAS(0, 8, 7, 5);
2483 } else if (!Op.compare_lower("vale2")) {
2484 // SYS #4, C8, C7, #5
2485 SYS_ALIAS(4, 8, 7, 5);
2486 } else if (!Op.compare_lower("vale3")) {
2487 // SYS #6, C8, C7, #5
2488 SYS_ALIAS(6, 8, 7, 5);
2489 } else if (!Op.compare_lower("vaale1")) {
2490 // SYS #0, C8, C7, #7
2491 SYS_ALIAS(0, 8, 7, 7);
2492 } else if (!Op.compare_lower("ipas2e1")) {
2493 // SYS #4, C8, C4, #1
2494 SYS_ALIAS(4, 8, 4, 1);
2495 } else if (!Op.compare_lower("ipas2le1")) {
2496 // SYS #4, C8, C4, #5
2497 SYS_ALIAS(4, 8, 4, 5);
2498 } else if (!Op.compare_lower("ipas2e1is")) {
2499 // SYS #4, C8, C0, #1
2500 SYS_ALIAS(4, 8, 0, 1);
2501 } else if (!Op.compare_lower("ipas2le1is")) {
2502 // SYS #4, C8, C0, #5
2503 SYS_ALIAS(4, 8, 0, 5);
2504 } else if (!Op.compare_lower("vmalls12e1")) {
2505 // SYS #4, C8, C7, #6
2506 SYS_ALIAS(4, 8, 7, 6);
2507 } else if (!Op.compare_lower("vmalls12e1is")) {
2508 // SYS #4, C8, C3, #6
2509 SYS_ALIAS(4, 8, 3, 6);
2511 return TokError("invalid operand for TLBI instruction");
2517 Parser.Lex(); // Eat operand.
// Aliases whose name contains "all" operate on everything and take no Xt
// register; all others require one.
2519 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2520 bool HasRegister = false;
2522 // Check for the optional register operand.
2523 if (getLexer().is(AsmToken::Comma)) {
2524 Parser.Lex(); // Eat comma.
2526 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2527 return TokError("expected register operand");
2532 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2533 Parser.eatToEndOfStatement();
2534 return TokError("unexpected token in argument list");
// Diagnose a missing or spurious register against the alias's requirement.
2537 if (ExpectRegister && !HasRegister) {
2538 return TokError("specified " + Mnemonic + " op requires a register");
2540 else if (!ExpectRegister && HasRegister) {
2541 return TokError("specified " + Mnemonic + " op does not use a register");
2544 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier
/// instruction: either a '#'-prefixed immediate in [0, 15] or a named
/// barrier option (e.g. "sy").
2548 AArch64AsmParser::OperandMatchResultTy
2549 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2550 MCAsmParser &Parser = getParser();
2551 const AsmToken &Tok = Parser.getTok();
2553 // Can be either a #imm style literal or an option name
2554 bool Hash = Tok.is(AsmToken::Hash);
2555 if (Hash || Tok.is(AsmToken::Integer)) {
2556 // Immediate operand.
2558 Parser.Lex(); // Eat the '#'
2559 const MCExpr *ImmVal;
2560 SMLoc ExprLoc = getLoc();
2561 if (getParser().parseExpression(ImmVal))
2562 return MatchOperand_ParseFail;
2563 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2565 Error(ExprLoc, "immediate value expected for barrier operand");
2566 return MatchOperand_ParseFail;
// The CRm field encoding the barrier option is 4 bits wide.
2568 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2569 Error(ExprLoc, "barrier operand out of range");
2570 return MatchOperand_ParseFail;
2573 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2574 return MatchOperand_Success;
2577 if (Tok.isNot(AsmToken::Identifier)) {
2578 TokError("invalid operand for instruction");
2579 return MatchOperand_ParseFail;
// Translate a named option (e.g. "ish", "oshld") into its encoding.
2583 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2585 TokError("invalid barrier option name");
2586 return MatchOperand_ParseFail;
2589 // The only valid named option for ISB is 'sy'
2590 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2591 TokError("'sy' or #imm operand expected");
2592 return MatchOperand_ParseFail;
2596 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2597 Parser.Lex(); // Consume the option
2599 return MatchOperand_Success;
/// tryParseSysReg - Parse a named system register operand (for MRS/MSR and
/// MSR-immediate pstate forms). The name is looked up in all three mappers;
/// whichever lookups fail yield -1U, and the operand carries all three
/// candidate encodings for the matcher to choose from.
2602 AArch64AsmParser::OperandMatchResultTy
2603 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2604 MCAsmParser &Parser = getParser();
2605 const AsmToken &Tok = Parser.getTok();
2607 if (Tok.isNot(AsmToken::Identifier))
2608 return MatchOperand_NoMatch;
// MRS-readable registers (subtarget-dependent name set).
2611 auto MRSMapper = AArch64SysReg::MRSMapper(STI.getFeatureBits());
2612 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), IsKnown);
2613 assert(IsKnown == (MRSReg != -1U) &&
2614 "register should be -1 if and only if it's unknown");
// MSR-writable registers.
2616 auto MSRMapper = AArch64SysReg::MSRMapper(STI.getFeatureBits());
2617 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), IsKnown);
2618 assert(IsKnown == (MSRReg != -1U) &&
2619 "register should be -1 if and only if it's unknown");
// PSTATE fields (spsel, daifset, ...).
2621 uint32_t PStateField =
2622 AArch64PState::PStateMapper().fromString(Tok.getString(), IsKnown);
2623 assert(IsKnown == (PStateField != -1U) &&
2624 "register should be -1 if and only if it's unknown");
2626 Operands.push_back(AArch64Operand::CreateSysReg(
2627 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2628 Parser.Lex(); // Eat identifier
2630 return MatchOperand_Success;
2633 /// tryParseVectorRegister - Parse a vector register operand.
// Returns true on failure (no vector register present or malformed index),
// consistent with the other parse* helpers in this file.
2634 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2635 MCAsmParser &Parser = getParser();
2636 if (Parser.getTok().isNot(AsmToken::Identifier))
2640 // Check for a vector register specifier first.
2642 int64_t Reg = tryMatchVectorRegister(Kind, false);
2646 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2647 // If there was an explicit qualifier, that goes on as a literal text
2651 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2653 // If there is an index specifier following the register, parse that too.
2654 if (Parser.getTok().is(AsmToken::LBrac)) {
2655 SMLoc SIdx = getLoc();
2656 Parser.Lex(); // Eat left bracket token.
2658 const MCExpr *ImmVal;
2659 if (getParser().parseExpression(ImmVal))
// The lane index must be a compile-time constant.
2661 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2663 TokError("immediate value expected for vector index");
2668 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2669 Error(E, "']' expected");
2673 Parser.Lex(); // Eat right bracket token.
2675 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2682 /// parseRegister - Parse a non-vector register operand.
// Despite the name, this first tries a vector register (which handles its own
// operand creation) before falling back to a scalar register.
2683 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2684 MCAsmParser &Parser = getParser();
2686 // Try for a vector register.
2687 if (!tryParseVectorRegister(Operands))
2690 // Try for a scalar register.
2691 int64_t Reg = tryParseRegister();
2695 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2697 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2698 // as a string token in the instruction itself.
2699 if (getLexer().getKind() == AsmToken::LBrac) {
2700 SMLoc LBracS = getLoc();
2702 const AsmToken &Tok = Parser.getTok();
2703 if (Tok.is(AsmToken::Integer)) {
2704 SMLoc IntS = getLoc();
2705 int64_t Val = Tok.getIntVal();
// Only the literal "[1]" form is recognized; the three pieces are pushed as
// plain tokens so the matcher can treat them as part of the mnemonic syntax.
2708 if (getLexer().getKind() == AsmToken::RBrac) {
2709 SMLoc RBracS = getLoc();
2712 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2714 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2716 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression, optionally prefixed by
/// an ELF relocation specifier of the form ":spec:" (e.g. ":lo12:sym"). When a
/// specifier is present the resulting expression is wrapped in an
/// AArch64MCExpr carrying the corresponding VariantKind.
2726 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2727 MCAsmParser &Parser = getParser();
2728 bool HasELFModifier = false;
2729 AArch64MCExpr::VariantKind RefKind;
2731 if (Parser.getTok().is(AsmToken::Colon)) {
2732 Parser.Lex(); // Eat ':'
2733 HasELFModifier = true;
2735 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2736 Error(Parser.getTok().getLoc(),
2737 "expect relocation specifier in operand after ':'");
// Specifier names are matched case-insensitively.
2741 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2742 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2743 .Case("lo12", AArch64MCExpr::VK_LO12)
2744 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2745 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2746 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2747 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2748 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2749 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2750 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2751 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2752 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2753 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2754 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2755 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2756 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2757 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2758 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2759 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2760 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2761 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2762 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2763 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2764 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2765 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2766 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2767 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2768 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2769 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2770 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2771 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2772 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2773 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2774 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2775 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2776 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2777 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2778 .Default(AArch64MCExpr::VK_INVALID)
2780 if (RefKind == AArch64MCExpr::VK_INVALID) {
2781 Error(Parser.getTok().getLoc(),
2782 "expect relocation specifier in operand after ':'");
2786 Parser.Lex(); // Eat identifier
2788 if (Parser.getTok().isNot(AsmToken::Colon)) {
2789 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2792 Parser.Lex(); // Eat ':'
2795 if (getParser().parseExpression(ImmVal))
// Wrap the parsed expression with the relocation variant kind.
2799 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2804 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Accepts both range syntax "{ v0.8b - v3.8b }" and comma syntax
// "{ v0.8b, v1.8b, ... }", optionally followed by a lane index "[n]".
// Returns true on failure.
2805 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2806 MCAsmParser &Parser = getParser();
2807 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2809 Parser.Lex(); // Eat left bracket token.
2811 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2814 int64_t PrevReg = FirstReg;
// Range form: "vA - vB".
2817 if (Parser.getTok().is(AsmToken::Minus)) {
2818 Parser.Lex(); // Eat the minus.
2820 SMLoc Loc = getLoc();
2822 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2825 // Any Kind suffices must match on all regs in the list.
2826 if (Kind != NextKind)
2827 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap modulo 32 within a list.
2829 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2831 if (Space == 0 || Space > 3) {
2832 return Error(Loc, "invalid number of vectors");
// Comma form: each register must be the (mod-32) successor of the previous.
2838 while (Parser.getTok().is(AsmToken::Comma)) {
2839 Parser.Lex(); // Eat the comma token.
2841 SMLoc Loc = getLoc();
2843 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2846 // Any Kind suffices must match on all regs in the list.
2847 if (Kind != NextKind)
2848 return Error(Loc, "mismatched register size suffix");
2850 // Registers must be incremental (with wraparound at 31)
2851 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2852 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2853 return Error(Loc, "registers must be sequential");
2860 if (Parser.getTok().isNot(AsmToken::RCurly))
2861 return Error(getLoc(), "'}' expected");
2862 Parser.Lex(); // Eat the '}' token.
2865 return Error(S, "invalid number of vectors");
2867 unsigned NumElements = 0;
2868 char ElementKind = 0;
// Decode the ".8b"/".4h"/... suffix into element count and kind.
2870 parseValidVectorKind(Kind, NumElements, ElementKind);
2872 Operands.push_back(AArch64Operand::CreateVectorList(
2873 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2875 // If there is an index specifier following the list, parse that too.
2876 if (Parser.getTok().is(AsmToken::LBrac)) {
2877 SMLoc SIdx = getLoc();
2878 Parser.Lex(); // Eat left bracket token.
2880 const MCExpr *ImmVal;
2881 if (getParser().parseExpression(ImmVal))
2883 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2885 TokError("immediate value expected for vector index");
2890 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2891 Error(E, "']' expected");
2895 Parser.Lex(); // Eat right bracket token.
2897 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed by
/// ", #0" (used by LDADDx-style syntax where only a zero offset is legal).
2903 AArch64AsmParser::OperandMatchResultTy
2904 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2905 MCAsmParser &Parser = getParser();
2906 const AsmToken &Tok = Parser.getTok();
2907 if (!Tok.is(AsmToken::Identifier))
2908 return MatchOperand_NoMatch;
2910 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2912 MCContext &Ctx = getContext();
2913 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2914 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2915 return MatchOperand_NoMatch;
2918 Parser.Lex(); // Eat register
// No trailing ", #0": the register alone is the operand.
2920 if (Parser.getTok().isNot(AsmToken::Comma)) {
2922 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2923 return MatchOperand_Success;
2925 Parser.Lex(); // Eat comma.
2927 if (Parser.getTok().is(AsmToken::Hash))
2928 Parser.Lex(); // Eat hash
2930 if (Parser.getTok().isNot(AsmToken::Integer)) {
2931 Error(getLoc(), "index must be absent or #0");
2932 return MatchOperand_ParseFail;
// Only a literal constant zero is accepted after the comma.
2935 const MCExpr *ImmVal;
2936 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2937 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2938 Error(getLoc(), "index must be absent or #0");
2939 return MatchOperand_ParseFail;
2943 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2944 return MatchOperand_Success;
2947 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
2948 /// the operand regardless of the mnemonic.
2949 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2950 bool invertCondCode) {
2951 MCAsmParser &Parser = getParser();
2952 // Check if the current operand has a custom associated parser, if so, try to
2953 // custom parse the operand, or fallback to the general approach.
2954 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2955 if (ResTy == MatchOperand_Success)
2957 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2958 // there was a match, but an error occurred, in which case, just return that
2959 // the operand parsing failed.
2960 if (ResTy == MatchOperand_ParseFail)
2963 // Nothing custom, so do general case parsing.
2965 switch (getLexer().getKind()) {
2969 if (parseSymbolicImmVal(Expr))
2970 return Error(S, "invalid operand");
2972 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2973 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2976 case AsmToken::LBrac: {
2977 SMLoc Loc = Parser.getTok().getLoc();
2978 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2980 Parser.Lex(); // Eat '['
2982 // There's no comma after a '[', so we can parse the next operand
2984 return parseOperand(Operands, false, false);
2986 case AsmToken::LCurly:
2987 return parseVectorList(Operands);
2988 case AsmToken::Identifier: {
2989 // If we're expecting a Condition Code operand, then just parse that.
2991 return parseCondCode(Operands, invertCondCode);
2993 // If it's a register name, parse it.
2994 if (!parseRegister(Operands))
2997 // This could be an optional "shift" or "extend" operand.
2998 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2999 // We can only continue if no tokens were eaten.
3000 if (GotShift != MatchOperand_NoMatch)
3003 // This was not a register so parse other operands that start with an
3004 // identifier (like labels) as expressions and create them as immediates.
3005 const MCExpr *IdVal;
3007 if (getParser().parseExpression(IdVal))
3010 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3011 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3014 case AsmToken::Integer:
3015 case AsmToken::Real:
3016 case AsmToken::Hash: {
3017 // #42 -> immediate.
3019 if (getLexer().is(AsmToken::Hash))
3022 // Parse a negative sign
3023 bool isNegative = false;
3024 if (Parser.getTok().is(AsmToken::Minus)) {
3026 // We need to consume this token only when we have a Real, otherwise
3027 // we let parseSymbolicImmVal take care of it
3028 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3032 // The only Real that should come through here is a literal #0.0 for
3033 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3034 // so convert the value.
3035 const AsmToken &Tok = Parser.getTok();
3036 if (Tok.is(AsmToken::Real)) {
3037 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3038 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3039 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3040 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3041 Mnemonic != "fcmlt")
3042 return TokError("unexpected floating point literal");
3043 else if (IntVal != 0 || isNegative)
3044 return TokError("expected floating-point constant #0.0");
3045 Parser.Lex(); // Eat the token.
// Push "#0" and ".0" as raw tokens to match the instruction definitions.
3048 AArch64Operand::CreateToken("#0", false, S, getContext()));
3050 AArch64Operand::CreateToken(".0", false, S, getContext()));
3054 const MCExpr *ImmVal;
3055 if (parseSymbolicImmVal(ImmVal))
3058 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3059 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3062 case AsmToken::Equal: {
3063 SMLoc Loc = Parser.getTok().getLoc();
3064 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3065 return Error(Loc, "unexpected token in operand");
3066 Parser.Lex(); // Eat '='
3067 const MCExpr *SubExprVal;
3068 if (getParser().parseExpression(SubExprVal))
3071 if (Operands.size() < 2 ||
3072 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
// The destination register decides whether this is a 64-bit (X) load.
3076 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3077 Operands[1]->getReg());
3079 MCContext& Ctx = getContext();
3080 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3081 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3082 if (isa<MCConstantExpr>(SubExprVal)) {
3083 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Strip 16-bit-aligned zero low halves so the value fits a MOVZ+LSL form;
// X registers allow shifts up to 48, W registers up to 16.
3084 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3085 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3089 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3090 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3091 Operands.push_back(AArch64Operand::CreateImm(
3092 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3094 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3095 ShiftAmt, true, S, E, Ctx));
3098 APInt Simm = APInt(64, Imm << ShiftAmt);
3099 // check if the immediate is an unsigned or signed 32-bit int for W regs
3100 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3101 return Error(Loc, "Immediate too large for register");
3103 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3104 const MCExpr *CPLoc =
3105 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3106 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3112 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
// operands. Handles .req directives, SYS aliases, b<cond> shorthand, dotted
// mnemonic suffixes, and the implicit condition-code operands of the
// conditional-select/compare families.
3114 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3115 StringRef Name, SMLoc NameLoc,
3116 OperandVector &Operands) {
3117 MCAsmParser &Parser = getParser();
// Canonicalize legacy "b<cond>" spellings to the "b.<cond>" form.
3118 Name = StringSwitch<StringRef>(Name.lower())
3119 .Case("beq", "b.eq")
3120 .Case("bne", "b.ne")
3121 .Case("bhs", "b.hs")
3122 .Case("bcs", "b.cs")
3123 .Case("blo", "b.lo")
3124 .Case("bcc", "b.cc")
3125 .Case("bmi", "b.mi")
3126 .Case("bpl", "b.pl")
3127 .Case("bvs", "b.vs")
3128 .Case("bvc", "b.vc")
3129 .Case("bhi", "b.hi")
3130 .Case("bls", "b.ls")
3131 .Case("bge", "b.ge")
3132 .Case("blt", "b.lt")
3133 .Case("bgt", "b.gt")
3134 .Case("ble", "b.le")
3135 .Case("bal", "b.al")
3136 .Case("bnv", "b.nv")
3139 // First check for the AArch64-specific .req directive.
3140 if (Parser.getTok().is(AsmToken::Identifier) &&
3141 Parser.getTok().getIdentifier() == ".req") {
3142 parseDirectiveReq(Name, NameLoc);
3143 // We always return 'error' for this, as we're done with this
3144 // statement and don't need to match the 'instruction'.
3148 // Create the leading tokens for the mnemonic, split by '.' characters.
3149 size_t Start = 0, Next = Name.find('.');
3150 StringRef Head = Name.slice(Start, Next);
3152 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3153 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3154 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3155 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3156 Parser.eatToEndOfStatement();
3161 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3164 // Handle condition codes for a branch mnemonic
3165 if (Head == "b" && Next != StringRef::npos) {
3167 Next = Name.find('.', Start + 1);
3168 Head = Name.slice(Start + 1, Next);
3170 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3171 (Head.data() - Name.data()));
3172 AArch64CC::CondCode CC = parseCondCodeString(Head);
3173 if (CC == AArch64CC::Invalid)
3174 return Error(SuffixLoc, "invalid condition code");
3176 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3178 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3181 // Add the remaining tokens in the mnemonic.
3182 while (Next != StringRef::npos) {
3184 Next = Name.find('.', Start + 1);
3185 Head = Name.slice(Start, Next);
3186 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3187 (Head.data() - Name.data()) + 1);
3189 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3192 // Conditional compare instructions have a Condition Code operand, which needs
3193 // to be parsed and an immediate operand created.
3194 bool condCodeFourthOperand =
3195 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3196 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3197 Head == "csinc" || Head == "csinv" || Head == "csneg");
3199 // These instructions are aliases to some of the conditional select
3200 // instructions. However, the condition code is inverted in the aliased
3203 // FIXME: Is this the correct way to handle these? Or should the parser
3204 // generate the aliased instructions directly?
3205 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3206 bool condCodeThirdOperand =
3207 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3209 // Read the remaining operands.
3210 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3211 // Read the first operand.
3212 if (parseOperand(Operands, false, false)) {
3213 Parser.eatToEndOfStatement();
// N tracks the 1-based operand position so the condCode*Operand flags can
// fire on the correct comma-separated operand.
3218 while (getLexer().is(AsmToken::Comma)) {
3219 Parser.Lex(); // Eat the comma.
3221 // Parse and remember the operand.
3222 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3223 (N == 3 && condCodeThirdOperand) ||
3224 (N == 2 && condCodeSecondOperand),
3225 condCodeSecondOperand || condCodeThirdOperand)) {
3226 Parser.eatToEndOfStatement();
3230 // After successfully parsing some operands there are two special cases to
3231 // consider (i.e. notional operands not separated by commas). Both are due
3232 // to memory specifiers:
3233 // + An RBrac will end an address for load/store/prefetch
3234 // + An '!' will indicate a pre-indexed operation.
3236 // It's someone else's responsibility to make sure these tokens are sane
3237 // in the given context!
3238 if (Parser.getTok().is(AsmToken::RBrac)) {
3239 SMLoc Loc = Parser.getTok().getLoc();
3240 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3245 if (Parser.getTok().is(AsmToken::Exclaim)) {
3246 SMLoc Loc = Parser.getTok().getLoc();
3247 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3256 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3257 SMLoc Loc = Parser.getTok().getLoc();
3258 Parser.eatToEndOfStatement();
3259 return Error(Loc, "unexpected token in argument list");
3262 Parser.Lex(); // Consume the EndOfStatement
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Post-match semantic validation of a successfully matched MCInst.
// \p Loc holds the source location of each parsed operand (index 0 is the
// first operand after the mnemonic) so diagnostics can point at the exact
// offending operand.  Returns true (after emitting an Error) on rejection.
//
// NOTE(review): several control-flow lines (closing braces, `break;`
// statements and some `if` guards) appear to have been dropped from this
// copy of the file — compare against upstream LLVM before building.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  // Writeback (pre/post-indexed) LDP of W/X registers: operand 0 is the
  // writeback result, so Rt/Rt2/Rn are operands 1-3.  The base register
  // must not overlap either destination (isSubRegisterEq also catches
  // e.g. Wn vs Xn overlap).
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
  // Non-writeback LDP: the only constraint is that the two destination
  // registers differ.
  // NOTE(review): the `if (Rt == Rt2)` guard seems to be missing here in
  // this copy — as written the Error would be unconditional; confirm
  // against upstream.
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
  // Writeback LDP variants (FP registers and LDPSWpost): same Rt != Rt2
  // constraint, with operands shifted by one for the writeback result.
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
  // Writeback STP: the base register must not overlap either source
  // register.
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
  // Writeback single-register loads: the base must not overlap the
  // destination register.
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
  // Writeback single-register stores: the base must not overlap the data
  // register.
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      // Classify the symbolic immediate; reject anything classifySymbolRef
      // cannot decompose into symbol-ref + addend.
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        return Error(Loc[2], "invalid immediate expression");
      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
      // Only allow these with ADDXri/ADDWri
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
           Inst.getOpcode() == AArch64::ADDWri))
      // Don't allow expressions in the immediate field otherwise
      return Error(Loc[2], "invalid immediate expression");
// Translate a matcher failure code into a human-readable diagnostic at
// \p Loc.  Always returns true (the Error convention), so callers can
// `return showMatchError(...)` directly.
// NOTE(review): the `switch (ErrCode) {` header appears to have been
// dropped from this copy of the file — confirm against upstream.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  case Match_MissingFeature:
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
        "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
        "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
        "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
        "expected compatible register or floating-point constant");
  // Signed scaled/unscaled memory offsets.
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  // Register-offset extend operands (W-register index).
  case Match_InvalidMemoryWExtend8:
        "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
        "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
        "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
        "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
        "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  // Register-offset extend operands (X-register index).
  case Match_InvalidMemoryXExtend8:
        "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
        "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
        "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
        "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
        "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  // Unsigned scaled memory offsets.
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate ranges.
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // Vector lane indices.
  case Match_InvalidIndex1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexB:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexH:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexS:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexD:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
    return Error(Loc, "expected readable system register");
    return Error(Loc, "expected writable system register or pstate");
  case Match_MnemonicFail:
    return Error(Loc, "unrecognized instruction mnemonic");
    llvm_unreachable("unexpected error code!");
3566 static const char *getSubtargetFeatureName(uint64_t Val);
// Main match-and-emit entry point.  First rewrites a number of alias
// mnemonics (lsl->ubfm, bfi/sbfiz/ubfiz->bfm/sbfm/ubfm, bfxil/sbfx/ubfx->
// bfm/sbfm/ubfm, sxt*/uxt* register-width tweaks, fmov #0.0->zero register)
// that the TableGen matcher cannot express, then runs the generated matcher
// twice: once against the short-form NEON table, and on failure against the
// long-form table.  On success it validates and emits the instruction; on
// failure it produces the best diagnostic it can.
// NOTE(review): some interior guard lines and braces (e.g. `if (Op3CE) {`,
// `MCStreamer &Out` in the signature, `MatchResult =` before the second
// MatchInstructionImpl call) appear to have been dropped from this copy —
// compare against upstream LLVM before building.
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");

  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();

  // "lsl Rd, Rn, #imm" is an alias of "ubfm Rd, Rn, #(-imm MOD width),
  // #(width-1-imm)"; rewrite the mnemonic and immediates accordingly.
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
      uint64_t Op3Val = Op3CE->getValue();
      uint64_t NewOp3Val = 0;
      uint64_t NewOp4Val = 0;
      // Pick the modulus from the destination register width (W vs X).
      if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
        NewOp3Val = (32 - Op3Val) & 0x1f;
        NewOp4Val = 31 - Op3Val;
        NewOp3Val = (64 - Op3Val) & 0x3f;
        NewOp4Val = 63 - Op3Val;

      const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
      const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());

      Operands[0] = AArch64Operand::CreateToken(
          "ubfm", false, Op.getStartLoc(), getContext());
      Operands.push_back(AArch64Operand::CreateImm(
          NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
      Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                              Op3.getEndLoc(), getContext());
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(

          // Validate lsb/width against the destination register width
          // before doing the BFM-style immediate transform.
          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp3Val = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
            NewOp3Val = (32 - Op3Val) & 0x1f;
            NewOp3Val = (64 - Op3Val) & 0x3f;

          uint64_t NewOp4Val = Op4Val - 1;

          // The transformed immediates must still describe a field that
          // fits in the register.
          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");

          const MCExpr *NewOp3 =
              MCConstantExpr::Create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
            llvm_unreachable("No valid mnemonic for alias?");

    // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
    // UBFX -> UBFM aliases.
    } else if (NumOperands == 5 &&
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          // BFM's second immediate is the msb of the field: lsb+width-1.
          uint64_t NewOp4Val = Op3Val + Op4Val - 1;

          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(Op4.getStartLoc(),
                         "requested extract overflows register");

          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfx")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfx")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
            llvm_unreachable("No valid mnemonic for alias?");

  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  //        InstAlias can't quite handle this since the reg classes aren't
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      unsigned Reg = getXRegFromWReg(Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
        unsigned Reg = getXRegFromWReg(Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        unsigned Reg = getWRegFromXReg(Op.getReg());
        Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());

  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
  if (NumOperands == 3 && Tok == "fmov") {
    AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
    // getFPImm() == (unsigned)-1 is the parser's marker for #0.0.
    if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
          AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
      Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());

  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success)
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, OperandLocs))

    Out.EmitInstruction(Inst, STI);
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
    std::string Msg = "instruction requires:";
    // Walk the ErrorInfo bitmask and append the name of each missing
    // subtarget feature.
    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
      if (ErrorInfo & Mask) {
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
    return Error(IDLoc, Msg);
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())

    // If the match failed on a suffix token operand, tweak the diagnostic
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;

    return showMatchError(ErrorLoc, MatchResult);
  // All operand-class failures below share one code path: find the
  // operand's location and let showMatchError render the message.
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidIndex1:
  case Match_InvalidIndexB:
  case Match_InvalidIndexH:
  case Match_InvalidIndexS:
  case Match_InvalidIndexD:
  case Match_InvalidLabel:
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction");
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
    return showMatchError(ErrorLoc, MatchResult);

  llvm_unreachable("Implement any new match types added!");
/// ParseDirective parses the arm specific directives
/// Dispatches AArch64-specific assembler directives (.hword/.word/.xword,
/// .tlsdesccall, .ltorg/.pool, .unreq, and — for ELF only — .inst) to their
/// handlers; anything unrecognized falls through to parseDirectiveLOH.
/// Returns true on error, per the MCTargetAsmParser convention.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCObjectFileInfo::Environment Format =
      getContext().getObjectFileInfo()->getObjectFileType();
  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;

  StringRef IDVal = DirectiveID.getIdentifier();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".hword")
    return parseDirectiveWord(2, Loc);
  if (IDVal == ".word")
    return parseDirectiveWord(4, Loc);
  if (IDVal == ".xword")
    return parseDirectiveWord(8, Loc);
  if (IDVal == ".tlsdesccall")
    return parseDirectiveTLSDescCall(Loc);
  if (IDVal == ".ltorg" || IDVal == ".pool")
    return parseDirectiveLtorg(Loc);
  if (IDVal == ".unreq")
    return parseDirectiveUnreq(DirectiveID.getLoc());

  // .inst is only supported for ELF targets.
  if (!IsMachO && !IsCOFF) {
    if (IDVal == ".inst")
      return parseDirectiveInst(Loc);

  return parseDirectiveLOH(IDVal, Loc);
/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a \p Size-byte data value.
/// NOTE(review): the loop header and some return/Lex lines appear to have
/// been dropped from this copy — compare against upstream.
bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
/// parseDirectiveInst
///  ::= .inst opcode [, ...]
/// Emits each comma-separated constant expression as a raw instruction word
/// via the target streamer.  Diagnoses an empty operand list, non-constant
/// expressions, and stray tokens.
bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
  MCAsmParser &Parser = getParser();
  // ".inst" with no operands is an error.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    Error(Loc, "expected expression following directive");

    if (getParser().parseExpression(Expr)) {
      Error(Loc, "expected expression");

    // Each operand must fold to a compile-time constant.
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
      Error(Loc, "expected constant expression");

    getTargetStreamer().emitInst(Value->getValue());

    if (getLexer().is(AsmToken::EndOfStatement))

    if (getLexer().isNot(AsmToken::Comma)) {
      Error(Loc, "unexpected token in directive");

    Parser.Lex(); // Eat comma.
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
// Emits a TLSDESCCALL pseudo-instruction carrying the named symbol wrapped
// in a VK_TLSDESC expression, marking the blr for TLS descriptor
// relaxation/relocation.
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
  Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());

  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst, STI);
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
/// Parses a Mach-O linker-optimization-hint directive: the LOH kind may be
/// given by name or by numeric id, followed by exactly the number of label
/// arguments that kind requires; the result is forwarded to the streamer.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  if (IDVal != MCLOHDirectiveName())
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    // NOTE(review): `Id <= -1U` compares an int64_t against an unsigned
    // constant (implicit conversion applies) — verify the intended range
    // check against upstream.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  // Consume the identifier.
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().GetOrCreateSymbol(Name));

    // Labels are comma-separated; no trailing comma after the last one.
    if (Idx + 1 == NbArgs)
    if (getLexer().isNot(AsmToken::Comma))
      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");

  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
/// Flushes the current constant pool at this point in the output stream.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
/// parseDirectiveReq
///  ::= name .req registername
/// Records \p Name as an alias for the parsed register (scalar or vector)
/// in RegisterReqs; warns when an existing alias is redefined to a
/// different register.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  unsigned RegNum = tryParseRegister();
  bool IsVector = false;

  // Not a scalar register: try a vector register (it must not carry a
  // type suffix like ".4s" in a .req definition).
  if (RegNum == static_cast<unsigned>(-1)) {
    RegNum = tryMatchVectorRegister(Kind, false);
    if (!Kind.empty()) {
      Error(SRegLoc, "vector register without type specifier expected");

  if (RegNum == static_cast<unsigned>(-1)) {
    Parser.eatToEndOfStatement();
    Error(SRegLoc, "register name or alias expected");

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
    Parser.eatToEndOfStatement();

  Parser.Lex(); // Consume the EndOfStatement

  // insert() keeps the first definition; warn if the new one differs.
  auto pair = std::make_pair(IsVector, RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
/// parseDirectiveUnreq
///  ::= .unreq registername
/// Removes a register alias previously created with .req.  Unknown names
/// are silently ignored (erase on a missing key is a no-op).
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
    Parser.eatToEndOfStatement();
  // Aliases are stored lower-cased, so normalize before erasing.
  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
  Parser.Lex(); // Eat the identifier.
// Decompose \p Expr into an optional AArch64 ELF modifier (ELFRefKind), a
// Darwin symbol-ref variant (DarwinRefKind), and a constant Addend.
// Accepts a bare symbol reference or symbol +/- constant; returns false for
// anything more complicated.  Also rejects expressions that mix Darwin and
// ELF modifier syntax.
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;

  // Peel off an outer AArch64 modifier (e.g. :lo12:) if present.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();

  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);

  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());

  DarwinRefKind = SE->getKind();

  // Only symbol + constant and symbol - constant are representable.
  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)

  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());

  Addend = AddendExpr->getValue();
  if (BE->getOpcode() == MCBinaryExpr::Sub)

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
/// Force static initialization.
/// Registers this asm parser with the target registry for all three
/// AArch64 target flavors (little-endian, big-endian, and the legacy
/// "arm64" triple).
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
  RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4199 #define GET_REGISTER_MATCHER
4200 #define GET_SUBTARGET_FEATURE_NAME
4201 #define GET_MATCHER_IMPLEMENTATION
4202 #include "AArch64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
// Custom operand-class check used by the generated matcher: for token
// classes that stand for a fixed literal immediate, accept an immediate
// operand whose constant value equals the expected one.
// NOTE(review): the second parameter (the Kind enum) and the switch that
// sets ExpectedVal appear to have been dropped from this copy — compare
// against upstream.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  int64_t ExpectedVal;
    return Match_InvalidOperand;
    return Match_InvalidOperand;
  // Only a compile-time constant immediate can match a literal token.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;