1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
// NOTE(review): this file appears to be a line-numbered paste of LLVM's
// AArch64AsmParser.cpp with many source lines elided (the embedded numeric
// prefixes are part of the text; guards and closing braces are missing).
// Comments below document only what the visible fragment shows.
//
// AArch64AsmParser: the target-specific assembly parser. It owns the
// per-operand parse helpers, directive handlers, and the glue to the
// auto-generated matcher (AArch64GenAsmMatcher.inc).
40 class AArch64AsmParser : public MCTargetAsmParser {
42   typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45   StringRef Mnemonic; ///< Instruction mnemonic.
// Convenience accessors forwarding to the generic parser/lexer.
49   MCAsmParser &getParser() const { return Parser; }
50   MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52   SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// Hand-written parsers for operand forms the table-generated matcher
// cannot handle directly (SYS aliases, condition codes, vector lists...).
54   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
56   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57   int tryParseRegister();
58   int tryMatchVectorRegister(StringRef &Kind, bool expected);
59   bool parseRegister(OperandVector &Operands);
60   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
61   bool parseVectorList(OperandVector &Operands);
62   bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostics are forwarded to the generic MCAsmParser.
65   void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
66   bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
67   bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Target directive handlers (.word / TLS-descriptor call / Linker
// Optimization Hints).
69   bool parseDirectiveWord(unsigned Size, SMLoc L);
70   bool parseDirectiveTLSDescCall(SMLoc L);
72   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
74   bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
75   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
76                                OperandVector &Operands, MCStreamer &Out,
78                                bool MatchingInlineAsm) override;
79 /// @name Auto-generated Match Functions
82 #define GET_ASSEMBLER_HEADER
83 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers referenced from the generated match table.
87   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
88   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
89   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
90   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
91   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
92   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
93   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
94   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
95   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
96   OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
97   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
98   bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match result codes, extending the generic set.
101   enum AArch64MatchResultTy {
102     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
103 #define GET_OPERAND_DIAGNOSTIC_TYPES
104 #include "AArch64GenAsmMatcher.inc"
106   AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
107                    const MCInstrInfo &MII,
108                    const MCTargetOptions &Options)
109       : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
110     MCAsmParserExtension::Initialize(_Parser);
112     // Initialize the set of available features.
113     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface overrides.
116   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
117                         SMLoc NameLoc, OperandVector &Operands) override;
118   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
119   bool ParseDirective(AsmToken DirectiveID) override;
120   unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
121                                       unsigned Kind) override;
// Decompose a symbolic expression into its AArch64/Darwin relocation
// modifier and addend; shared by the operand predicates below.
123   static bool classifySymbolRef(const MCExpr *Expr,
124                                 AArch64MCExpr::VariantKind &ELFRefKind,
125                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
128 } // end anonymous namespace
132 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
// A discriminated union: Kind selects which of the member structs below is
// the active payload. Several member structs and enum values are elided in
// this fragment — assumed to follow the visible pattern; confirm upstream.
134 class AArch64Operand : public MCParsedAsmOperand {
152   SMLoc StartLoc, EndLoc;
157     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
165   struct VectorListOp {
168     unsigned NumElements;
169     unsigned ElementKind;
172   struct VectorIndexOp {
180   struct ShiftedImmOp {
182     unsigned ShiftAmount;
186     AArch64CC::CondCode Code;
190     unsigned Val; // Encoded 8-bit representation.
194     unsigned Val; // Not the enum since not all values have names.
200     uint64_t FeatureBits; // We need to pass through information about which
201                           // core we are compiling for so that the SysReg
202                           // Mappers can appropriately conditionalize.
213   struct ShiftExtendOp {
214     AArch64_AM::ShiftExtendType Type;
216     bool HasExplicitAmount;
// The union payload members, one per operand kind.
226     struct VectorListOp VectorList;
227     struct VectorIndexOp VectorIndex;
229     struct ShiftedImmOp ShiftedImm;
230     struct CondCodeOp CondCode;
231     struct FPImmOp FPImm;
232     struct BarrierOp Barrier;
233     struct SysRegOp SysReg;
234     struct SysCRImmOp SysCRImm;
235     struct PrefetchOp Prefetch;
236     struct ShiftExtendOp ShiftExtend;
239   // Keep the MCContext around as the MCExprs may need manipulated during
240   // the add<>Operands() calls.
243   AArch64Operand(KindTy K, MCContext &_Ctx)
244       : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor: copies only the payload member selected by o.Kind
// (intervening switch/case lines are elided in this fragment).
247   AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
249     StartLoc = o.StartLoc;
259       ShiftedImm = o.ShiftedImm;
262       CondCode = o.CondCode;
274       VectorList = o.VectorList;
277       VectorIndex = o.VectorIndex;
283       SysCRImm = o.SysCRImm;
286       Prefetch = o.Prefetch;
289       ShiftExtend = o.ShiftExtend;
// Typed accessors for the operand union. Each one asserts that Kind matches
// the requested payload before reading it, so misuse trips in +Asserts
// builds rather than silently reading the wrong union member.
294   /// getStartLoc - Get the location of the first token of this operand.
295   SMLoc getStartLoc() const override { return StartLoc; }
296   /// getEndLoc - Get the location of the last token of this operand.
297   SMLoc getEndLoc() const override { return EndLoc; }
299   StringRef getToken() const {
300     assert(Kind == k_Token && "Invalid access!");
301     return StringRef(Tok.Data, Tok.Length);
304   bool isTokenSuffix() const {
305     assert(Kind == k_Token && "Invalid access!");
309   const MCExpr *getImm() const {
310     assert(Kind == k_Immediate && "Invalid access!");
314   const MCExpr *getShiftedImmVal() const {
315     assert(Kind == k_ShiftedImm && "Invalid access!");
316     return ShiftedImm.Val;
319   unsigned getShiftedImmShift() const {
320     assert(Kind == k_ShiftedImm && "Invalid access!");
321     return ShiftedImm.ShiftAmount;
324   AArch64CC::CondCode getCondCode() const {
325     assert(Kind == k_CondCode && "Invalid access!");
326     return CondCode.Code;
329   unsigned getFPImm() const {
330     assert(Kind == k_FPImm && "Invalid access!");
334   unsigned getBarrier() const {
335     assert(Kind == k_Barrier && "Invalid access!");
339   unsigned getReg() const override {
340     assert(Kind == k_Register && "Invalid access!");
344   unsigned getVectorListStart() const {
345     assert(Kind == k_VectorList && "Invalid access!");
346     return VectorList.RegNum;
349   unsigned getVectorListCount() const {
350     assert(Kind == k_VectorList && "Invalid access!");
351     return VectorList.Count;
354   unsigned getVectorIndex() const {
355     assert(Kind == k_VectorIndex && "Invalid access!");
356     return VectorIndex.Val;
359   StringRef getSysReg() const {
360     assert(Kind == k_SysReg && "Invalid access!");
361     return StringRef(SysReg.Data, SysReg.Length);
364   uint64_t getSysRegFeatureBits() const {
365     assert(Kind == k_SysReg && "Invalid access!");
366     return SysReg.FeatureBits;
369   unsigned getSysCR() const {
370     assert(Kind == k_SysCR && "Invalid access!");
374   unsigned getPrefetch() const {
375     assert(Kind == k_Prefetch && "Invalid access!");
379   AArch64_AM::ShiftExtendType getShiftExtendType() const {
380     assert(Kind == k_ShiftExtend && "Invalid access!");
381     return ShiftExtend.Type;
384   unsigned getShiftExtendAmount() const {
385     assert(Kind == k_ShiftExtend && "Invalid access!");
386     return ShiftExtend.Amount;
389   bool hasShiftExtendAmount() const {
390     assert(Kind == k_ShiftExtend && "Invalid access!");
391     return ShiftExtend.HasExplicitAmount;
// Immediate-class predicates. Each isXxx() accepts an operand only when it
// is a constant expression whose value lies in the encodable range for the
// corresponding instruction field (the !isImm()/!MCE early-return guard
// lines are elided in this fragment).
394   bool isImm() const override { return Kind == k_Immediate; }
395   bool isMem() const override { return false; }
// Signed 9-bit immediate (LDUR/STUR-style offsets): [-256, 255].
396   bool isSImm9() const {
399     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
402     int64_t Val = MCE->getValue();
403     return (Val >= -256 && Val < 256);
// Signed 7-bit immediates scaled by 4/8/16 (load/store-pair offsets):
// the value must be a multiple of the scale and fit in 7 bits post-scale.
405   bool isSImm7s4() const {
408     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
411     int64_t Val = MCE->getValue();
412     return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
414   bool isSImm7s8() const {
417     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
420     int64_t Val = MCE->getValue();
421     return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
423   bool isSImm7s16() const {
426     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
429     int64_t Val = MCE->getValue();
430     return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// True if Expr is a :lo12:-style symbolic offset usable in a scaled
// unsigned 12-bit load/store offset field.
433   bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
434     AArch64MCExpr::VariantKind ELFRefKind;
435     MCSymbolRefExpr::VariantKind DarwinRefKind;
437     if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
439       // If we don't understand the expression, assume the best and
440       // let the fixup and relocation code deal with it.
444     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
445         ELFRefKind == AArch64MCExpr::VK_LO12 ||
446         ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
447         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
448         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
449         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
450         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
451         ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
452         ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
453       // Note that we don't range-check the addend. It's adjusted modulo page
454       // size when converted, so there is no "out of range" condition when using
456       return Addend >= 0 && (Addend % Scale) == 0;
457     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
458                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
459       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset scaled by the access size: either a symbolic
// :lo12: reference or a constant multiple of Scale below Scale*4096.
466   template <int Scale> bool isUImm12Offset() const {
470     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
472       return isSymbolicUImm12Offset(getImm(), Scale);
474     int64_t Val = MCE->getValue();
475     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Plain half-open / closed integer ranges; names encode the bounds.
478   bool isImm0_7() const {
481     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
484     int64_t Val = MCE->getValue();
485     return (Val >= 0 && Val < 8);
487   bool isImm1_8() const {
490     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493     int64_t Val = MCE->getValue();
494     return (Val > 0 && Val < 9);
496   bool isImm0_15() const {
499     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
502     int64_t Val = MCE->getValue();
503     return (Val >= 0 && Val < 16);
505   bool isImm1_16() const {
508     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
511     int64_t Val = MCE->getValue();
512     return (Val > 0 && Val < 17);
514   bool isImm0_31() const {
517     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
520     int64_t Val = MCE->getValue();
521     return (Val >= 0 && Val < 32);
523   bool isImm1_31() const {
526     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
529     int64_t Val = MCE->getValue();
530     return (Val >= 1 && Val < 32);
532   bool isImm1_32() const {
535     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
538     int64_t Val = MCE->getValue();
539     return (Val >= 1 && Val < 33);
541   bool isImm0_63() const {
544     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
547     int64_t Val = MCE->getValue();
548     return (Val >= 0 && Val < 64);
550   bool isImm1_63() const {
553     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556     int64_t Val = MCE->getValue();
557     return (Val >= 1 && Val < 64);
559   bool isImm1_64() const {
562     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
565     int64_t Val = MCE->getValue();
566     return (Val >= 1 && Val < 65);
568   bool isImm0_127() const {
571     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574     int64_t Val = MCE->getValue();
575     return (Val >= 0 && Val < 128);
577   bool isImm0_255() const {
580     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
583     int64_t Val = MCE->getValue();
584     return (Val >= 0 && Val < 256);
586   bool isImm0_65535() const {
589     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
592     int64_t Val = MCE->getValue();
593     return (Val >= 0 && Val < 65536);
595   bool isImm32_63() const {
598     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
601     int64_t Val = MCE->getValue();
602     return (Val >= 32 && Val < 64);
// Bitmask ("logical") immediates for 32/64-bit AND/ORR/EOR; validity is
// delegated to the addressing-mode helper.
604   bool isLogicalImm32() const {
607     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
610     return AArch64_AM::isLogicalImmediate(MCE->getValue(), 32);
612   bool isLogicalImm64() const {
615     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
618     return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
620   bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// True for anything an ADD/SUB (immediate) can take: a 0..0xfff constant,
// optionally shifted by 'lsl #12', or a symbolic :lo12:-family reference.
621   bool isAddSubImm() const {
622     if (!isShiftedImm() && !isImm())
627     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
628     if (isShiftedImm()) {
629       unsigned Shift = ShiftedImm.ShiftAmount;
630       Expr = ShiftedImm.Val;
631       if (Shift != 0 && Shift != 12)
// Symbolic expressions are accepted for the modifiers that relocate into
// an ADD-immediate field (page offsets and TLS hi12/lo12 pieces).
637     AArch64MCExpr::VariantKind ELFRefKind;
638     MCSymbolRefExpr::VariantKind DarwinRefKind;
640     if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
641                                             DarwinRefKind, Addend)) {
642       return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
643           || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
644           || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
645           || ELFRefKind == AArch64MCExpr::VK_LO12
646           || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
647           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
648           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
649           || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
650           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
651           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
652           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
655     // Otherwise it should be a real immediate in range:
656     const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
657     return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
659   bool isCondCode() const { return Kind == k_CondCode; }
// Advanced-SIMD modified-immediate form (type 10); check delegated to the
// addressing-mode helper.
660   bool isSIMDImmType10() const {
663     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
666     return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: byte offsets that fit the 26/19/14-bit
// word-scaled immediate fields (the "multiple of 4" guard is elided here).
668   bool isBranchTarget26() const {
671     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
674     int64_t Val = MCE->getValue();
677     return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
679   bool isPCRelLabel19() const {
682     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
685     int64_t Val = MCE->getValue();
688     return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
690   bool isBranchTarget14() const {
693     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
696     int64_t Val = MCE->getValue();
699     return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// Shared check for MOVZ/MOVK symbol operands: the immediate must be a
// symbol reference carrying one of the listed ELF modifiers (e.g.
// :abs_g1:) and no Darwin modifier.
703   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
707     AArch64MCExpr::VariantKind ELFRefKind;
708     MCSymbolRefExpr::VariantKind DarwinRefKind;
710     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
711                                              DarwinRefKind, Addend)) {
714     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
717     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
718       if (ELFRefKind == AllowedModifiers[i])
// Per-halfword predicates: MOVZ accepts the plain/_S/TLS "gN" modifiers,
// MOVK the non-checking "_NC" variants, for halfword N of the target.
725   bool isMovZSymbolG3() const {
726     static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
727     return isMovWSymbol(Variants);
730   bool isMovZSymbolG2() const {
731     static AArch64MCExpr::VariantKind Variants[] = {
732         AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
733         AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
734     return isMovWSymbol(Variants);
737   bool isMovZSymbolG1() const {
738     static AArch64MCExpr::VariantKind Variants[] = {
739         AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
740         AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
741         AArch64MCExpr::VK_DTPREL_G1,
743     return isMovWSymbol(Variants);
746   bool isMovZSymbolG0() const {
747     static AArch64MCExpr::VariantKind Variants[] = {
748         AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
749         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
750     return isMovWSymbol(Variants);
753   bool isMovKSymbolG3() const {
754     static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
755     return isMovWSymbol(Variants);
758   bool isMovKSymbolG2() const {
759     static AArch64MCExpr::VariantKind Variants[] = {
760         AArch64MCExpr::VK_ABS_G2_NC};
761     return isMovWSymbol(Variants);
764   bool isMovKSymbolG1() const {
765     static AArch64MCExpr::VariantKind Variants[] = {
766         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
767         AArch64MCExpr::VK_DTPREL_G1_NC
769     return isMovWSymbol(Variants);
772   bool isMovKSymbolG0() const {
773     static AArch64MCExpr::VariantKind Variants[] = {
774         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
775         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
777     return isMovWSymbol(Variants);
// MOV-immediate aliases: a constant is a MOVZ alias when exactly one
// 16-bit halfword (at 'Shift') is set; 32-bit values are masked first.
780   template<int RegWidth, int Shift>
781   bool isMOVZMovAlias() const {
782     if (!isImm()) return false;
784     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785     if (!CE) return false;
786     uint64_t Value = CE->getValue();
789       Value &= 0xffffffffULL;
791     // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
792     if (Value == 0 && Shift != 0)
795     return (Value & ~(0xffffULL << Shift)) == 0;
798   template<int RegWidth, int Shift>
799   bool isMOVNMovAlias() const {
800     if (!isImm()) return false;
802     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
803     if (!CE) return false;
804     uint64_t Value = CE->getValue();
806     // MOVZ takes precedence over MOVN.
807     for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
808       if ((Value & ~(0xffffULL << MOVZShift)) == 0)
813       Value &= 0xffffffffULL;
815     return (Value & ~(0xffffULL << Shift)) == 0;
818   bool isFPImm() const { return Kind == k_FPImm; }
819   bool isBarrier() const { return Kind == k_Barrier; }
820   bool isSysReg() const { return Kind == k_SysReg; }
// System-register predicates: a name is valid for MRS/MSR if the
// corresponding mapper recognizes it for the current core's feature bits.
821   bool isMRSSystemRegister() const {
822     if (!isSysReg()) return false;
824     bool IsKnownRegister;
825     auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
826     Mapper.fromString(getSysReg(), IsKnownRegister);
828     return IsKnownRegister;
830   bool isMSRSystemRegister() const {
831     if (!isSysReg()) return false;
833     bool IsKnownRegister;
834     auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
835     Mapper.fromString(getSysReg(), IsKnownRegister);
837     return IsKnownRegister;
839   bool isSystemPStateField() const {
840     if (!isSysReg()) return false;
842     bool IsKnownRegister;
843     AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
845     return IsKnownRegister;
// Scalar vs. vector registers are distinguished by the isVector flag;
// register-class membership narrows to the specific operand classes.
847   bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
848   bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
849   bool isVectorRegLo() const {
850     return Kind == k_Register && Reg.isVector &&
851            AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
// A 64-bit GPR written where a 32-bit one is expected (handled by
// addGPR32as64Operands below).
854   bool isGPR32as64() const {
855     return Kind == k_Register && !Reg.isVector &&
856       AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
859   bool isGPR64sp0() const {
860     return Kind == k_Register && !Reg.isVector &&
861       AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
864   /// Is this a vector list with the type implicit (presumably attached to the
865   /// instruction itself)?
866   template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
867     return Kind == k_VectorList && VectorList.Count == NumRegs &&
868            !VectorList.ElementKind;
// Fully-typed vector list, e.g. "{ v0.8b, v1.8b }": count, element kind
// and element count must all match the template arguments.
871   template <unsigned NumRegs, unsigned NumElements, char ElementKind>
872   bool isTypedVectorList() const {
873     if (Kind != k_VectorList)
875     if (VectorList.Count != NumRegs)
877     if (VectorList.ElementKind != ElementKind)
879     return VectorList.NumElements == NumElements;
// Lane-index predicates; the upper bound is the lane count for the
// element size (B=16, H=8, S=4, D=2 lanes per 128-bit register).
882   bool isVectorIndex1() const {
883     return Kind == k_VectorIndex && VectorIndex.Val == 1;
885   bool isVectorIndexB() const {
886     return Kind == k_VectorIndex && VectorIndex.Val < 16;
888   bool isVectorIndexH() const {
889     return Kind == k_VectorIndex && VectorIndex.Val < 8;
891   bool isVectorIndexS() const {
892     return Kind == k_VectorIndex && VectorIndex.Val < 4;
894   bool isVectorIndexD() const {
895     return Kind == k_VectorIndex && VectorIndex.Val < 2;
897   bool isToken() const override { return Kind == k_Token; }
898   bool isTokenEqual(StringRef Str) const {
899     return Kind == k_Token && getToken() == Str;
901   bool isSysCR() const { return Kind == k_SysCR; }
902   bool isPrefetch() const { return Kind == k_Prefetch; }
// Shift/extend operand predicates. A single ShiftExtend payload covers
// both register shifts (LSL/LSR/ASR/ROR/MSL) and extends (UXTB..SXTX);
// each predicate below narrows to the subset an instruction class allows.
903   bool isShiftExtend() const { return Kind == k_ShiftExtend; }
904   bool isShifter() const {
905     if (!isShiftExtend())
908     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
909     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
910             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
911             ST == AArch64_AM::MSL);
// Extends for ADD/SUB (extended register): amount is at most 4.
913   bool isExtend() const {
914     if (!isShiftExtend())
917     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
918     return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
919             ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
920             ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
921             ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
922             ET == AArch64_AM::LSL) &&
923            getShiftExtendAmount() <= 4;
926   bool isExtend64() const {
929     // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
930     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
931     return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
933   bool isExtendLSL64() const {
936     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
937     return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
938             ET == AArch64_AM::LSL) &&
939            getShiftExtendAmount() <= 4;
// Register-offset addressing extends: the amount must be 0 (unscaled) or
// log2 of the access size in bytes.
942   template<int Width> bool isMemXExtend() const {
945     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
946     return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
947            (getShiftExtendAmount() == Log2_32(Width / 8) ||
948             getShiftExtendAmount() == 0);
951   template<int Width> bool isMemWExtend() const {
954     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
955     return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
956            (getShiftExtendAmount() == Log2_32(Width / 8) ||
957             getShiftExtendAmount() == 0);
960   template <unsigned width>
961   bool isArithmeticShifter() const {
965     // An arithmetic shifter is LSL, LSR, or ASR.
966     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
967     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
968             ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
971   template <unsigned width>
972   bool isLogicalShifter() const {
976     // A logical shifter is LSL, LSR, ASR or ROR.
977     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
978     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
979             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
980            getShiftExtendAmount() < width;
983   bool isMovImm32Shifter() const {
987     // A MOVi shifter is LSL of 0, 16, 32, or 48.
988     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
989     if (ST != AArch64_AM::LSL)
991     uint64_t Val = getShiftExtendAmount();
992     return (Val == 0 || Val == 16);
995   bool isMovImm64Shifter() const {
999     // A MOVi shifter is LSL of 0 or 16.
1000     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1001     if (ST != AArch64_AM::LSL)
1003     uint64_t Val = getShiftExtendAmount();
1004     return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
// NOTE(review): the two comments above (lines 987/999) appear swapped
// relative to the checks they precede — 32-bit MOVi allows 0/16, 64-bit
// allows 0/16/32/48; verify against the upstream file.
1007   bool isLogicalVecShifter() const {
1011     // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1012     unsigned Shift = getShiftExtendAmount();
1013     return getShiftExtendType() == AArch64_AM::LSL &&
1014            (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1017   bool isLogicalVecHalfWordShifter() const {
1018     if (!isLogicalVecShifter())
1021     // A logical vector shifter is a left shift by 0 or 8.
1022     unsigned Shift = getShiftExtendAmount();
1023     return getShiftExtendType() == AArch64_AM::LSL &&
1024            (Shift == 0 || Shift == 8);
1027   bool isMoveVecShifter() const {
1028     if (!isShiftExtend())
1031     // A logical vector shifter is a left shift by 8 or 16.
1032     unsigned Shift = getShiftExtendAmount();
1033     return getShiftExtendType() == AArch64_AM::MSL &&
1034            (Shift == 8 || Shift == 16);
1037   // Fallback unscaled operands are for aliases of LDR/STR that fall back
1038   // to LDUR/STUR when the offset is not legal for the former but is for
1039   // the latter. As such, in addition to checking for being a legal unscaled
1040   // address, also check that it is not a legal scaled address. This avoids
1041   // ambiguity in the matcher.
1043   bool isSImm9OffsetFB() const {
1044     return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: 21-bit page-aligned PC-relative range.
1047   bool isAdrpLabel() const {
1048     // Validation was handled during parsing, so we just sanity check that
1049     // something didn't go haywire.
1053     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1054       int64_t Val = CE->getValue();
1055       int64_t Min = - (4096 * (1LL << (21 - 1)));
1056       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1057       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: 21-bit byte-granular PC-relative range.
1063   bool isAdrLabel() const {
1064     // Validation was handled during parsing, so we just sanity check that
1065     // something didn't go haywire.
1069     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1070       int64_t Val = CE->getValue();
1071       int64_t Min = - (1LL << (21 - 1));
1072       int64_t Max = ((1LL << (21 - 1)) - 1);
1073       return Val >= Min && Val <= Max;
// Emitters: once the matcher has accepted an operand, these render it into
// the MCInst as MCOperands (N = number of MCOperands this operand fills).
1079   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1080     // Add as immediates when possible.  Null MCExpr = 0.
1082       Inst.addOperand(MCOperand::CreateImm(0));
1083     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1084       Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1086       Inst.addOperand(MCOperand::CreateExpr(Expr));
1089   void addRegOperands(MCInst &Inst, unsigned N) const {
1090     assert(N == 1 && "Invalid number of operands!");
1091     Inst.addOperand(MCOperand::CreateReg(getReg()));
// Translate a 64-bit GPR name to the 32-bit register with the same
// encoding, via the register-info encoding tables.
1094   void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1095     assert(N == 1 && "Invalid number of operands!");
1097         AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1099     const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1100     uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1101         RI->getEncodingValue(getReg()));
1103     Inst.addOperand(MCOperand::CreateReg(Reg));
// Vector registers are parsed as Q registers; emit as D (Q - Q0 + D0)
// or as-is for the 128-bit form.
1106   void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1107     assert(N == 1 && "Invalid number of operands!");
1109         AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1110     Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1113   void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1114     assert(N == 1 && "Invalid number of operands!");
1116         AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1117     Inst.addOperand(MCOperand::CreateReg(getReg()));
1120   void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1121     assert(N == 1 && "Invalid number of operands!");
1122     Inst.addOperand(MCOperand::CreateReg(getReg()));
// Vector lists: map the Q-numbered start register into the D-tuple or
// Q-tuple register of the requested length.
1125   template <unsigned NumRegs>
1126   void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1127     assert(N == 1 && "Invalid number of operands!");
1128     static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1129                                     AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1130     unsigned FirstReg = FirstRegs[NumRegs - 1];
1133         MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1136   template <unsigned NumRegs>
1137   void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1138     assert(N == 1 && "Invalid number of operands!");
1139     static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1140                                     AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1141     unsigned FirstReg = FirstRegs[NumRegs - 1];
1144         MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
// Lane indices are emitted as plain immediates.
1147   void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1148     assert(N == 1 && "Invalid number of operands!");
1149     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1152   void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1153     assert(N == 1 && "Invalid number of operands!");
1154     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1157   void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1158     assert(N == 1 && "Invalid number of operands!");
1159     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1162   void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1163     assert(N == 1 && "Invalid number of operands!");
1164     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1167   void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1168     assert(N == 1 && "Invalid number of operands!");
1169     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1172   void addImmOperands(MCInst &Inst, unsigned N) const {
1173     assert(N == 1 && "Invalid number of operands!");
1174     // If this is a pageoff symrefexpr with an addend, adjust the addend
1175     // to be only the page-offset portion. Otherwise, just add the expr
1177     addExpr(Inst, getImm());
// ADD/SUB immediate fills two MCOperands: the value and the LSL amount
// (0 when no explicit shift was written).
1180   void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1181     assert(N == 2 && "Invalid number of operands!");
1182     if (isShiftedImm()) {
1183       addExpr(Inst, getShiftedImmVal());
1184       Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1186       addExpr(Inst, getImm());
1187       Inst.addOperand(MCOperand::CreateImm(0));
1191   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1192     assert(N == 1 && "Invalid number of operands!");
1193     Inst.addOperand(MCOperand::CreateImm(getCondCode()));
// ADRP immediate is encoded in pages, hence the >> 12 on constants;
// symbolic targets are left for relocation.
1196   void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1197     assert(N == 1 && "Invalid number of operands!");
1198     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1200       addExpr(Inst, getImm());
1202       Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1205   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1206     addImmOperands(Inst, N);
// Scaled offsets: divide the byte value by the access size before
// encoding (Scale is presumably a template parameter elided here).
1210   void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1211     assert(N == 1 && "Invalid number of operands!");
1212     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1215       Inst.addOperand(MCOperand::CreateExpr(getImm()));
1218     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1221   void addSImm9Operands(MCInst &Inst, unsigned N) const {
1222     assert(N == 1 && "Invalid number of operands!");
1223     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1224     assert(MCE && "Invalid constant immediate operand!");
1225     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1228   void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1229     assert(N == 1 && "Invalid number of operands!");
1230     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1231     assert(MCE && "Invalid constant immediate operand!");
1232     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1235   void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1236     assert(N == 1 && "Invalid number of operands!");
1237     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1238     assert(MCE && "Invalid constant immediate operand!");
1239     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1242   void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1243     assert(N == 1 && "Invalid number of operands!");
1244     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1245     assert(MCE && "Invalid constant immediate operand!");
1246     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// Range-checked immediates were validated by the isImmX_Y predicates;
// here they are emitted verbatim.
1249   void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1250     assert(N == 1 && "Invalid number of operands!");
1251     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1252     assert(MCE && "Invalid constant immediate operand!");
1253     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1256   void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1257     assert(N == 1 && "Invalid number of operands!");
1258     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1259     assert(MCE && "Invalid constant immediate operand!");
1260     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1263   void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1264     assert(N == 1 && "Invalid number of operands!");
1265     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1266     assert(MCE && "Invalid constant immediate operand!");
1267     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1270   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1271     assert(N == 1 && "Invalid number of operands!");
1272     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1273     assert(MCE && "Invalid constant immediate operand!");
1274     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1277 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1280 assert(MCE && "Invalid constant immediate operand!");
1281 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1284 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1285 assert(N == 1 && "Invalid number of operands!");
1286 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1287 assert(MCE && "Invalid constant immediate operand!");
1288 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1291 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1292 assert(N == 1 && "Invalid number of operands!");
1293 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1294 assert(MCE && "Invalid constant immediate operand!");
1295 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1298 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1299 assert(N == 1 && "Invalid number of operands!");
1300 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1301 assert(MCE && "Invalid constant immediate operand!");
1302 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1305 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1306 assert(N == 1 && "Invalid number of operands!");
1307 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1308 assert(MCE && "Invalid constant immediate operand!");
1309 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1312 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1313 assert(N == 1 && "Invalid number of operands!");
1314 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1315 assert(MCE && "Invalid constant immediate operand!");
1316 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1319 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1322 assert(MCE && "Invalid constant immediate operand!");
1323 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1326 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1327 assert(N == 1 && "Invalid number of operands!");
1328 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1329 assert(MCE && "Invalid constant immediate operand!");
1330 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1333 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1336 assert(MCE && "Invalid constant immediate operand!");
1337 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1340 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1343 assert(MCE && "Invalid constant immediate operand!");
1344 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1347 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1348 assert(N == 1 && "Invalid number of operands!");
1349 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1350 assert(MCE && "Invalid logical immediate operand!");
1351 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1352 Inst.addOperand(MCOperand::CreateImm(encoding));
1355 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1356 assert(N == 1 && "Invalid number of operands!");
1357 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1358 assert(MCE && "Invalid logical immediate operand!");
1359 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1360 Inst.addOperand(MCOperand::CreateImm(encoding));
1363 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1364 assert(N == 1 && "Invalid number of operands!");
1365 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1366 assert(MCE && "Invalid immediate operand!");
1367 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1368 Inst.addOperand(MCOperand::CreateImm(encoding));
1371 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1372 // Branch operands don't encode the low bits, so shift them off
1373 // here. If it's a label, however, just put it on directly as there's
1374 // not enough information now to do anything.
1375 assert(N == 1 && "Invalid number of operands!");
1376 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1378 addExpr(Inst, getImm());
1381 assert(MCE && "Invalid constant immediate operand!");
1382 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1385 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1386 // Branch operands don't encode the low bits, so shift them off
1387 // here. If it's a label, however, just put it on directly as there's
1388 // not enough information now to do anything.
1389 assert(N == 1 && "Invalid number of operands!");
1390 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1392 addExpr(Inst, getImm());
1395 assert(MCE && "Invalid constant immediate operand!");
1396 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1399 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1400 // Branch operands don't encode the low bits, so shift them off
1401 // here. If it's a label, however, just put it on directly as there's
1402 // not enough information now to do anything.
1403 assert(N == 1 && "Invalid number of operands!");
1404 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1406 addExpr(Inst, getImm());
1409 assert(MCE && "Invalid constant immediate operand!");
1410 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1413 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1414 assert(N == 1 && "Invalid number of operands!");
1415 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1418 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1419 assert(N == 1 && "Invalid number of operands!");
1420 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1423 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1424 assert(N == 1 && "Invalid number of operands!");
1427 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1428 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1430 Inst.addOperand(MCOperand::CreateImm(Bits));
1433 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1434 assert(N == 1 && "Invalid number of operands!");
1437 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1438 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1440 Inst.addOperand(MCOperand::CreateImm(Bits));
1443 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1444 assert(N == 1 && "Invalid number of operands!");
1448 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1450 Inst.addOperand(MCOperand::CreateImm(Bits));
1453 void addSysCROperands(MCInst &Inst, unsigned N) const {
1454 assert(N == 1 && "Invalid number of operands!");
1455 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1458 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1459 assert(N == 1 && "Invalid number of operands!");
1460 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1463 void addShifterOperands(MCInst &Inst, unsigned N) const {
1464 assert(N == 1 && "Invalid number of operands!");
1466 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1467 Inst.addOperand(MCOperand::CreateImm(Imm));
1470 void addExtendOperands(MCInst &Inst, unsigned N) const {
1471 assert(N == 1 && "Invalid number of operands!");
1472 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1473 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1474 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1475 Inst.addOperand(MCOperand::CreateImm(Imm));
1478 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1479 assert(N == 1 && "Invalid number of operands!");
1480 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1481 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1482 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1483 Inst.addOperand(MCOperand::CreateImm(Imm));
1486 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1487 assert(N == 2 && "Invalid number of operands!");
1488 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1489 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1490 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1491 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1494 // For 8-bit load/store instructions with a register offset, both the
1495 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1496 // they're disambiguated by whether the shift was explicit or implicit rather
1498 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1499 assert(N == 2 && "Invalid number of operands!");
1500 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1501 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1502 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1503 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1507 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1508 assert(N == 1 && "Invalid number of operands!");
1510 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1511 uint64_t Value = CE->getValue();
1512 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1516 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1517 assert(N == 1 && "Invalid number of operands!");
1519 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1520 uint64_t Value = CE->getValue();
1521 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1524 void print(raw_ostream &OS) const override;
1526 static AArch64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1528 AArch64Operand *Op = new AArch64Operand(k_Token, Ctx);
1529 Op->Tok.Data = Str.data();
1530 Op->Tok.Length = Str.size();
1531 Op->Tok.IsSuffix = IsSuffix;
1537 static AArch64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1538 SMLoc E, MCContext &Ctx) {
1539 AArch64Operand *Op = new AArch64Operand(k_Register, Ctx);
1540 Op->Reg.RegNum = RegNum;
1541 Op->Reg.isVector = isVector;
1547 static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1548 unsigned NumElements, char ElementKind,
1549 SMLoc S, SMLoc E, MCContext &Ctx) {
1550 AArch64Operand *Op = new AArch64Operand(k_VectorList, Ctx);
1551 Op->VectorList.RegNum = RegNum;
1552 Op->VectorList.Count = Count;
1553 Op->VectorList.NumElements = NumElements;
1554 Op->VectorList.ElementKind = ElementKind;
1560 static AArch64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1562 AArch64Operand *Op = new AArch64Operand(k_VectorIndex, Ctx);
1563 Op->VectorIndex.Val = Idx;
1569 static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1571 AArch64Operand *Op = new AArch64Operand(k_Immediate, Ctx);
1578 static AArch64Operand *CreateShiftedImm(const MCExpr *Val,
1579 unsigned ShiftAmount, SMLoc S,
1580 SMLoc E, MCContext &Ctx) {
1581 AArch64Operand *Op = new AArch64Operand(k_ShiftedImm, Ctx);
1582 Op->ShiftedImm .Val = Val;
1583 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1589 static AArch64Operand *CreateCondCode(AArch64CC::CondCode Code, SMLoc S,
1590 SMLoc E, MCContext &Ctx) {
1591 AArch64Operand *Op = new AArch64Operand(k_CondCode, Ctx);
1592 Op->CondCode.Code = Code;
1598 static AArch64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1599 AArch64Operand *Op = new AArch64Operand(k_FPImm, Ctx);
1600 Op->FPImm.Val = Val;
1606 static AArch64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1607 AArch64Operand *Op = new AArch64Operand(k_Barrier, Ctx);
1608 Op->Barrier.Val = Val;
1614 static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S,
1615 uint64_t FeatureBits, MCContext &Ctx) {
1616 AArch64Operand *Op = new AArch64Operand(k_SysReg, Ctx);
1617 Op->SysReg.Data = Str.data();
1618 Op->SysReg.Length = Str.size();
1619 Op->SysReg.FeatureBits = FeatureBits;
1625 static AArch64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1627 AArch64Operand *Op = new AArch64Operand(k_SysCR, Ctx);
1628 Op->SysCRImm.Val = Val;
1634 static AArch64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1635 AArch64Operand *Op = new AArch64Operand(k_Prefetch, Ctx);
1636 Op->Prefetch.Val = Val;
1642 static AArch64Operand *CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp,
1643 unsigned Val, bool HasExplicitAmount,
1644 SMLoc S, SMLoc E, MCContext &Ctx) {
1645 AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, Ctx);
1646 Op->ShiftExtend.Type = ShOp;
1647 Op->ShiftExtend.Amount = Val;
1648 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1655 } // end anonymous namespace.
1657 void AArch64Operand::print(raw_ostream &OS) const {
1660 OS << "<fpimm " << getFPImm() << "("
1661 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1665 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1667 OS << "<barrier " << Name << ">";
1669 OS << "<barrier invalid #" << getBarrier() << ">";
1673 getImm()->print(OS);
1675 case k_ShiftedImm: {
1676 unsigned Shift = getShiftedImmShift();
1677 OS << "<shiftedimm ";
1678 getShiftedImmVal()->print(OS);
1679 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1683 OS << "<condcode " << getCondCode() << ">";
1686 OS << "<register " << getReg() << ">";
1688 case k_VectorList: {
1689 OS << "<vectorlist ";
1690 unsigned Reg = getVectorListStart();
1691 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1692 OS << Reg + i << " ";
1697 OS << "<vectorindex " << getVectorIndex() << ">";
1700 OS << "<sysreg: " << getSysReg() << '>';
1703 OS << "'" << getToken() << "'";
1706 OS << "c" << getSysCR();
1710 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1712 OS << "<prfop " << Name << ">";
1714 OS << "<prfop invalid #" << getPrefetch() << ">";
1717 case k_ShiftExtend: {
1718 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1719 << getShiftExtendAmount();
1720 if (!hasShiftExtendAmount())
1728 /// @name Auto-generated Match Functions
1731 static unsigned MatchRegisterName(StringRef Name);
1735 static unsigned matchVectorRegName(StringRef Name) {
1736 return StringSwitch<unsigned>(Name)
1737 .Case("v0", AArch64::Q0)
1738 .Case("v1", AArch64::Q1)
1739 .Case("v2", AArch64::Q2)
1740 .Case("v3", AArch64::Q3)
1741 .Case("v4", AArch64::Q4)
1742 .Case("v5", AArch64::Q5)
1743 .Case("v6", AArch64::Q6)
1744 .Case("v7", AArch64::Q7)
1745 .Case("v8", AArch64::Q8)
1746 .Case("v9", AArch64::Q9)
1747 .Case("v10", AArch64::Q10)
1748 .Case("v11", AArch64::Q11)
1749 .Case("v12", AArch64::Q12)
1750 .Case("v13", AArch64::Q13)
1751 .Case("v14", AArch64::Q14)
1752 .Case("v15", AArch64::Q15)
1753 .Case("v16", AArch64::Q16)
1754 .Case("v17", AArch64::Q17)
1755 .Case("v18", AArch64::Q18)
1756 .Case("v19", AArch64::Q19)
1757 .Case("v20", AArch64::Q20)
1758 .Case("v21", AArch64::Q21)
1759 .Case("v22", AArch64::Q22)
1760 .Case("v23", AArch64::Q23)
1761 .Case("v24", AArch64::Q24)
1762 .Case("v25", AArch64::Q25)
1763 .Case("v26", AArch64::Q26)
1764 .Case("v27", AArch64::Q27)
1765 .Case("v28", AArch64::Q28)
1766 .Case("v29", AArch64::Q29)
1767 .Case("v30", AArch64::Q30)
1768 .Case("v31", AArch64::Q31)
1772 static bool isValidVectorKind(StringRef Name) {
1773 return StringSwitch<bool>(Name.lower())
1783 // Accept the width neutral ones, too, for verbose syntax. If those
1784 // aren't used in the right places, the token operand won't match so
1785 // all will work out.
1793 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1794 char &ElementKind) {
1795 assert(isValidVectorKind(Name));
1797 ElementKind = Name.lower()[Name.size() - 1];
1800 if (Name.size() == 2)
1803 // Parse the lane count
1804 Name = Name.drop_front();
1805 while (isdigit(Name.front())) {
1806 NumElements = 10 * NumElements + (Name.front() - '0');
1807 Name = Name.drop_front();
1811 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1813 StartLoc = getLoc();
1814 RegNo = tryParseRegister();
1815 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1816 return (RegNo == (unsigned)-1);
1819 /// tryParseRegister - Try to parse a register name. The token must be an
1820 /// Identifier when called, and if it is a register name the token is eaten and
1821 /// the register is added to the operand list.
1822 int AArch64AsmParser::tryParseRegister() {
1823 const AsmToken &Tok = Parser.getTok();
1824 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1826 std::string lowerCase = Tok.getString().lower();
1827 unsigned RegNum = MatchRegisterName(lowerCase);
1828 // Also handle a few aliases of registers.
1830 RegNum = StringSwitch<unsigned>(lowerCase)
1831 .Case("fp", AArch64::FP)
1832 .Case("lr", AArch64::LR)
1833 .Case("x31", AArch64::XZR)
1834 .Case("w31", AArch64::WZR)
1840 Parser.Lex(); // Eat identifier token.
1844 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1845 /// kind specifier. If it is a register specifier, eat the token and return it.
1846 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1847 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1848 TokError("vector register expected");
1852 StringRef Name = Parser.getTok().getString();
1853 // If there is a kind specifier, it's separated from the register name by
1855 size_t Start = 0, Next = Name.find('.');
1856 StringRef Head = Name.slice(Start, Next);
1857 unsigned RegNum = matchVectorRegName(Head);
1859 if (Next != StringRef::npos) {
1860 Kind = Name.slice(Next, StringRef::npos);
1861 if (!isValidVectorKind(Kind)) {
1862 TokError("invalid vector kind qualifier");
1866 Parser.Lex(); // Eat the register token.
1871 TokError("vector register expected");
1875 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1876 AArch64AsmParser::OperandMatchResultTy
1877 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1880 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1881 Error(S, "Expected cN operand where 0 <= N <= 15");
1882 return MatchOperand_ParseFail;
1885 StringRef Tok = Parser.getTok().getIdentifier();
1886 if (Tok[0] != 'c' && Tok[0] != 'C') {
1887 Error(S, "Expected cN operand where 0 <= N <= 15");
1888 return MatchOperand_ParseFail;
1892 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1893 if (BadNum || CRNum > 15) {
1894 Error(S, "Expected cN operand where 0 <= N <= 15");
1895 return MatchOperand_ParseFail;
1898 Parser.Lex(); // Eat identifier token.
1900 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1901 return MatchOperand_Success;
1904 /// tryParsePrefetch - Try to parse a prefetch operand.
1905 AArch64AsmParser::OperandMatchResultTy
1906 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1908 const AsmToken &Tok = Parser.getTok();
1909 // Either an identifier for named values or a 5-bit immediate.
1910 bool Hash = Tok.is(AsmToken::Hash);
1911 if (Hash || Tok.is(AsmToken::Integer)) {
1913 Parser.Lex(); // Eat hash token.
1914 const MCExpr *ImmVal;
1915 if (getParser().parseExpression(ImmVal))
1916 return MatchOperand_ParseFail;
1918 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1920 TokError("immediate value expected for prefetch operand");
1921 return MatchOperand_ParseFail;
1923 unsigned prfop = MCE->getValue();
1925 TokError("prefetch operand out of range, [0,31] expected");
1926 return MatchOperand_ParseFail;
1929 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1930 return MatchOperand_Success;
1933 if (Tok.isNot(AsmToken::Identifier)) {
1934 TokError("pre-fetch hint expected");
1935 return MatchOperand_ParseFail;
1939 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1941 TokError("pre-fetch hint expected");
1942 return MatchOperand_ParseFail;
1945 Parser.Lex(); // Eat identifier token.
1946 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1947 return MatchOperand_Success;
1950 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
1952 AArch64AsmParser::OperandMatchResultTy
1953 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1957 if (Parser.getTok().is(AsmToken::Hash)) {
1958 Parser.Lex(); // Eat hash token.
1961 if (parseSymbolicImmVal(Expr))
1962 return MatchOperand_ParseFail;
1964 AArch64MCExpr::VariantKind ELFRefKind;
1965 MCSymbolRefExpr::VariantKind DarwinRefKind;
1967 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
1968 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
1969 ELFRefKind == AArch64MCExpr::VK_INVALID) {
1970 // No modifier was specified at all; this is the syntax for an ELF basic
1971 // ADRP relocation (unfortunately).
1973 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
1974 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
1975 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
1977 Error(S, "gotpage label reference not allowed an addend");
1978 return MatchOperand_ParseFail;
1979 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
1980 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
1981 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
1982 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
1983 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
1984 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
1985 // The operand must be an @page or @gotpage qualified symbolref.
1986 Error(S, "page or gotpage label reference expected");
1987 return MatchOperand_ParseFail;
1991 // We have either a label reference possibly with addend or an immediate. The
1992 // addend is a raw value here. The linker will adjust it to only reference the
1994 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1995 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
1997 return MatchOperand_Success;
2000 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2002 AArch64AsmParser::OperandMatchResultTy
2003 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2007 if (Parser.getTok().is(AsmToken::Hash)) {
2008 Parser.Lex(); // Eat hash token.
2011 if (getParser().parseExpression(Expr))
2012 return MatchOperand_ParseFail;
2014 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2015 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2017 return MatchOperand_Success;
2020 /// tryParseFPImm - A floating point immediate expression operand.
2021 AArch64AsmParser::OperandMatchResultTy
2022 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2026 if (Parser.getTok().is(AsmToken::Hash)) {
2027 Parser.Lex(); // Eat '#'
2031 // Handle negation, as that still comes through as a separate token.
2032 bool isNegative = false;
2033 if (Parser.getTok().is(AsmToken::Minus)) {
2037 const AsmToken &Tok = Parser.getTok();
2038 if (Tok.is(AsmToken::Real)) {
2039 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2040 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2041 // If we had a '-' in front, toggle the sign bit.
2042 IntVal ^= (uint64_t)isNegative << 63;
2043 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2044 Parser.Lex(); // Eat the token.
2045 // Check for out of range values. As an exception, we let Zero through,
2046 // as we handle that special case in post-processing before matching in
2047 // order to use the zero register for it.
2048 if (Val == -1 && !RealVal.isZero()) {
2049 TokError("expected compatible register or floating-point constant");
2050 return MatchOperand_ParseFail;
2052 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2053 return MatchOperand_Success;
2055 if (Tok.is(AsmToken::Integer)) {
2057 if (!isNegative && Tok.getString().startswith("0x")) {
2058 Val = Tok.getIntVal();
2059 if (Val > 255 || Val < 0) {
2060 TokError("encoded floating point value out of range");
2061 return MatchOperand_ParseFail;
2064 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2065 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2066 // If we had a '-' in front, toggle the sign bit.
2067 IntVal ^= (uint64_t)isNegative << 63;
2068 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2070 Parser.Lex(); // Eat the token.
2071 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2072 return MatchOperand_Success;
2076 return MatchOperand_NoMatch;
2078 TokError("invalid floating point immediate");
2079 return MatchOperand_ParseFail;
2082 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2083 AArch64AsmParser::OperandMatchResultTy
2084 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2087 if (Parser.getTok().is(AsmToken::Hash))
2088 Parser.Lex(); // Eat '#'
2089 else if (Parser.getTok().isNot(AsmToken::Integer))
2090 // Operand should start from # or should be integer, emit error otherwise.
2091 return MatchOperand_NoMatch;
2094 if (parseSymbolicImmVal(Imm))
2095 return MatchOperand_ParseFail;
2096 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2097 uint64_t ShiftAmount = 0;
2098 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2100 int64_t Val = MCE->getValue();
2101 if (Val > 0xfff && (Val & 0xfff) == 0) {
2102 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2106 SMLoc E = Parser.getTok().getLoc();
2107 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2109 return MatchOperand_Success;
2115 // The optional operand must be "lsl #N" where N is non-negative.
2116 if (!Parser.getTok().is(AsmToken::Identifier) ||
2117 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2118 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2119 return MatchOperand_ParseFail;
2125 if (Parser.getTok().is(AsmToken::Hash)) {
2129 if (Parser.getTok().isNot(AsmToken::Integer)) {
2130 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2131 return MatchOperand_ParseFail;
2134 int64_t ShiftAmount = Parser.getTok().getIntVal();
2136 if (ShiftAmount < 0) {
2137 Error(Parser.getTok().getLoc(), "positive shift amount required");
2138 return MatchOperand_ParseFail;
2140 Parser.Lex(); // Eat the number
2142 SMLoc E = Parser.getTok().getLoc();
2143 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2144 S, E, getContext()));
2145 return MatchOperand_Success;
2148 /// parseCondCodeString - Parse a Condition Code string.
2149 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2150 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2151 .Case("eq", AArch64CC::EQ)
2152 .Case("ne", AArch64CC::NE)
2153 .Case("cs", AArch64CC::HS)
2154 .Case("hs", AArch64CC::HS)
2155 .Case("cc", AArch64CC::LO)
2156 .Case("lo", AArch64CC::LO)
2157 .Case("mi", AArch64CC::MI)
2158 .Case("pl", AArch64CC::PL)
2159 .Case("vs", AArch64CC::VS)
2160 .Case("vc", AArch64CC::VC)
2161 .Case("hi", AArch64CC::HI)
2162 .Case("ls", AArch64CC::LS)
2163 .Case("ge", AArch64CC::GE)
2164 .Case("lt", AArch64CC::LT)
2165 .Case("gt", AArch64CC::GT)
2166 .Case("le", AArch64CC::LE)
2167 .Case("al", AArch64CC::AL)
2168 .Case("nv", AArch64CC::NV)
2169 .Default(AArch64CC::Invalid);
2173 /// parseCondCode - Parse a Condition Code operand.
2174 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2175 bool invertCondCode) {
2177 const AsmToken &Tok = Parser.getTok();
2178 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2180 StringRef Cond = Tok.getString();
2181 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2182 if (CC == AArch64CC::Invalid)
2183 return TokError("invalid condition code");
2184 Parser.Lex(); // Eat identifier token.
2187 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2190 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2194 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
2195 /// extend argument (e.g. "lsl #2" or "uxtb"); parse it if present.
2196 AArch64AsmParser::OperandMatchResultTy
2197 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2198 const AsmToken &Tok = Parser.getTok();
// Map the (case-insensitive) specifier word onto a shift/extend kind. An
// unrecognized word means this optional operand is simply absent.
2199 std::string LowerID = Tok.getString().lower();
2200 AArch64_AM::ShiftExtendType ShOp =
2201 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2202 .Case("lsl", AArch64_AM::LSL)
2203 .Case("lsr", AArch64_AM::LSR)
2204 .Case("asr", AArch64_AM::ASR)
2205 .Case("ror", AArch64_AM::ROR)
2206 .Case("msl", AArch64_AM::MSL)
2207 .Case("uxtb", AArch64_AM::UXTB)
2208 .Case("uxth", AArch64_AM::UXTH)
2209 .Case("uxtw", AArch64_AM::UXTW)
2210 .Case("uxtx", AArch64_AM::UXTX)
2211 .Case("sxtb", AArch64_AM::SXTB)
2212 .Case("sxth", AArch64_AM::SXTH)
2213 .Case("sxtw", AArch64_AM::SXTW)
2214 .Case("sxtx", AArch64_AM::SXTX)
2215 .Default(AArch64_AM::InvalidShiftExtend)
2217 if (ShOp == AArch64_AM::InvalidShiftExtend)
2218 return MatchOperand_NoMatch;
2220 SMLoc S = Tok.getLoc();
// NOTE(review): the specifier token itself is consumed in code elided from
// this view — confirm against the full file.
2223 bool Hash = getLexer().is(AsmToken::Hash);
2224 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
// True shifts (and MSL) always need an explicit amount; only the extend
// forms may omit it.
2225 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2226 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2227 ShOp == AArch64_AM::MSL) {
2228 // We expect a number here.
2229 TokError("expected #imm after shift specifier");
2230 return MatchOperand_ParseFail;
2233 // "extend" type operations don't need an immediate, #0 is implicit.
2234 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2236 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2237 return MatchOperand_Success;
2241 Parser.Lex(); // Eat the '#'.
2243 // Make sure we do actually have a number
2244 if (!Parser.getTok().is(AsmToken::Integer)) {
2245 Error(Parser.getTok().getLoc(),
2246 "expected integer shift amount");
2247 return MatchOperand_ParseFail;
2250 const MCExpr *ImmVal;
2251 if (getParser().parseExpression(ImmVal))
2252 return MatchOperand_ParseFail;
// The amount must fold to a plain constant; symbolic expressions are not
// valid shift amounts.
2254 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2256 TokError("expected #imm after shift specifier");
2257 return MatchOperand_ParseFail;
2260 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2261 Operands.push_back(AArch64Operand::CreateShiftExtend(
2262 ShOp, MCE->getValue(), true, S, E, getContext()));
2263 return MatchOperand_Success;
2266 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2267 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2268 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2269 OperandVector &Operands) {
2270 if (Name.find('.') != StringRef::npos)
2271 return TokError("invalid operand");
2275 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2277 const AsmToken &Tok = Parser.getTok();
2278 StringRef Op = Tok.getString();
2279 SMLoc S = Tok.getLoc();
2281 const MCExpr *Expr = nullptr;
// SYS_ALIAS pushes the four operands of the equivalent SYS instruction:
// #op1, Cn, Cm, #op2 (see the ARM ARM system-instruction encodings).
2283 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2285 Expr = MCConstantExpr::Create(op1, getContext()); \
2286 Operands.push_back( \
2287 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2288 Operands.push_back( \
2289 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2290 Operands.push_back( \
2291 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2292 Expr = MCConstantExpr::Create(op2, getContext()); \
2293 Operands.push_back( \
2294 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2297 if (Mnemonic == "ic") {
2298 if (!Op.compare_lower("ialluis")) {
2299 // SYS #0, C7, C1, #0
2300 SYS_ALIAS(0, 7, 1, 0);
2301 } else if (!Op.compare_lower("iallu")) {
2302 // SYS #0, C7, C5, #0
2303 SYS_ALIAS(0, 7, 5, 0);
2304 } else if (!Op.compare_lower("ivau")) {
2305 // SYS #3, C7, C5, #1
2306 SYS_ALIAS(3, 7, 5, 1);
2308 return TokError("invalid operand for IC instruction");
2310 } else if (Mnemonic == "dc") {
2311 if (!Op.compare_lower("zva")) {
2312 // SYS #3, C7, C4, #1
2313 SYS_ALIAS(3, 7, 4, 1);
2314 } else if (!Op.compare_lower("ivac")) {
2315 // SYS #0, C7, C6, #1
2316 SYS_ALIAS(0, 7, 6, 1);
2317 } else if (!Op.compare_lower("isw")) {
2318 // SYS #0, C7, C6, #2
2319 SYS_ALIAS(0, 7, 6, 2);
2320 } else if (!Op.compare_lower("cvac")) {
2321 // SYS #3, C7, C10, #1
2322 SYS_ALIAS(3, 7, 10, 1);
2323 } else if (!Op.compare_lower("csw")) {
2324 // SYS #0, C7, C10, #2
2325 SYS_ALIAS(0, 7, 10, 2);
2326 } else if (!Op.compare_lower("cvau")) {
2327 // SYS #3, C7, C11, #1
2328 SYS_ALIAS(3, 7, 11, 1);
2329 } else if (!Op.compare_lower("civac")) {
2330 // SYS #3, C7, C14, #1
2331 SYS_ALIAS(3, 7, 14, 1);
2332 } else if (!Op.compare_lower("cisw")) {
2333 // SYS #0, C7, C14, #2
2334 SYS_ALIAS(0, 7, 14, 2);
2336 return TokError("invalid operand for DC instruction");
2338 } else if (Mnemonic == "at") {
2339 if (!Op.compare_lower("s1e1r")) {
2340 // SYS #0, C7, C8, #0
2341 SYS_ALIAS(0, 7, 8, 0);
2342 } else if (!Op.compare_lower("s1e2r")) {
2343 // SYS #4, C7, C8, #0
2344 SYS_ALIAS(4, 7, 8, 0);
2345 } else if (!Op.compare_lower("s1e3r")) {
2346 // SYS #6, C7, C8, #0
2347 SYS_ALIAS(6, 7, 8, 0);
2348 } else if (!Op.compare_lower("s1e1w")) {
2349 // SYS #0, C7, C8, #1
2350 SYS_ALIAS(0, 7, 8, 1);
2351 } else if (!Op.compare_lower("s1e2w")) {
2352 // SYS #4, C7, C8, #1
2353 SYS_ALIAS(4, 7, 8, 1);
2354 } else if (!Op.compare_lower("s1e3w")) {
2355 // SYS #6, C7, C8, #1
2356 SYS_ALIAS(6, 7, 8, 1);
2357 } else if (!Op.compare_lower("s1e0r")) {
2358 // SYS #0, C7, C8, #2
2359 SYS_ALIAS(0, 7, 8, 2);
2360 } else if (!Op.compare_lower("s1e0w")) {
2361 // SYS #0, C7, C8, #3
2362 SYS_ALIAS(0, 7, 8, 3);
2363 } else if (!Op.compare_lower("s12e1r")) {
2364 // SYS #4, C7, C8, #4
2365 SYS_ALIAS(4, 7, 8, 4);
2366 } else if (!Op.compare_lower("s12e1w")) {
2367 // SYS #4, C7, C8, #5
2368 SYS_ALIAS(4, 7, 8, 5);
2369 } else if (!Op.compare_lower("s12e0r")) {
2370 // SYS #4, C7, C8, #6
2371 SYS_ALIAS(4, 7, 8, 6);
2372 } else if (!Op.compare_lower("s12e0w")) {
2373 // SYS #4, C7, C8, #7
2374 SYS_ALIAS(4, 7, 8, 7);
2376 return TokError("invalid operand for AT instruction");
2378 } else if (Mnemonic == "tlbi") {
2379 if (!Op.compare_lower("vmalle1is")) {
2380 // SYS #0, C8, C3, #0
2381 SYS_ALIAS(0, 8, 3, 0);
2382 } else if (!Op.compare_lower("alle2is")) {
2383 // SYS #4, C8, C3, #0
2384 SYS_ALIAS(4, 8, 3, 0);
2385 } else if (!Op.compare_lower("alle3is")) {
2386 // SYS #6, C8, C3, #0
2387 SYS_ALIAS(6, 8, 3, 0);
2388 } else if (!Op.compare_lower("vae1is")) {
2389 // SYS #0, C8, C3, #1
2390 SYS_ALIAS(0, 8, 3, 1);
2391 } else if (!Op.compare_lower("vae2is")) {
2392 // SYS #4, C8, C3, #1
2393 SYS_ALIAS(4, 8, 3, 1);
2394 } else if (!Op.compare_lower("vae3is")) {
2395 // SYS #6, C8, C3, #1
2396 SYS_ALIAS(6, 8, 3, 1);
2397 } else if (!Op.compare_lower("aside1is")) {
2398 // SYS #0, C8, C3, #2
2399 SYS_ALIAS(0, 8, 3, 2);
2400 } else if (!Op.compare_lower("vaae1is")) {
2401 // SYS #0, C8, C3, #3
2402 SYS_ALIAS(0, 8, 3, 3);
2403 } else if (!Op.compare_lower("alle1is")) {
2404 // SYS #4, C8, C3, #4
2405 SYS_ALIAS(4, 8, 3, 4);
2406 } else if (!Op.compare_lower("vale1is")) {
2407 // SYS #0, C8, C3, #5
2408 SYS_ALIAS(0, 8, 3, 5);
2409 } else if (!Op.compare_lower("vaale1is")) {
2410 // SYS #0, C8, C3, #7
2411 SYS_ALIAS(0, 8, 3, 7);
2412 } else if (!Op.compare_lower("vmalle1")) {
2413 // SYS #0, C8, C7, #0
2414 SYS_ALIAS(0, 8, 7, 0);
2415 } else if (!Op.compare_lower("alle2")) {
2416 // SYS #4, C8, C7, #0
2417 SYS_ALIAS(4, 8, 7, 0);
2418 } else if (!Op.compare_lower("vale2is")) {
2419 // SYS #4, C8, C3, #5
2420 SYS_ALIAS(4, 8, 3, 5);
2421 } else if (!Op.compare_lower("vale3is")) {
2422 // SYS #6, C8, C3, #5
2423 SYS_ALIAS(6, 8, 3, 5);
2424 } else if (!Op.compare_lower("alle3")) {
2425 // SYS #6, C8, C7, #0
2426 SYS_ALIAS(6, 8, 7, 0);
2427 } else if (!Op.compare_lower("vae1")) {
2428 // SYS #0, C8, C7, #1
2429 SYS_ALIAS(0, 8, 7, 1);
2430 } else if (!Op.compare_lower("vae2")) {
2431 // SYS #4, C8, C7, #1
2432 SYS_ALIAS(4, 8, 7, 1);
2433 } else if (!Op.compare_lower("vae3")) {
2434 // SYS #6, C8, C7, #1
2435 SYS_ALIAS(6, 8, 7, 1);
2436 } else if (!Op.compare_lower("aside1")) {
2437 // SYS #0, C8, C7, #2
2438 SYS_ALIAS(0, 8, 7, 2);
2439 } else if (!Op.compare_lower("vaae1")) {
2440 // SYS #0, C8, C7, #3
2441 SYS_ALIAS(0, 8, 7, 3);
2442 } else if (!Op.compare_lower("alle1")) {
2443 // SYS #4, C8, C7, #4
2444 SYS_ALIAS(4, 8, 7, 4);
2445 } else if (!Op.compare_lower("vale1")) {
2446 // SYS #0, C8, C7, #5
2447 SYS_ALIAS(0, 8, 7, 5);
2448 } else if (!Op.compare_lower("vale2")) {
2449 // SYS #4, C8, C7, #5
2450 SYS_ALIAS(4, 8, 7, 5);
2451 } else if (!Op.compare_lower("vale3")) {
2452 // SYS #6, C8, C7, #5
2453 SYS_ALIAS(6, 8, 7, 5);
2454 } else if (!Op.compare_lower("vaale1")) {
2455 // SYS #0, C8, C7, #7
2456 SYS_ALIAS(0, 8, 7, 7);
2457 } else if (!Op.compare_lower("ipas2e1")) {
2458 // SYS #4, C8, C4, #1
2459 SYS_ALIAS(4, 8, 4, 1);
2460 } else if (!Op.compare_lower("ipas2le1")) {
2461 // SYS #4, C8, C4, #5
2462 SYS_ALIAS(4, 8, 4, 5);
2463 } else if (!Op.compare_lower("ipas2e1is")) {
2464 // SYS #4, C8, C0, #1
2465 SYS_ALIAS(4, 8, 0, 1);
2466 } else if (!Op.compare_lower("ipas2le1is")) {
2467 // SYS #4, C8, C0, #5
2468 SYS_ALIAS(4, 8, 0, 5);
2469 } else if (!Op.compare_lower("vmalls12e1")) {
2470 // SYS #4, C8, C7, #6
2471 SYS_ALIAS(4, 8, 7, 6);
2472 } else if (!Op.compare_lower("vmalls12e1is")) {
2473 // SYS #4, C8, C3, #6
2474 SYS_ALIAS(4, 8, 3, 6);
2476 return TokError("invalid operand for TLBI instruction");
2482 Parser.Lex(); // Eat operand.
// Ops whose names contain "all" operate on everything and take no register;
// every other op requires one (e.g. the address for DC CVAC).
2484 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2485 bool HasRegister = false;
2487 // Check for the optional register operand.
2488 if (getLexer().is(AsmToken::Comma)) {
2489 Parser.Lex(); // Eat comma.
2491 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2492 return TokError("expected register operand");
// NOTE(review): HasRegister is presumably set to true in code elided from
// this view — confirm against the full file.
2497 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2498 Parser.eatToEndOfStatement();
2499 return TokError("unexpected token in argument list");
2502 if (ExpectRegister && !HasRegister) {
2503 return TokError("specified " + Mnemonic + " op requires a register");
2505 else if (!ExpectRegister && HasRegister) {
2506 return TokError("specified " + Mnemonic + " op does not use a register");
2509 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse a DMB/DSB/ISB barrier operand: either a
/// #imm in the range [0, 15] or a named option such as "sy".
2513 AArch64AsmParser::OperandMatchResultTy
2514 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2515 const AsmToken &Tok = Parser.getTok();
2517 // Can be either a #imm style literal or an option name
2518 bool Hash = Tok.is(AsmToken::Hash);
2519 if (Hash || Tok.is(AsmToken::Integer)) {
2520 // Immediate operand.
2522 Parser.Lex(); // Eat the '#'
2523 const MCExpr *ImmVal;
2524 SMLoc ExprLoc = getLoc();
2525 if (getParser().parseExpression(ImmVal))
2526 return MatchOperand_ParseFail;
// The value must fold to a constant; symbolic barrier operands are invalid.
2527 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2529 Error(ExprLoc, "immediate value expected for barrier operand");
2530 return MatchOperand_ParseFail;
// Barrier options encode in a 4-bit field, hence the [0, 15] range check.
2532 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2533 Error(ExprLoc, "barrier operand out of range");
2534 return MatchOperand_ParseFail;
2537 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2538 return MatchOperand_Success;
2541 if (Tok.isNot(AsmToken::Identifier)) {
2542 TokError("invalid operand for instruction");
2543 return MatchOperand_ParseFail;
// Named option: look the identifier up in the data-barrier name table.
2547 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2549 TokError("invalid barrier option name");
2550 return MatchOperand_ParseFail;
2553 // The only valid named option for ISB is 'sy'
2554 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2555 TokError("'sy' or #imm operand expected");
2556 return MatchOperand_ParseFail;
2560 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2561 Parser.Lex(); // Consume the option
2563 return MatchOperand_Success;
2566 AArch64AsmParser::OperandMatchResultTy
2567 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2568 const AsmToken &Tok = Parser.getTok();
2570 if (Tok.isNot(AsmToken::Identifier))
2571 return MatchOperand_NoMatch;
2573 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2574 STI.getFeatureBits(), getContext()));
2575 Parser.Lex(); // Eat identifier
2577 return MatchOperand_Success;
2580 /// tryParseVectorRegister - Parse a vector register operand.
/// Returns false on success, true if the operand was not a vector register
/// (matching the parseRegister convention).
2581 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2582 if (Parser.getTok().isNot(AsmToken::Identifier))
2586 // Check for a vector register specifier first.
2588 int64_t Reg = tryMatchVectorRegister(Kind, false);
2592 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2593 // If there was an explicit qualifier, that goes on as a literal text
// token (e.g. the ".8b" arrangement suffix).
2597 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2599 // If there is an index specifier following the register, parse that too.
2600 if (Parser.getTok().is(AsmToken::LBrac)) {
2601 SMLoc SIdx = getLoc();
2602 Parser.Lex(); // Eat left bracket token.
2604 const MCExpr *ImmVal;
2605 if (getParser().parseExpression(ImmVal))
// The lane index must fold to a plain constant.
2607 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2609 TokError("immediate value expected for vector index");
2614 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2615 Error(E, "']' expected");
2619 Parser.Lex(); // Eat right bracket token.
2621 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2628 /// parseRegister - Parse a non-vector register operand.
/// Returns false on success, true if no register could be parsed.
2629 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2631 // Try for a vector register.
2632 if (!tryParseVectorRegister(Operands))
2635 // Try for a scalar register.
2636 int64_t Reg = tryParseRegister();
2640 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2642 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2643 // as a string token in the instruction itself.
2644 if (getLexer().getKind() == AsmToken::LBrac) {
2645 SMLoc LBracS = getLoc();
2647 const AsmToken &Tok = Parser.getTok();
2648 if (Tok.is(AsmToken::Integer)) {
2649 SMLoc IntS = getLoc();
2650 int64_t Val = Tok.getIntVal();
// Only a literal "[1]" is matched as tokens; the "[", "1" and "]" are
// pushed as separate text tokens for the matcher.
2653 if (getLexer().getKind() == AsmToken::RBrac) {
2654 SMLoc RBracS = getLoc();
2657 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2659 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2661 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate that may carry a leading ELF
/// relocation specifier such as ":lo12:". On success ImmVal holds the
/// parsed expression, wrapped in an AArch64MCExpr when a specifier was
/// present. Returns true on error (diagnostic already emitted).
2671 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2672 bool HasELFModifier = false;
2673 AArch64MCExpr::VariantKind RefKind;
2675 if (Parser.getTok().is(AsmToken::Colon)) {
2676 Parser.Lex(); // Eat ':"
2677 HasELFModifier = true;
2679 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2680 Error(Parser.getTok().getLoc(),
2681 "expect relocation specifier in operand after ':'");
// Relocation specifiers are matched case-insensitively.
2685 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2686 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2687 .Case("lo12", AArch64MCExpr::VK_LO12)
2688 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2689 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2690 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2691 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2692 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2693 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2694 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2695 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2696 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2697 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2698 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2699 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2700 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2701 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2702 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2703 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2704 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2705 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2706 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2707 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2708 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2709 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2710 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2711 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2712 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2713 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2714 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2715 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2716 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2717 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2718 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2719 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2720 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2721 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2722 .Default(AArch64MCExpr::VK_INVALID)
2724 if (RefKind == AArch64MCExpr::VK_INVALID) {
2725 Error(Parser.getTok().getLoc(),
2726 "expect relocation specifier in operand after ':'");
2730 Parser.Lex(); // Eat identifier
// The specifier is terminated by a second ':' before the expression proper.
2732 if (Parser.getTok().isNot(AsmToken::Colon)) {
2733 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2736 Parser.Lex(); // Eat ':'
2739 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the fixup records the relocation variant.
2743 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2748 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Accepts both range syntax "{ v0.8b - v3.8b }" and comma syntax
/// "{ v0.8b, v1.8b, ... }", with an optional trailing lane index.
/// Returns true on error (diagnostic already emitted).
2749 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2750 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2752 Parser.Lex(); // Eat left bracket token.
2754 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2757 int64_t PrevReg = FirstReg;
// Range form: "{ vN.T - vM.T }".
2760 if (Parser.getTok().is(AsmToken::Minus)) {
2761 Parser.Lex(); // Eat the minus.
2763 SMLoc Loc = getLoc();
2765 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2768 // Any Kind suffices must match on all regs in the list.
2769 if (Kind != NextKind)
2770 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap mod 32, e.g. "{ v31.8b - v1.8b }" spans 3 regs.
2772 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2774 if (Space == 0 || Space > 3) {
2775 return Error(Loc, "invalid number of vectors");
// Comma form: each subsequent register must follow its predecessor.
2781 while (Parser.getTok().is(AsmToken::Comma)) {
2782 Parser.Lex(); // Eat the comma token.
2784 SMLoc Loc = getLoc();
2786 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2789 // Any Kind suffices must match on all regs in the list.
2790 if (Kind != NextKind)
2791 return Error(Loc, "mismatched register size suffix");
2793 // Registers must be incremental (with wraparound at 31)
2794 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2795 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2796 return Error(Loc, "registers must be sequential");
2803 if (Parser.getTok().isNot(AsmToken::RCurly))
2804 return Error(getLoc(), "'}' expected");
2805 Parser.Lex(); // Eat the '}' token.
2808 return Error(S, "invalid number of vectors");
2810 unsigned NumElements = 0;
2811 char ElementKind = 0;
2813 parseValidVectorKind(Kind, NumElements, ElementKind);
2815 Operands.push_back(AArch64Operand::CreateVectorList(
2816 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2818 // If there is an index specifier following the list, parse that too.
2819 if (Parser.getTok().is(AsmToken::LBrac)) {
2820 SMLoc SIdx = getLoc();
2821 Parser.Lex(); // Eat left bracket token.
2823 const MCExpr *ImmVal;
2824 if (getParser().parseExpression(ImmVal))
// The lane index must fold to a plain constant.
2826 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2828 TokError("immediate value expected for vector index");
2833 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2834 Error(E, "']' expected");
2838 Parser.Lex(); // Eat right bracket token.
2840 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed by
/// ", #0" (e.g. the "[xN, #0]" base forms); any other index is rejected.
2846 AArch64AsmParser::OperandMatchResultTy
2847 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2848 const AsmToken &Tok = Parser.getTok();
2849 if (!Tok.is(AsmToken::Identifier))
2850 return MatchOperand_NoMatch;
2852 unsigned RegNum = MatchRegisterName(Tok.getString().lower());
2854 MCContext &Ctx = getContext();
2855 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2856 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2857 return MatchOperand_NoMatch;
2860 Parser.Lex(); // Eat register
// No comma: plain register operand.
2862 if (Parser.getTok().isNot(AsmToken::Comma)) {
2864 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2865 return MatchOperand_Success;
2867 Parser.Lex(); // Eat comma.
2869 if (Parser.getTok().is(AsmToken::Hash))
2870 Parser.Lex(); // Eat hash
2872 if (Parser.getTok().isNot(AsmToken::Integer)) {
2873 Error(getLoc(), "index must be absent or #0");
2874 return MatchOperand_ParseFail;
// Only a literal constant zero is accepted after the comma.
2877 const MCExpr *ImmVal;
2878 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2879 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2880 Error(getLoc(), "index must be absent or #0");
2881 return MatchOperand_ParseFail;
2885 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2886 return MatchOperand_Success;
2889 /// parseOperand - Parse a arm instruction operand. For now this parses the
2890 /// operand regardless of the mnemonic.
/// Returns true on error (diagnostic already emitted).
2891 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2892 bool invertCondCode) {
2893 // Check if the current operand has a custom associated parser, if so, try to
2894 // custom parse the operand, or fallback to the general approach.
2895 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2896 if (ResTy == MatchOperand_Success)
2898 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2899 // there was a match, but an error occurred, in which case, just return that
2900 // the operand parsing failed.
2901 if (ResTy == MatchOperand_ParseFail)
2904 // Nothing custom, so do general case parsing.
2906 switch (getLexer().getKind()) {
2910 if (parseSymbolicImmVal(Expr))
2911 return Error(S, "invalid operand");
2913 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2914 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2917 case AsmToken::LBrac: {
2918 SMLoc Loc = Parser.getTok().getLoc();
2919 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2921 Parser.Lex(); // Eat '['
2923 // There's no comma after a '[', so we can parse the next operand
// immediately.
2925 return parseOperand(Operands, false, false);
2927 case AsmToken::LCurly:
2928 return parseVectorList(Operands);
2929 case AsmToken::Identifier: {
2930 // If we're expecting a Condition Code operand, then just parse that.
2932 return parseCondCode(Operands, invertCondCode);
2934 // If it's a register name, parse it.
2935 if (!parseRegister(Operands))
2938 // This could be an optional "shift" or "extend" operand.
2939 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2940 // We can only continue if no tokens were eaten.
2941 if (GotShift != MatchOperand_NoMatch)
2944 // This was not a register so parse other operands that start with an
2945 // identifier (like labels) as expressions and create them as immediates.
2946 const MCExpr *IdVal;
2948 if (getParser().parseExpression(IdVal))
2951 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2952 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
2955 case AsmToken::Integer:
2956 case AsmToken::Real:
2957 case AsmToken::Hash: {
2958 // #42 -> immediate.
2960 if (getLexer().is(AsmToken::Hash))
2963 // Parse a negative sign
2964 bool isNegative = false;
2965 if (Parser.getTok().is(AsmToken::Minus)) {
2967 // We need to consume this token only when we have a Real, otherwise
2968 // we let parseSymbolicImmVal take care of it
2969 if (Parser.getLexer().peekTok().is(AsmToken::Real))
2973 // The only Real that should come through here is a literal #0.0 for
2974 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
2975 // so convert the value.
2976 const AsmToken &Tok = Parser.getTok();
2977 if (Tok.is(AsmToken::Real)) {
2978 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2979 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2980 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
2981 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
2982 Mnemonic != "fcmlt")
2983 return TokError("unexpected floating point literal");
2984 else if (IntVal != 0 || isNegative)
2985 return TokError("expected floating-point constant #0.0")
2986 Parser.Lex(); // Eat the token.
// Push "#0" and ".0" as raw tokens: these instructions match literal
// text rather than an immediate operand.
2989 AArch64Operand::CreateToken("#0", false, S, getContext()));
2991 AArch64Operand::CreateToken(".0", false, S, getContext()));
2995 const MCExpr *ImmVal;
2996 if (parseSymbolicImmVal(ImmVal))
2999 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3000 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3006 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
3008 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3009 StringRef Name, SMLoc NameLoc,
3010 OperandVector &Operands) {
// Canonicalize the legacy one-word conditional-branch spellings ("beq")
// to the dotted form ("b.eq") before splitting on '.'.
3011 Name = StringSwitch<StringRef>(Name.lower())
3012 .Case("beq", "b.eq")
3013 .Case("bne", "b.ne")
3014 .Case("bhs", "b.hs")
3015 .Case("bcs", "b.cs")
3016 .Case("blo", "b.lo")
3017 .Case("bcc", "b.cc")
3018 .Case("bmi", "b.mi")
3019 .Case("bpl", "b.pl")
3020 .Case("bvs", "b.vs")
3021 .Case("bvc", "b.vc")
3022 .Case("bhi", "b.hi")
3023 .Case("bls", "b.ls")
3024 .Case("bge", "b.ge")
3025 .Case("blt", "b.lt")
3026 .Case("bgt", "b.gt")
3027 .Case("ble", "b.le")
3028 .Case("bal", "b.al")
3029 .Case("bnv", "b.nv")
3032 // Create the leading tokens for the mnemonic, split by '.' characters.
3033 size_t Start = 0, Next = Name.find('.');
3034 StringRef Head = Name.slice(Start, Next);
3036 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3037 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3038 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3039 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3040 Parser.eatToEndOfStatement();
3045 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3048 // Handle condition codes for a branch mnemonic
3049 if (Head == "b" && Next != StringRef::npos) {
3051 Next = Name.find('.', Start + 1);
3052 Head = Name.slice(Start + 1, Next);
3054 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3055 (Head.data() - Name.data()));
3056 AArch64CC::CondCode CC = parseCondCodeString(Head);
3057 if (CC == AArch64CC::Invalid)
3058 return Error(SuffixLoc, "invalid condition code");
3060 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3062 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3065 // Add the remaining tokens in the mnemonic.
3066 while (Next != StringRef::npos) {
3068 Next = Name.find('.', Start + 1);
3069 Head = Name.slice(Start, Next);
3070 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3071 (Head.data() - Name.data()) + 1);
3073 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3076 // Conditional compare instructions have a Condition Code operand, which needs
3077 // to be parsed and an immediate operand created.
3078 bool condCodeFourthOperand =
3079 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3080 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3081 Head == "csinc" || Head == "csinv" || Head == "csneg")
3083 // These instructions are aliases to some of the conditional select
3084 // instructions. However, the condition code is inverted in the aliased
// instruction.
3087 // FIXME: Is this the correct way to handle these? Or should the parser
3088 // generate the aliased instructions directly?
3089 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3090 bool condCodeThirdOperand =
3091 (Head == "cinc" || Head == "cinv" || Head == "cneg")
3093 // Read the remaining operands.
3094 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3095 // Read the first operand.
3096 if (parseOperand(Operands, false, false)) {
3097 Parser.eatToEndOfStatement();
3102 while (getLexer().is(AsmToken::Comma)) {
3103 Parser.Lex(); // Eat the comma.
3105 // Parse and remember the operand.
// N is the 1-based position of the operand being parsed; which position
// holds the condition code depends on the mnemonic classified above.
3106 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3107 (N == 3 && condCodeThirdOperand) ||
3108 (N == 2 && condCodeSecondOperand),
3109 condCodeSecondOperand || condCodeThirdOperand)) {
3110 Parser.eatToEndOfStatement();
3114 // After successfully parsing some operands there are two special cases to
3115 // consider (i.e. notional operands not separated by commas). Both are due
3116 // to memory specifiers:
3117 // + An RBrac will end an address for load/store/prefetch
3118 // + An '!' will indicate a pre-indexed operation.
3120 // It's someone else's responsibility to make sure these tokens are sane
3121 // in the given context!
3122 if (Parser.getTok().is(AsmToken::RBrac)) {
3123 SMLoc Loc = Parser.getTok().getLoc();
3124 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3129 if (Parser.getTok().is(AsmToken::Exclaim)) {
3130 SMLoc Loc = Parser.getTok().getLoc();
3131 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3140 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3141 SMLoc Loc = Parser.getTok().getLoc();
3142 Parser.eatToEndOfStatement();
3143 return Error(Loc, "unexpected token in argument list");
3146 Parser.Lex(); // Consume the EndOfStatement
3150 // FIXME: This entire function is a giant hack to provide us with decent
3151 // operand range validation/diagnostics until TableGen/MC can be extended
3152 // to support autogeneration of this kind of validation.
/// validateInstruction - Post-match checks for unpredictable register
/// combinations and immediate-expression restrictions. Loc holds the
/// source locations of the instruction's parsed operands for diagnostics.
/// Returns true (with a diagnostic) if the instruction is rejected.
3153 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3154 SmallVectorImpl<SMLoc> &Loc) {
3155 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3156 // Check for indexed addressing modes w/ the base register being the
3157 // same as a destination/source register or pair load where
3158 // the Rt == Rt2. All of those are undefined behaviour.
3159 switch (Inst.getOpcode()) {
3160 case AArch64::LDPSWpre:
3161 case AArch64::LDPWpost:
3162 case AArch64::LDPWpre:
3163 case AArch64::LDPXpost:
3164 case AArch64::LDPXpre: {
// Writeback forms: operand 0 is the writeback result, so the register
// operands start at index 1.
3165 unsigned Rt = Inst.getOperand(1).getReg();
3166 unsigned Rt2 = Inst.getOperand(2).getReg();
3167 unsigned Rn = Inst.getOperand(3).getReg();
3168 if (RI->isSubRegisterEq(Rn, Rt))
3169 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3170 "is also a destination");
3171 if (RI->isSubRegisterEq(Rn, Rt2))
3172 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3173 "is also a destination");
3176 case AArch64::LDPDi:
3177 case AArch64::LDPQi:
3178 case AArch64::LDPSi:
3179 case AArch64::LDPSWi:
3180 case AArch64::LDPWi:
3181 case AArch64::LDPXi: {
// Non-writeback pair loads: only the Rt == Rt2 restriction applies.
3182 unsigned Rt = Inst.getOperand(0).getReg();
3183 unsigned Rt2 = Inst.getOperand(1).getReg();
3185 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3188 case AArch64::LDPDpost:
3189 case AArch64::LDPDpre:
3190 case AArch64::LDPQpost:
3191 case AArch64::LDPQpre:
3192 case AArch64::LDPSpost:
3193 case AArch64::LDPSpre:
3194 case AArch64::LDPSWpost: {
// FP/SIMD writeback pair loads: base can't alias an FP destination, so
// only Rt == Rt2 is checked here.
3195 unsigned Rt = Inst.getOperand(1).getReg();
3196 unsigned Rt2 = Inst.getOperand(2).getReg();
3198 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3201 case AArch64::STPDpost:
3202 case AArch64::STPDpre:
3203 case AArch64::STPQpost:
3204 case AArch64::STPQpre:
3205 case AArch64::STPSpost:
3206 case AArch64::STPSpre:
3207 case AArch64::STPWpost:
3208 case AArch64::STPWpre:
3209 case AArch64::STPXpost:
3210 case AArch64::STPXpre: {
3211 unsigned Rt = Inst.getOperand(1).getReg();
3212 unsigned Rt2 = Inst.getOperand(2).getReg();
3213 unsigned Rn = Inst.getOperand(3).getReg();
3214 if (RI->isSubRegisterEq(Rn, Rt))
3215 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3216 "is also a source");
3217 if (RI->isSubRegisterEq(Rn, Rt2))
3218 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3219 "is also a source");
3222 case AArch64::LDRBBpre:
3223 case AArch64::LDRBpre:
3224 case AArch64::LDRHHpre:
3225 case AArch64::LDRHpre:
3226 case AArch64::LDRSBWpre:
3227 case AArch64::LDRSBXpre:
3228 case AArch64::LDRSHWpre:
3229 case AArch64::LDRSHXpre:
3230 case AArch64::LDRSWpre:
3231 case AArch64::LDRWpre:
3232 case AArch64::LDRXpre:
3233 case AArch64::LDRBBpost:
3234 case AArch64::LDRBpost:
3235 case AArch64::LDRHHpost:
3236 case AArch64::LDRHpost:
3237 case AArch64::LDRSBWpost:
3238 case AArch64::LDRSBXpost:
3239 case AArch64::LDRSHWpost:
3240 case AArch64::LDRSHXpost:
3241 case AArch64::LDRSWpost:
3242 case AArch64::LDRWpost:
3243 case AArch64::LDRXpost: {
3244 unsigned Rt = Inst.getOperand(1).getReg();
3245 unsigned Rn = Inst.getOperand(2).getReg();
3246 if (RI->isSubRegisterEq(Rn, Rt))
3247 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3248 "is also a source");
3251 case AArch64::STRBBpost:
3252 case AArch64::STRBpost:
3253 case AArch64::STRHHpost:
3254 case AArch64::STRHpost:
3255 case AArch64::STRWpost:
3256 case AArch64::STRXpost:
3257 case AArch64::STRBBpre:
3258 case AArch64::STRBpre:
3259 case AArch64::STRHHpre:
3260 case AArch64::STRHpre:
3261 case AArch64::STRWpre:
3262 case AArch64::STRXpre: {
3263 unsigned Rt = Inst.getOperand(1).getReg();
3264 unsigned Rn = Inst.getOperand(2).getReg();
3265 if (RI->isSubRegisterEq(Rn, Rt))
3266 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3267 "is also a source");
3272 // Now check immediate ranges. Separate from the above as there is overlap
3273 // in the instructions being checked and this keeps the nested conditionals
// to a minimum.
3275 switch (Inst.getOpcode()) {
3276 case AArch64::ADDSWri:
3277 case AArch64::ADDSXri:
3278 case AArch64::ADDWri:
3279 case AArch64::ADDXri:
3280 case AArch64::SUBSWri:
3281 case AArch64::SUBSXri:
3282 case AArch64::SUBWri:
3283 case AArch64::SUBXri: {
3284 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3285 // some slight duplication here.
3286 if (Inst.getOperand(2).isExpr()) {
3287 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3288 AArch64MCExpr::VariantKind ELFRefKind;
3289 MCSymbolRefExpr::VariantKind DarwinRefKind;
3291 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3292 return Error(Loc[2], "invalid immediate expression");
3295 // Only allow these with ADDXri.
3296 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3297 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3298 Inst.getOpcode() == AArch64::ADDXri)
3301 // Only allow these with ADDXri/ADDWri
3302 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3303 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3304 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3305 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3306 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3307 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3308 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3309 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3310 (Inst.getOpcode() == AArch64::ADDXri ||
3311 Inst.getOpcode() == AArch64::ADDWri))
3314 // Don't allow expressions in the immediate field otherwise
3315 return Error(Loc[2], "invalid immediate expression");
// Translate an auto-generated matcher error code (Match_*) into a
// human-readable diagnostic reported at Loc. Error() always returns true,
// so every branch both emits the message and signals failure to the caller.
// NOTE(review): this excerpt elides a few original lines (e.g. the switch
// header over ErrCode and some case labels); all visible code is unchanged.
3324 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3326 case Match_MissingFeature:
3328 "instruction requires a CPU feature not currently enabled");
3329 case Match_InvalidOperand:
3330 return Error(Loc, "invalid operand for instruction");
3331 case Match_InvalidSuffix:
3332 return Error(Loc, "invalid type suffix for instruction");
3333 case Match_InvalidCondCode:
3334 return Error(Loc, "expected AArch64 condition code");
// Diagnostics for the extended-register and shifted-register operand forms
// of the add/sub family.
3335 case Match_AddSubRegExtendSmall:
3337 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3338 case Match_AddSubRegExtendLarge:
3340 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3341 case Match_AddSubSecondSource:
3343 "expected compatible register, symbol or integer in range [0, 4095]");
3344 case Match_LogicalSecondSource:
3345 return Error(Loc, "expected compatible register or logical immediate");
3346 case Match_InvalidMovImm32Shift:
3347 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3348 case Match_InvalidMovImm64Shift:
3349 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3350 case Match_AddSubRegShift32:
3352 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3353 case Match_AddSubRegShift64:
3355 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3356 case Match_InvalidFPImm:
3358 "expected compatible register or floating-point constant");
// Load/store index-range diagnostics: scaled and unscaled immediate forms.
3359 case Match_InvalidMemoryIndexedSImm9:
3360 return Error(Loc, "index must be an integer in range [-256, 255].");
3361 case Match_InvalidMemoryIndexed4SImm7:
3362 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3363 case Match_InvalidMemoryIndexed8SImm7:
3364 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3365 case Match_InvalidMemoryIndexed16SImm7:
3366 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
// Register-offset extend diagnostics, W (32-bit) then X (64-bit) index
// registers, one case per access size (8..128 bits).
3367 case Match_InvalidMemoryWExtend8:
3369 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3370 case Match_InvalidMemoryWExtend16:
3372 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3373 case Match_InvalidMemoryWExtend32:
3375 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3376 case Match_InvalidMemoryWExtend64:
3378 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3379 case Match_InvalidMemoryWExtend128:
3381 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3382 case Match_InvalidMemoryXExtend8:
3384 "expected 'lsl' or 'sxtx' with optional shift of #0");
3385 case Match_InvalidMemoryXExtend16:
3387 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3388 case Match_InvalidMemoryXExtend32:
3390 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3391 case Match_InvalidMemoryXExtend64:
3393 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3394 case Match_InvalidMemoryXExtend128:
3396 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Unsigned scaled-offset diagnostics, one per access size.
3397 case Match_InvalidMemoryIndexed1:
3398 return Error(Loc, "index must be an integer in range [0, 4095].");
3399 case Match_InvalidMemoryIndexed2:
3400 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3401 case Match_InvalidMemoryIndexed4:
3402 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3403 case Match_InvalidMemoryIndexed8:
3404 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3405 case Match_InvalidMemoryIndexed16:
3406 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate-range diagnostics.
3407 case Match_InvalidImm0_7:
3408 return Error(Loc, "immediate must be an integer in range [0, 7].");
3409 case Match_InvalidImm0_15:
3410 return Error(Loc, "immediate must be an integer in range [0, 15].");
3411 case Match_InvalidImm0_31:
3412 return Error(Loc, "immediate must be an integer in range [0, 31].");
3413 case Match_InvalidImm0_63:
3414 return Error(Loc, "immediate must be an integer in range [0, 63].");
3415 case Match_InvalidImm0_127:
3416 return Error(Loc, "immediate must be an integer in range [0, 127].");
3417 case Match_InvalidImm0_65535:
3418 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3419 case Match_InvalidImm1_8:
3420 return Error(Loc, "immediate must be an integer in range [1, 8].");
3421 case Match_InvalidImm1_16:
3422 return Error(Loc, "immediate must be an integer in range [1, 16].");
3423 case Match_InvalidImm1_32:
3424 return Error(Loc, "immediate must be an integer in range [1, 32].");
3425 case Match_InvalidImm1_64:
3426 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics, one per element width.
3427 case Match_InvalidIndex1:
3428 return Error(Loc, "expected lane specifier '[1]'");
3429 case Match_InvalidIndexB:
3430 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3431 case Match_InvalidIndexH:
3432 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3433 case Match_InvalidIndexS:
3434 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3435 case Match_InvalidIndexD:
3436 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3437 case Match_InvalidLabel:
3438 return Error(Loc, "expected label or encodable integer pc offset");
3440 return Error(Loc, "expected readable system register");
3442 return Error(Loc, "expected writable system register or pstate");
3443 case Match_MnemonicFail:
3444 return Error(Loc, "unrecognized instruction mnemonic");
// Every known Match_* code is handled above; reaching here is a bug in the
// matcher or a newly-added code that was not given a diagnostic.
3446 assert(0 && "unexpected error code!");
3447 return Error(Loc, "invalid instruction format");
// Forward declaration; the definition comes from the generated
// AArch64GenAsmMatcher.inc included below (GET_SUBTARGET_FEATURE_NAME).
// Maps a single subtarget feature bit to its user-visible name.
3451 static const char *getSubtargetFeatureName(unsigned Val);
// Top-level match-and-emit entry point. First rewrites a handful of aliases
// that TableGen's InstAlias machinery cannot express (lsl->ubfm, bfi/sbfiz/
// ubfiz->bfm/sbfm/ubfm, bfxil/sbfx/ubfx->bfm/sbfm/ubfm, W/X register
// "twiddling" for sxt*/uxt*, and fmov with #0.0), then runs the generated
// matcher over two tables (short-form NEON first, then long-form), and
// finally either emits the MCInst or maps the failure to a diagnostic.
// Returns true on error (Error() convention), false on successful emission.
// NOTE(review): this excerpt elides many original lines (closing braces,
// else-branches, some statements such as the Inst declaration and the
// ErrorInfo mask setup); all visible code is unchanged.
3453 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3454 OperandVector &Operands,
3456 unsigned &ErrorInfo,
3457 bool MatchingInlineAsm) {
3458 assert(!Operands.empty() && "Unexpect empty operand list!");
3459 AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[0]);
3460 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3462 StringRef Tok = Op->getToken();
3463 unsigned NumOperands = Operands.size();
// Rewrite "lsl Rd, Rn, #shift" in place as the equivalent "ubfm" by
// computing the immr/imms immediates; the wrap width (0x1f vs 0x3f)
// depends on whether the destination is a 32- or 64-bit GPR.
3465 if (NumOperands == 4 && Tok == "lsl") {
3466 AArch64Operand *Op2 = static_cast<AArch64Operand *>(Operands[2]);
3467 AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
3468 if (Op2->isReg() && Op3->isImm()) {
3469 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3471 uint64_t Op3Val = Op3CE->getValue();
3472 uint64_t NewOp3Val = 0;
3473 uint64_t NewOp4Val = 0;
3474 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3476 NewOp3Val = (32 - Op3Val) & 0x1f;
3477 NewOp4Val = 31 - Op3Val;
3479 NewOp3Val = (64 - Op3Val) & 0x3f;
3480 NewOp4Val = 63 - Op3Val;
3483 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3484 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3486 Operands[0] = AArch64Operand::CreateToken(
3487 "ubfm", false, Op->getStartLoc(), getContext());
3488 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
3489 Op3->getEndLoc(), getContext());
3490 Operands.push_back(AArch64Operand::CreateImm(
3491 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
3496 } else if (NumOperands == 5) {
3497 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3498 // UBFIZ -> UBFM aliases.
3499 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3500 AArch64Operand *Op1 = static_cast<AArch64Operand *>(Operands[1]);
3501 AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
3502 AArch64Operand *Op4 = static_cast<AArch64Operand *>(Operands[4]);
3504 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
3505 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3506 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
3508 if (Op3CE && Op4CE) {
3509 uint64_t Op3Val = Op3CE->getValue();
3510 uint64_t Op4Val = Op4CE->getValue();
// RegWidth is 32 or 64 depending on the destination register class and
// is used to range-check lsb (Op3Val) and width (Op4Val) before the
// immediates are converted to BFM form.
3512 uint64_t RegWidth = 0;
3513 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3519 if (Op3Val >= RegWidth)
3520 return Error(Op3->getStartLoc(),
3521 "expected integer in range [0, 31]");
3522 if (Op4Val < 1 || Op4Val > RegWidth)
3523 return Error(Op4->getStartLoc(),
3524 "expected integer in range [1, 32]");
3526 uint64_t NewOp3Val = 0;
3527 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3529 NewOp3Val = (32 - Op3Val) & 0x1f;
3531 NewOp3Val = (64 - Op3Val) & 0x3f;
3533 uint64_t NewOp4Val = Op4Val - 1;
3535 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3536 return Error(Op4->getStartLoc(),
3537 "requested insert overflows register");
3539 const MCExpr *NewOp3 =
3540 MCConstantExpr::Create(NewOp3Val, getContext());
3541 const MCExpr *NewOp4 =
3542 MCConstantExpr::Create(NewOp4Val, getContext());
3543 Operands[3] = AArch64Operand::CreateImm(
3544 NewOp3, Op3->getStartLoc(), Op3->getEndLoc(), getContext());
3545 Operands[4] = AArch64Operand::CreateImm(
3546 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
// Replace the mnemonic token with the underlying *BFM instruction.
3548 Operands[0] = AArch64Operand::CreateToken(
3549 "bfm", false, Op->getStartLoc(), getContext());
3550 else if (Tok == "sbfiz")
3551 Operands[0] = AArch64Operand::CreateToken(
3552 "sbfm", false, Op->getStartLoc(), getContext());
3553 else if (Tok == "ubfiz")
3554 Operands[0] = AArch64Operand::CreateToken(
3555 "ubfm", false, Op->getStartLoc(), getContext());
3557 llvm_unreachable("No valid mnemonic for alias?");
3565 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3566 // UBFX -> UBFM aliases.
3567 } else if (NumOperands == 5 &&
3568 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3569 AArch64Operand *Op1 = static_cast<AArch64Operand *>(Operands[1]);
3570 AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
3571 AArch64Operand *Op4 = static_cast<AArch64Operand *>(Operands[4]);
3573 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
3574 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3575 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
3577 if (Op3CE && Op4CE) {
3578 uint64_t Op3Val = Op3CE->getValue();
3579 uint64_t Op4Val = Op4CE->getValue();
3581 uint64_t RegWidth = 0;
3582 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3588 if (Op3Val >= RegWidth)
3589 return Error(Op3->getStartLoc(),
3590 "expected integer in range [0, 31]");
3591 if (Op4Val < 1 || Op4Val > RegWidth)
3592 return Error(Op4->getStartLoc(),
3593 "expected integer in range [1, 32]");
// For the extract forms, imms = lsb + width - 1; reject combinations
// whose field would run past the end of the register.
3595 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3597 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3598 return Error(Op4->getStartLoc(),
3599 "requested extract overflows register");
3601 const MCExpr *NewOp4 =
3602 MCConstantExpr::Create(NewOp4Val, getContext());
3603 Operands[4] = AArch64Operand::CreateImm(
3604 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
3606 Operands[0] = AArch64Operand::CreateToken(
3607 "bfm", false, Op->getStartLoc(), getContext());
3608 else if (Tok == "sbfx")
3609 Operands[0] = AArch64Operand::CreateToken(
3610 "sbfm", false, Op->getStartLoc(), getContext());
3611 else if (Tok == "ubfx")
3612 Operands[0] = AArch64Operand::CreateToken(
3613 "ubfm", false, Op->getStartLoc(), getContext());
3615 llvm_unreachable("No valid mnemonic for alias?");
3623 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3624 // InstAlias can't quite handle this since the reg classes aren't
3626 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3627 // The source register can be Wn here, but the matcher expects a
3628 // GPR64. Twiddle it here if necessary.
3629 AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[2]);
3631 unsigned Reg = getXRegFromWReg(Op->getReg());
3632 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
3633 Op->getEndLoc(), getContext());
3637 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3638 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3639 AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
3641 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3643 // The source register can be Wn here, but the matcher expects a
3644 // GPR64. Twiddle it here if necessary.
3645 AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[2]);
3647 unsigned Reg = getXRegFromWReg(Op->getReg());
3648 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
3649 Op->getEndLoc(), getContext());
3654 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3655 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3656 AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
3658 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3660 // The source register can be Wn here, but the matcher expects a
3661 // GPR32. Twiddle it here if necessary.
3662 AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
3664 unsigned Reg = getWRegFromXReg(Op->getReg());
3665 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
3666 Op->getEndLoc(), getContext());
3672 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3673 if (NumOperands == 3 && Tok == "fmov") {
3674 AArch64Operand *RegOp = static_cast<AArch64Operand *>(Operands[1]);
3675 AArch64Operand *ImmOp = static_cast<AArch64Operand *>(Operands[2]);
// getFPImm() == (unsigned)-1 is the sentinel this parser uses for #0.0
// here; the immediate operand is replaced with the matching zero register.
3676 if (RegOp->isReg() && ImmOp->isFPImm() &&
3677 ImmOp->getFPImm() == (unsigned)-1) {
3679 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3683 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op->getStartLoc(),
3684 Op->getEndLoc(), getContext());
3690 // First try to match against the secondary set of tables containing the
3691 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3692 unsigned MatchResult =
3693 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3695 // If that fails, try against the alternate table containing long-form NEON:
3696 // "fadd v0.2s, v1.2s, v2.2s"
3697 if (MatchResult != Match_Success)
3699 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3701 switch (MatchResult) {
3702 case Match_Success: {
3703 // Perform range checking and other semantic validations
3704 SmallVector<SMLoc, 8> OperandLocs;
3705 NumOperands = Operands.size();
3706 for (unsigned i = 1; i < NumOperands; ++i)
3707 OperandLocs.push_back(Operands[i]->getStartLoc());
3708 if (validateInstruction(Inst, OperandLocs))
3712 Out.EmitInstruction(Inst, STI);
3715 case Match_MissingFeature: {
3716 assert(ErrorInfo && "Unknown missing feature!");
3717 // Special case the error message for the very common case where only
3718 // a single subtarget feature is missing (neon, e.g.).
3719 std::string Msg = "instruction requires:";
// Walk each bit of the ErrorInfo feature mask and append the name of
// every missing feature to the diagnostic.
3721 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3722 if (ErrorInfo & Mask) {
3724 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3728 return Error(IDLoc, Msg);
3730 case Match_MnemonicFail:
3731 return showMatchError(IDLoc, MatchResult);
3732 case Match_InvalidOperand: {
// Point the diagnostic at the offending operand when the matcher told us
// which one it was (ErrorInfo), falling back to the instruction start.
3733 SMLoc ErrorLoc = IDLoc;
3734 if (ErrorInfo != ~0U) {
3735 if (ErrorInfo >= Operands.size())
3736 return Error(IDLoc, "too few operands for instruction");
3738 ErrorLoc = ((AArch64Operand *)Operands[ErrorInfo])->getStartLoc();
3739 if (ErrorLoc == SMLoc())
3742 // If the match failed on a suffix token operand, tweak the diagnostic
3744 if (((AArch64Operand *)Operands[ErrorInfo])->isToken() &&
3745 ((AArch64Operand *)Operands[ErrorInfo])->isTokenSuffix())
3746 MatchResult = Match_InvalidSuffix;
3748 return showMatchError(ErrorLoc, MatchResult);
// All remaining operand-class failures share one handler: locate the
// operand from ErrorInfo and let showMatchError() produce the message.
3750 case Match_InvalidMemoryIndexed1:
3751 case Match_InvalidMemoryIndexed2:
3752 case Match_InvalidMemoryIndexed4:
3753 case Match_InvalidMemoryIndexed8:
3754 case Match_InvalidMemoryIndexed16:
3755 case Match_InvalidCondCode:
3756 case Match_AddSubRegExtendSmall:
3757 case Match_AddSubRegExtendLarge:
3758 case Match_AddSubSecondSource:
3759 case Match_LogicalSecondSource:
3760 case Match_AddSubRegShift32:
3761 case Match_AddSubRegShift64:
3762 case Match_InvalidMovImm32Shift:
3763 case Match_InvalidMovImm64Shift:
3764 case Match_InvalidFPImm:
3765 case Match_InvalidMemoryWExtend8:
3766 case Match_InvalidMemoryWExtend16:
3767 case Match_InvalidMemoryWExtend32:
3768 case Match_InvalidMemoryWExtend64:
3769 case Match_InvalidMemoryWExtend128:
3770 case Match_InvalidMemoryXExtend8:
3771 case Match_InvalidMemoryXExtend16:
3772 case Match_InvalidMemoryXExtend32:
3773 case Match_InvalidMemoryXExtend64:
3774 case Match_InvalidMemoryXExtend128:
3775 case Match_InvalidMemoryIndexed4SImm7:
3776 case Match_InvalidMemoryIndexed8SImm7:
3777 case Match_InvalidMemoryIndexed16SImm7:
3778 case Match_InvalidMemoryIndexedSImm9:
3779 case Match_InvalidImm0_7:
3780 case Match_InvalidImm0_15:
3781 case Match_InvalidImm0_31:
3782 case Match_InvalidImm0_63:
3783 case Match_InvalidImm0_127:
3784 case Match_InvalidImm0_65535:
3785 case Match_InvalidImm1_8:
3786 case Match_InvalidImm1_16:
3787 case Match_InvalidImm1_32:
3788 case Match_InvalidImm1_64:
3789 case Match_InvalidIndex1:
3790 case Match_InvalidIndexB:
3791 case Match_InvalidIndexH:
3792 case Match_InvalidIndexS:
3793 case Match_InvalidIndexD:
3794 case Match_InvalidLabel:
3797 if (ErrorInfo >= Operands.size())
3798 return Error(IDLoc, "too few operands for instruction");
3799 // Any time we get here, there's nothing fancy to do. Just get the
3800 // operand SMLoc and display the diagnostic.
3801 SMLoc ErrorLoc = ((AArch64Operand *)Operands[ErrorInfo])->getStartLoc();
3802 if (ErrorLoc == SMLoc())
3804 return showMatchError(ErrorLoc, MatchResult);
3808 llvm_unreachable("Implement any new match types added!");
/// ParseDirective parses the AArch64-specific assembler directives:
/// data-emission directives (.hword/.word/.xword, 2/4/8 bytes),
/// .tlsdesccall, and anything else is handed to parseDirectiveLOH.
/// Returns the result of the chosen sub-parser (true on error).
3813 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3814 StringRef IDVal = DirectiveID.getIdentifier();
3815 SMLoc Loc = DirectiveID.getLoc();
3816 if (IDVal == ".hword")
3817 return parseDirectiveWord(2, Loc);
3818 if (IDVal == ".word")
3819 return parseDirectiveWord(4, Loc);
3820 if (IDVal == ".xword")
3821 return parseDirectiveWord(8, Loc);
3822 if (IDVal == ".tlsdesccall")
3823 return parseDirectiveTLSDescCall(Loc);
// Fall through: let the LOH parser decide whether it recognizes IDVal.
3825 return parseDirectiveLOH(IDVal, Loc);
/// parseDirectiveWord
/// ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a Size-byte value via the
/// streamer. Returns true on a parse error.
/// NOTE(review): this excerpt elides several original lines (the loop
/// header, error returns, and the end of the function); visible code is
/// unchanged.
3830 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3831 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3833 const MCExpr *Value;
3834 if (getParser().parseExpression(Value))
3837 getParser().getStreamer().EmitValue(Value, Size);
3839 if (getLexer().is(AsmToken::EndOfStatement))
3842 // FIXME: Improve diagnostic.
3843 if (getLexer().isNot(AsmToken::Comma))
3844 return Error(L, "unexpected token in directive");
// parseDirectiveTLSDescCall:
// ::= .tlsdesccall symbol
// Parses the symbol operand, wraps it in a VK_TLSDESC AArch64MCExpr, and
// emits a TLSDESCCALL pseudo-instruction carrying that expression.
// Returns true on a parse error.
3855 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3857 if (getParser().parseIdentifier(Name))
3858 return Error(L, "expected symbol after directive");
3860 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3861 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3862 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3865 Inst.setOpcode(AArch64::TLSDESCCALL);
3866 Inst.addOperand(MCOperand::CreateExpr(Expr));
3868 getParser().getStreamer().EmitInstruction(Inst, STI);
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
/// Accepts the LOH kind either as a recognized identifier or as a raw
/// numeric id, then parses exactly MCLOHIdToNbArgs(Kind) comma-separated
/// label arguments and forwards them to the streamer as an LOH directive.
/// Returns true on a parse error.
3874 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3875 if (IDVal != MCLOHDirectiveName())
3878 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3879 if (getParser().getTok().isNot(AsmToken::Integer))
3880 return TokError("expected an identifier or a number in directive");
3881 // We successfully get a numeric value for the identifier.
3882 // Check if it is valid.
3883 int64_t Id = getParser().getTok().getIntVal();
3884 Kind = (MCLOHType)Id;
3885 // Check that Id does not overflow MCLOHType.
3886 if (!isValidMCLOHType(Kind) || Id != Kind)
3887 return TokError("invalid numeric identifier in directive");
3889 StringRef Name = getTok().getIdentifier();
3890 // We successfully parse an identifier.
3891 // Check if it is a recognized one.
3892 int Id = MCLOHNameToId(Name);
3895 return TokError("invalid identifier in directive");
3896 Kind = (MCLOHType)Id;
3898 // Consume the identifier.
3900 // Get the number of arguments of this LOH.
3901 int NbArgs = MCLOHIdToNbArgs(Kind);
3903 assert(NbArgs != -1 && "Invalid number of arguments");
3905 SmallVector<MCSymbol *, 3> Args;
3906 for (int Idx = 0; Idx < NbArgs; ++Idx) {
3908 if (getParser().parseIdentifier(Name))
3909 return TokError("expected identifier in directive");
3910 Args.push_back(getContext().GetOrCreateSymbol(Name));
// The last argument is not followed by a comma; anything other than a
// comma between arguments is an error.
3912 if (Idx + 1 == NbArgs)
3914 if (getLexer().isNot(AsmToken::Comma))
3915 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3918 if (getLexer().isNot(AsmToken::EndOfStatement))
3919 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3921 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
// Decompose Expr into (ELF variant kind, Darwin variant kind, constant
// addend). Handles a bare symbol reference, an AArch64MCExpr wrapper, or a
// symbol +/- constant binary expression. The return value indicates whether
// the expression was classifiable; mixing ELF and Darwin relocation syntax
// in one expression is rejected.
// NOTE(review): the return type line and several guard/early-return lines
// are elided in this excerpt; visible code is unchanged.
3926 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
3927 AArch64MCExpr::VariantKind &ELFRefKind,
3928 MCSymbolRefExpr::VariantKind &DarwinRefKind,
// Default both outputs to "no variant" before inspecting the expression.
3930 ELFRefKind = AArch64MCExpr::VK_INVALID;
3931 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off an AArch64MCExpr wrapper, recording its ELF variant kind.
3934 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
3935 ELFRefKind = AE->getKind();
3936 Expr = AE->getSubExpr();
3939 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
3941 // It's a simple symbol reference with no addend.
3942 DarwinRefKind = SE->getKind();
3946 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
3950 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
3953 DarwinRefKind = SE->getKind();
// Only symbol + constant and symbol - constant are supported.
3955 if (BE->getOpcode() != MCBinaryExpr::Add &&
3956 BE->getOpcode() != MCBinaryExpr::Sub)
3959 // See if the addend is is a constant, otherwise there's more going
3960 // on here than we can deal with.
3961 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
3965 Addend = AddendExpr->getValue();
3966 if (BE->getOpcode() == MCBinaryExpr::Sub)
3969 // It's some symbol reference + a constant addend, but really
3970 // shouldn't use both Darwin and ELF syntax.
3971 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
3972 DarwinRefKind == MCSymbolRefExpr::VK_None;
/// Force static initialization.
/// Registers this asm parser with the target registry for both endiannesses
/// of the AArch64 target and the legacy ARM64 target names.
3976 extern "C" void LLVMInitializeAArch64AsmParser() {
3977 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
3978 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
3980 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
3981 RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
3984 #define GET_REGISTER_MATCHER
3985 #define GET_SUBTARGET_FEATURE_NAME
3986 #define GET_MATCHER_IMPLEMENTATION
3987 #include "AArch64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
// Custom operand-class hook invoked by the generated matcher. For token
// match classes that stand for literal immediates (used by InstAliases
// with a fixed-value immediate in their syntax), accept the operand only
// when it is a constant expression equal to the expected value; everything
// else is Match_InvalidOperand.
// NOTE(review): original lines 4001-4041 (the switch that maps the match
// class Kind to ExpectedVal) are elided in this excerpt.
3991 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
3993 AArch64Operand *Op = static_cast<AArch64Operand *>(AsmOp);
3994 // If the kind is a token for a literal immediate, check if our asm
3995 // operand matches. This is for InstAliases which have a fixed-value
3996 // immediate in the syntax.
3997 int64_t ExpectedVal;
4000 return Match_InvalidOperand;
4042 return Match_InvalidOperand;
4043 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4045 return Match_InvalidOperand;
4046 if (CE->getValue() == ExpectedVal)
4047 return Match_Success;
4048 return Match_InvalidOperand;