1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
// AArch64AsmParser - target assembly parser: turns AArch64 assembly text into
// MCInst operands via the table-generated matcher.
// NOTE(review): this region is a fragmentary extraction — access specifiers,
// several member declarations and closing braces are missing between entries.
40 class AArch64AsmParser : public MCTargetAsmParser {
42   StringRef Mnemonic; ///< Instruction mnemonic.
// Convenience accessors for the wrapped generic parser/lexer state.
46   MCAsmParser &getParser() const { return Parser; }
47   MCAsmLexer &getLexer() const { return Parser.getLexer(); }
49   SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// SYS-alias and operand parsing helpers; each returns true on error per the
// MCTargetAsmParser convention.
51   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
52   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
53   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
54   int tryParseRegister();
55   int tryMatchVectorRegister(StringRef &Kind, bool expected);
56   bool parseRegister(OperandVector &Operands);
57   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
58   bool parseVectorList(OperandVector &Operands);
59   bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostic forwarding to the underlying MCAsmParser.
62   void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
63   bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
64   bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Directive handlers (.word, .tlsdesccall, .loh ...).
66   bool parseDirectiveWord(unsigned Size, SMLoc L);
67   bool parseDirectiveTLSDescCall(SMLoc L);
69   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
71   bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
72   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
73                                OperandVector &Operands, MCStreamer &Out,
75                                bool MatchingInlineAsm) override;
76   /// @name Auto-generated Match Functions
79 #define GET_ASSEMBLER_HEADER
80 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers invoked by the generated matcher.
84   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
85   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
86   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
87   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
88   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
89   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
90   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
91   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
92   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
93   OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
94   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
95   bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match-result codes, extended by tablegen diagnostics.
98   enum AArch64MatchResultTy {
99     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
100 #define GET_OPERAND_DIAGNOSTIC_TYPES
101 #include "AArch64GenAsmMatcher.inc"
103   AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
104                    const MCInstrInfo &MII,
105                    const MCTargetOptions &Options)
106       : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
107     MCAsmParserExtension::Initialize(_Parser);
109     // Initialize the set of available features.
110     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface overrides.
113   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
114                         SMLoc NameLoc, OperandVector &Operands) override;
115   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
116   bool ParseDirective(AsmToken DirectiveID) override;
117   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
118                                       unsigned Kind) override;
// Classify a symbolic expression into ELF/Darwin relocation variants.
120   static bool classifySymbolRef(const MCExpr *Expr,
121                                 AArch64MCExpr::VariantKind &ELFRefKind,
122                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
125 } // end anonymous namespace
129 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
131 class AArch64Operand : public MCParsedAsmOperand {
// Source range covered by this operand, for diagnostics.
149   SMLoc StartLoc, EndLoc;
154   bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Per-kind payload structs; stored in a union selected by Kind.
// NOTE(review): fragmentary extraction — several fields, the KindTy enum and
// the union declaration itself are missing from this view.
162   struct VectorListOp {
165     unsigned NumElements;
166     unsigned ElementKind;
169   struct VectorIndexOp {
177   struct ShiftedImmOp {
179     unsigned ShiftAmount;
183     AArch64CC::CondCode Code;
187     unsigned Val; // Encoded 8-bit representation.
191     unsigned Val; // Not the enum since not all values have names.
197     uint64_t FeatureBits; // We need to pass through information about which
198                           // core we are compiling for so that the SysReg
199                           // Mappers can appropriately conditionalize.
210   struct ShiftExtendOp {
211     AArch64_AM::ShiftExtendType Type;
213     bool HasExplicitAmount;
223     struct VectorListOp VectorList;
224     struct VectorIndexOp VectorIndex;
226     struct ShiftedImmOp ShiftedImm;
227     struct CondCodeOp CondCode;
228     struct FPImmOp FPImm;
229     struct BarrierOp Barrier;
230     struct SysRegOp SysReg;
231     struct SysCRImmOp SysCRImm;
232     struct PrefetchOp Prefetch;
233     struct ShiftExtendOp ShiftExtend;
236   // Keep the MCContext around as the MCExprs may need manipulated during
237   // the add<>Operands() calls.
// Kind-tagged constructor; payload fields are filled in by the Create*
// factory functions (not visible in this chunk).
241   AArch64Operand(KindTy K, MCContext &_Ctx)
242       : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor: copies only the union member active for o.Kind
// (presumably via a switch on Kind — the switch header is missing here).
244   AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
246     StartLoc = o.StartLoc;
256       ShiftedImm = o.ShiftedImm;
259       CondCode = o.CondCode;
271       VectorList = o.VectorList;
274       VectorIndex = o.VectorIndex;
280       SysCRImm = o.SysCRImm;
283       Prefetch = o.Prefetch;
286       ShiftExtend = o.ShiftExtend;
291 /// getStartLoc - Get the location of the first token of this operand.
292 SMLoc getStartLoc() const override { return StartLoc; }
293 /// getEndLoc - Get the location of the last token of this operand.
294 SMLoc getEndLoc() const override { return EndLoc; }
296 StringRef getToken() const {
297 assert(Kind == k_Token && "Invalid access!");
298 return StringRef(Tok.Data, Tok.Length);
301 bool isTokenSuffix() const {
302 assert(Kind == k_Token && "Invalid access!");
306 const MCExpr *getImm() const {
307 assert(Kind == k_Immediate && "Invalid access!");
311 const MCExpr *getShiftedImmVal() const {
312 assert(Kind == k_ShiftedImm && "Invalid access!");
313 return ShiftedImm.Val;
316 unsigned getShiftedImmShift() const {
317 assert(Kind == k_ShiftedImm && "Invalid access!");
318 return ShiftedImm.ShiftAmount;
321 AArch64CC::CondCode getCondCode() const {
322 assert(Kind == k_CondCode && "Invalid access!");
323 return CondCode.Code;
326 unsigned getFPImm() const {
327 assert(Kind == k_FPImm && "Invalid access!");
331 unsigned getBarrier() const {
332 assert(Kind == k_Barrier && "Invalid access!");
336 unsigned getReg() const override {
337 assert(Kind == k_Register && "Invalid access!");
341 unsigned getVectorListStart() const {
342 assert(Kind == k_VectorList && "Invalid access!");
343 return VectorList.RegNum;
346 unsigned getVectorListCount() const {
347 assert(Kind == k_VectorList && "Invalid access!");
348 return VectorList.Count;
351 unsigned getVectorIndex() const {
352 assert(Kind == k_VectorIndex && "Invalid access!");
353 return VectorIndex.Val;
356 StringRef getSysReg() const {
357 assert(Kind == k_SysReg && "Invalid access!");
358 return StringRef(SysReg.Data, SysReg.Length);
361 uint64_t getSysRegFeatureBits() const {
362 assert(Kind == k_SysReg && "Invalid access!");
363 return SysReg.FeatureBits;
366 unsigned getSysCR() const {
367 assert(Kind == k_SysCR && "Invalid access!");
371 unsigned getPrefetch() const {
372 assert(Kind == k_Prefetch && "Invalid access!");
376 AArch64_AM::ShiftExtendType getShiftExtendType() const {
377 assert(Kind == k_ShiftExtend && "Invalid access!");
378 return ShiftExtend.Type;
381 unsigned getShiftExtendAmount() const {
382 assert(Kind == k_ShiftExtend && "Invalid access!");
383 return ShiftExtend.Amount;
386 bool hasShiftExtendAmount() const {
387 assert(Kind == k_ShiftExtend && "Invalid access!");
388 return ShiftExtend.HasExplicitAmount;
391 bool isImm() const override { return Kind == k_Immediate; }
392 bool isMem() const override { return false; }
393 bool isSImm9() const {
396 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
399 int64_t Val = MCE->getValue();
400 return (Val >= -256 && Val < 256);
402 bool isSImm7s4() const {
405 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
408 int64_t Val = MCE->getValue();
409 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
411 bool isSImm7s8() const {
414 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
417 int64_t Val = MCE->getValue();
418 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
420 bool isSImm7s16() const {
423 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
426 int64_t Val = MCE->getValue();
427 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
430 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
431 AArch64MCExpr::VariantKind ELFRefKind;
432 MCSymbolRefExpr::VariantKind DarwinRefKind;
434 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
436 // If we don't understand the expression, assume the best and
437 // let the fixup and relocation code deal with it.
441 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
442 ELFRefKind == AArch64MCExpr::VK_LO12 ||
443 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
444 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
445 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
446 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
447 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
448 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
449 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
450 // Note that we don't range-check the addend. It's adjusted modulo page
451 // size when converted, so there is no "out of range" condition when using
453 return Addend >= 0 && (Addend % Scale) == 0;
454 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
455 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
456 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
463 template <int Scale> bool isUImm12Offset() const {
467 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
469 return isSymbolicUImm12Offset(getImm(), Scale);
471 int64_t Val = MCE->getValue();
472 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
475 bool isImm0_7() const {
478 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
481 int64_t Val = MCE->getValue();
482 return (Val >= 0 && Val < 8);
484 bool isImm1_8() const {
487 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
490 int64_t Val = MCE->getValue();
491 return (Val > 0 && Val < 9);
493 bool isImm0_15() const {
496 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
499 int64_t Val = MCE->getValue();
500 return (Val >= 0 && Val < 16);
502 bool isImm1_16() const {
505 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
508 int64_t Val = MCE->getValue();
509 return (Val > 0 && Val < 17);
511 bool isImm0_31() const {
514 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
517 int64_t Val = MCE->getValue();
518 return (Val >= 0 && Val < 32);
520 bool isImm1_31() const {
523 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
526 int64_t Val = MCE->getValue();
527 return (Val >= 1 && Val < 32);
529 bool isImm1_32() const {
532 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
535 int64_t Val = MCE->getValue();
536 return (Val >= 1 && Val < 33);
538 bool isImm0_63() const {
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 int64_t Val = MCE->getValue();
545 return (Val >= 0 && Val < 64);
547 bool isImm1_63() const {
550 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
553 int64_t Val = MCE->getValue();
554 return (Val >= 1 && Val < 64);
556 bool isImm1_64() const {
559 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
562 int64_t Val = MCE->getValue();
563 return (Val >= 1 && Val < 65);
565 bool isImm0_127() const {
568 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
571 int64_t Val = MCE->getValue();
572 return (Val >= 0 && Val < 128);
574 bool isImm0_255() const {
577 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
580 int64_t Val = MCE->getValue();
581 return (Val >= 0 && Val < 256);
583 bool isImm0_65535() const {
586 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
589 int64_t Val = MCE->getValue();
590 return (Val >= 0 && Val < 65536);
592 bool isImm32_63() const {
595 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
598 int64_t Val = MCE->getValue();
599 return (Val >= 32 && Val < 64);
601 bool isLogicalImm32() const {
604 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
607 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 32);
609 bool isLogicalImm64() const {
612 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
615 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
617 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
618 bool isAddSubImm() const {
619 if (!isShiftedImm() && !isImm())
624 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
625 if (isShiftedImm()) {
626 unsigned Shift = ShiftedImm.ShiftAmount;
627 Expr = ShiftedImm.Val;
628 if (Shift != 0 && Shift != 12)
634 AArch64MCExpr::VariantKind ELFRefKind;
635 MCSymbolRefExpr::VariantKind DarwinRefKind;
637 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
638 DarwinRefKind, Addend)) {
639 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
640 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
641 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
642 || ELFRefKind == AArch64MCExpr::VK_LO12
643 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
644 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
645 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
646 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
647 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
648 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
649 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
652 // Otherwise it should be a real immediate in range:
653 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
654 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
656 bool isCondCode() const { return Kind == k_CondCode; }
657 bool isSIMDImmType10() const {
660 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
663 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
665 bool isBranchTarget26() const {
668 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
671 int64_t Val = MCE->getValue();
674 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
676 bool isPCRelLabel19() const {
679 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
682 int64_t Val = MCE->getValue();
685 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
687 bool isBranchTarget14() const {
690 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
693 int64_t Val = MCE->getValue();
696 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
700 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
704 AArch64MCExpr::VariantKind ELFRefKind;
705 MCSymbolRefExpr::VariantKind DarwinRefKind;
707 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
708 DarwinRefKind, Addend)) {
711 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
714 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
715 if (ELFRefKind == AllowedModifiers[i])
722 bool isMovZSymbolG3() const {
723 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
724 return isMovWSymbol(Variants);
727 bool isMovZSymbolG2() const {
728 static AArch64MCExpr::VariantKind Variants[] = {
729 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
730 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
731 return isMovWSymbol(Variants);
734 bool isMovZSymbolG1() const {
735 static AArch64MCExpr::VariantKind Variants[] = {
736 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
737 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
738 AArch64MCExpr::VK_DTPREL_G1,
740 return isMovWSymbol(Variants);
743 bool isMovZSymbolG0() const {
744 static AArch64MCExpr::VariantKind Variants[] = {
745 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
746 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
747 return isMovWSymbol(Variants);
750 bool isMovKSymbolG3() const {
751 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
752 return isMovWSymbol(Variants);
755 bool isMovKSymbolG2() const {
756 static AArch64MCExpr::VariantKind Variants[] = {
757 AArch64MCExpr::VK_ABS_G2_NC};
758 return isMovWSymbol(Variants);
761 bool isMovKSymbolG1() const {
762 static AArch64MCExpr::VariantKind Variants[] = {
763 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
764 AArch64MCExpr::VK_DTPREL_G1_NC
766 return isMovWSymbol(Variants);
769 bool isMovKSymbolG0() const {
770 static AArch64MCExpr::VariantKind Variants[] = {
771 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
772 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
774 return isMovWSymbol(Variants);
777 template<int RegWidth, int Shift>
778 bool isMOVZMovAlias() const {
779 if (!isImm()) return false;
781 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
782 if (!CE) return false;
783 uint64_t Value = CE->getValue();
786 Value &= 0xffffffffULL;
788 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
789 if (Value == 0 && Shift != 0)
792 return (Value & ~(0xffffULL << Shift)) == 0;
795 template<int RegWidth, int Shift>
796 bool isMOVNMovAlias() const {
797 if (!isImm()) return false;
799 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800 if (!CE) return false;
801 uint64_t Value = CE->getValue();
803 // MOVZ takes precedence over MOVN.
804 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
805 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
810 Value &= 0xffffffffULL;
812 return (Value & ~(0xffffULL << Shift)) == 0;
815 bool isFPImm() const { return Kind == k_FPImm; }
816 bool isBarrier() const { return Kind == k_Barrier; }
817 bool isSysReg() const { return Kind == k_SysReg; }
818 bool isMRSSystemRegister() const {
819 if (!isSysReg()) return false;
821 bool IsKnownRegister;
822 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
823 Mapper.fromString(getSysReg(), IsKnownRegister);
825 return IsKnownRegister;
827 bool isMSRSystemRegister() const {
828 if (!isSysReg()) return false;
830 bool IsKnownRegister;
831 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
832 Mapper.fromString(getSysReg(), IsKnownRegister);
834 return IsKnownRegister;
836 bool isSystemPStateField() const {
837 if (!isSysReg()) return false;
839 bool IsKnownRegister;
840 AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
842 return IsKnownRegister;
844 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
845 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
846 bool isVectorRegLo() const {
847 return Kind == k_Register && Reg.isVector &&
848 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
851 bool isGPR32as64() const {
852 return Kind == k_Register && !Reg.isVector &&
853 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
856 bool isGPR64sp0() const {
857 return Kind == k_Register && !Reg.isVector &&
858 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
861 /// Is this a vector list with the type implicit (presumably attached to the
862 /// instruction itself)?
863 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
864 return Kind == k_VectorList && VectorList.Count == NumRegs &&
865 !VectorList.ElementKind;
868 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
869 bool isTypedVectorList() const {
870 if (Kind != k_VectorList)
872 if (VectorList.Count != NumRegs)
874 if (VectorList.ElementKind != ElementKind)
876 return VectorList.NumElements == NumElements;
879 bool isVectorIndex1() const {
880 return Kind == k_VectorIndex && VectorIndex.Val == 1;
882 bool isVectorIndexB() const {
883 return Kind == k_VectorIndex && VectorIndex.Val < 16;
885 bool isVectorIndexH() const {
886 return Kind == k_VectorIndex && VectorIndex.Val < 8;
888 bool isVectorIndexS() const {
889 return Kind == k_VectorIndex && VectorIndex.Val < 4;
891 bool isVectorIndexD() const {
892 return Kind == k_VectorIndex && VectorIndex.Val < 2;
894 bool isToken() const override { return Kind == k_Token; }
895 bool isTokenEqual(StringRef Str) const {
896 return Kind == k_Token && getToken() == Str;
898 bool isSysCR() const { return Kind == k_SysCR; }
899 bool isPrefetch() const { return Kind == k_Prefetch; }
900 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
901 bool isShifter() const {
902 if (!isShiftExtend())
905 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
906 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
907 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
908 ST == AArch64_AM::MSL);
910 bool isExtend() const {
911 if (!isShiftExtend())
914 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
915 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
916 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
917 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
918 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
919 ET == AArch64_AM::LSL) &&
920 getShiftExtendAmount() <= 4;
923 bool isExtend64() const {
926 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
927 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
928 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
930 bool isExtendLSL64() const {
933 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
934 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
935 ET == AArch64_AM::LSL) &&
936 getShiftExtendAmount() <= 4;
939 template<int Width> bool isMemXExtend() const {
942 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
943 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
944 (getShiftExtendAmount() == Log2_32(Width / 8) ||
945 getShiftExtendAmount() == 0);
948 template<int Width> bool isMemWExtend() const {
951 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
952 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
953 (getShiftExtendAmount() == Log2_32(Width / 8) ||
954 getShiftExtendAmount() == 0);
957 template <unsigned width>
958 bool isArithmeticShifter() const {
962 // An arithmetic shifter is LSL, LSR, or ASR.
963 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
964 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
965 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
968 template <unsigned width>
969 bool isLogicalShifter() const {
973 // A logical shifter is LSL, LSR, ASR or ROR.
974 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
975 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
976 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
977 getShiftExtendAmount() < width;
980 bool isMovImm32Shifter() const {
984 // A MOVi shifter is LSL of 0, 16, 32, or 48.
985 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
986 if (ST != AArch64_AM::LSL)
988 uint64_t Val = getShiftExtendAmount();
989 return (Val == 0 || Val == 16);
992 bool isMovImm64Shifter() const {
996 // A MOVi shifter is LSL of 0 or 16.
997 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
998 if (ST != AArch64_AM::LSL)
1000 uint64_t Val = getShiftExtendAmount();
1001 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1004 bool isLogicalVecShifter() const {
1008 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1009 unsigned Shift = getShiftExtendAmount();
1010 return getShiftExtendType() == AArch64_AM::LSL &&
1011 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1014 bool isLogicalVecHalfWordShifter() const {
1015 if (!isLogicalVecShifter())
1018 // A logical vector shifter is a left shift by 0 or 8.
1019 unsigned Shift = getShiftExtendAmount();
1020 return getShiftExtendType() == AArch64_AM::LSL &&
1021 (Shift == 0 || Shift == 8);
1024 bool isMoveVecShifter() const {
1025 if (!isShiftExtend())
1028 // A logical vector shifter is a left shift by 8 or 16.
1029 unsigned Shift = getShiftExtendAmount();
1030 return getShiftExtendType() == AArch64_AM::MSL &&
1031 (Shift == 8 || Shift == 16);
1034 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1035 // to LDUR/STUR when the offset is not legal for the former but is for
1036 // the latter. As such, in addition to checking for being a legal unscaled
1037 // address, also check that it is not a legal scaled address. This avoids
1038 // ambiguity in the matcher.
1040 bool isSImm9OffsetFB() const {
1041 return isSImm9() && !isUImm12Offset<Width / 8>();
1044 bool isAdrpLabel() const {
1045 // Validation was handled during parsing, so we just sanity check that
1046 // something didn't go haywire.
1050 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1051 int64_t Val = CE->getValue();
1052 int64_t Min = - (4096 * (1LL << (21 - 1)));
1053 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1054 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1060 bool isAdrLabel() const {
1061 // Validation was handled during parsing, so we just sanity check that
1062 // something didn't go haywire.
1066 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1067 int64_t Val = CE->getValue();
1068 int64_t Min = - (1LL << (21 - 1));
1069 int64_t Max = ((1LL << (21 - 1)) - 1);
1070 return Val >= Min && Val <= Max;
1076 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1077 // Add as immediates when possible. Null MCExpr = 0.
1079 Inst.addOperand(MCOperand::CreateImm(0));
1080 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1081 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1083 Inst.addOperand(MCOperand::CreateExpr(Expr));
1086 void addRegOperands(MCInst &Inst, unsigned N) const {
1087 assert(N == 1 && "Invalid number of operands!");
1088 Inst.addOperand(MCOperand::CreateReg(getReg()));
1091 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1092 assert(N == 1 && "Invalid number of operands!");
1094 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1096 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1097 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1098 RI->getEncodingValue(getReg()));
1100 Inst.addOperand(MCOperand::CreateReg(Reg));
1103 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1104 assert(N == 1 && "Invalid number of operands!");
1106 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1107 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1110 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1111 assert(N == 1 && "Invalid number of operands!");
1113 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1114 Inst.addOperand(MCOperand::CreateReg(getReg()));
1117 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1118 assert(N == 1 && "Invalid number of operands!");
1119 Inst.addOperand(MCOperand::CreateReg(getReg()));
1122 template <unsigned NumRegs>
1123 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1124 assert(N == 1 && "Invalid number of operands!");
1125 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1126 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1127 unsigned FirstReg = FirstRegs[NumRegs - 1];
1130 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1133 template <unsigned NumRegs>
1134 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1135 assert(N == 1 && "Invalid number of operands!");
1136 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1137 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1138 unsigned FirstReg = FirstRegs[NumRegs - 1];
1141 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1144 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1149 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1150 assert(N == 1 && "Invalid number of operands!");
1151 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1154 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1156 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1159 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1164 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1165 assert(N == 1 && "Invalid number of operands!");
1166 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1169 void addImmOperands(MCInst &Inst, unsigned N) const {
1170 assert(N == 1 && "Invalid number of operands!");
1171 // If this is a pageoff symrefexpr with an addend, adjust the addend
1172 // to be only the page-offset portion. Otherwise, just add the expr
1174 addExpr(Inst, getImm());
1177 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1178 assert(N == 2 && "Invalid number of operands!");
1179 if (isShiftedImm()) {
1180 addExpr(Inst, getShiftedImmVal());
1181 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1183 addExpr(Inst, getImm());
1184 Inst.addOperand(MCOperand::CreateImm(0));
1188 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1189 assert(N == 1 && "Invalid number of operands!");
1190 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1193 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1194 assert(N == 1 && "Invalid number of operands!");
1195 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1197 addExpr(Inst, getImm());
1199 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1202 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1203 addImmOperands(Inst, N);
1207 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1208 assert(N == 1 && "Invalid number of operands!");
1209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1212 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1215 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1218 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1219 assert(N == 1 && "Invalid number of operands!");
1220 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1221 assert(MCE && "Invalid constant immediate operand!");
1222 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1225 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1226 assert(N == 1 && "Invalid number of operands!");
1227 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1228 assert(MCE && "Invalid constant immediate operand!");
1229 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1232 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1233 assert(N == 1 && "Invalid number of operands!");
1234 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1235 assert(MCE && "Invalid constant immediate operand!");
1236 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1239 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1240 assert(N == 1 && "Invalid number of operands!");
1241 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1242 assert(MCE && "Invalid constant immediate operand!");
1243 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1246 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1249 assert(MCE && "Invalid constant immediate operand!");
1250 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1253 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1254 assert(N == 1 && "Invalid number of operands!");
1255 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1256 assert(MCE && "Invalid constant immediate operand!");
1257 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1260 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1263 assert(MCE && "Invalid constant immediate operand!");
1264 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1267 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1268 assert(N == 1 && "Invalid number of operands!");
1269 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1270 assert(MCE && "Invalid constant immediate operand!");
1271 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1274 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1275 assert(N == 1 && "Invalid number of operands!");
1276 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1277 assert(MCE && "Invalid constant immediate operand!");
1278 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1281 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1282 assert(N == 1 && "Invalid number of operands!");
1283 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1284 assert(MCE && "Invalid constant immediate operand!");
1285 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1288 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1289 assert(N == 1 && "Invalid number of operands!");
1290 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1291 assert(MCE && "Invalid constant immediate operand!");
1292 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1295 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1298 assert(MCE && "Invalid constant immediate operand!");
1299 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1302 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1303 assert(N == 1 && "Invalid number of operands!");
1304 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1305 assert(MCE && "Invalid constant immediate operand!");
1306 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1309 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1312 assert(MCE && "Invalid constant immediate operand!");
1313 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1316 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1319 assert(MCE && "Invalid constant immediate operand!");
1320 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1323 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1324 assert(N == 1 && "Invalid number of operands!");
1325 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1326 assert(MCE && "Invalid constant immediate operand!");
1327 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1330 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1331 assert(N == 1 && "Invalid number of operands!");
1332 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1333 assert(MCE && "Invalid constant immediate operand!");
1334 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1337 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1340 assert(MCE && "Invalid constant immediate operand!");
1341 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1344 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1345 assert(N == 1 && "Invalid number of operands!");
1346 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1347 assert(MCE && "Invalid logical immediate operand!");
1348 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1349 Inst.addOperand(MCOperand::CreateImm(encoding));
1352 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1355 assert(MCE && "Invalid logical immediate operand!");
1356 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1357 Inst.addOperand(MCOperand::CreateImm(encoding));
1360 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1361 assert(N == 1 && "Invalid number of operands!");
1362 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1363 assert(MCE && "Invalid immediate operand!");
1364 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1365 Inst.addOperand(MCOperand::CreateImm(encoding));
1368 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1369 // Branch operands don't encode the low bits, so shift them off
1370 // here. If it's a label, however, just put it on directly as there's
1371 // not enough information now to do anything.
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1375 addExpr(Inst, getImm());
1378 assert(MCE && "Invalid constant immediate operand!");
1379 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1382 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1383 // Branch operands don't encode the low bits, so shift them off
1384 // here. If it's a label, however, just put it on directly as there's
1385 // not enough information now to do anything.
1386 assert(N == 1 && "Invalid number of operands!");
1387 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1389 addExpr(Inst, getImm());
1392 assert(MCE && "Invalid constant immediate operand!");
1393 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1396 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1397 // Branch operands don't encode the low bits, so shift them off
1398 // here. If it's a label, however, just put it on directly as there's
1399 // not enough information now to do anything.
1400 assert(N == 1 && "Invalid number of operands!");
1401 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1403 addExpr(Inst, getImm());
1406 assert(MCE && "Invalid constant immediate operand!");
1407 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1410 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1411 assert(N == 1 && "Invalid number of operands!");
1412 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1415 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1416 assert(N == 1 && "Invalid number of operands!");
1417 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1420 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1421 assert(N == 1 && "Invalid number of operands!");
1424 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1425 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1427 Inst.addOperand(MCOperand::CreateImm(Bits));
1430 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1431 assert(N == 1 && "Invalid number of operands!");
1434 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1435 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1437 Inst.addOperand(MCOperand::CreateImm(Bits));
1440 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1441 assert(N == 1 && "Invalid number of operands!");
1445 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1447 Inst.addOperand(MCOperand::CreateImm(Bits));
1450 void addSysCROperands(MCInst &Inst, unsigned N) const {
1451 assert(N == 1 && "Invalid number of operands!");
1452 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1455 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1456 assert(N == 1 && "Invalid number of operands!");
1457 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1460 void addShifterOperands(MCInst &Inst, unsigned N) const {
1461 assert(N == 1 && "Invalid number of operands!");
1463 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1464 Inst.addOperand(MCOperand::CreateImm(Imm));
1467 void addExtendOperands(MCInst &Inst, unsigned N) const {
1468 assert(N == 1 && "Invalid number of operands!");
1469 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1470 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1471 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1472 Inst.addOperand(MCOperand::CreateImm(Imm));
1475 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1476 assert(N == 1 && "Invalid number of operands!");
1477 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1478 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1479 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1480 Inst.addOperand(MCOperand::CreateImm(Imm));
1483 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1484 assert(N == 2 && "Invalid number of operands!");
1485 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1486 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1487 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1488 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1491 // For 8-bit load/store instructions with a register offset, both the
1492 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1493 // they're disambiguated by whether the shift was explicit or implicit rather
1495 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1496 assert(N == 2 && "Invalid number of operands!");
1497 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1498 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1499 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1500 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1504 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1505 assert(N == 1 && "Invalid number of operands!");
1507 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1508 uint64_t Value = CE->getValue();
1509 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1513 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1514 assert(N == 1 && "Invalid number of operands!");
1516 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1517 uint64_t Value = CE->getValue();
1518 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1521 void print(raw_ostream &OS) const override;
1523 static std::unique_ptr<AArch64Operand>
1524 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1525 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1526 Op->Tok.Data = Str.data();
1527 Op->Tok.Length = Str.size();
1528 Op->Tok.IsSuffix = IsSuffix;
1534 static std::unique_ptr<AArch64Operand>
1535 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1536 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1537 Op->Reg.RegNum = RegNum;
1538 Op->Reg.isVector = isVector;
1544 static std::unique_ptr<AArch64Operand>
1545 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1546 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1547 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1548 Op->VectorList.RegNum = RegNum;
1549 Op->VectorList.Count = Count;
1550 Op->VectorList.NumElements = NumElements;
1551 Op->VectorList.ElementKind = ElementKind;
1557 static std::unique_ptr<AArch64Operand>
1558 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1559 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1560 Op->VectorIndex.Val = Idx;
1566 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1567 SMLoc E, MCContext &Ctx) {
1568 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1575 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1576 unsigned ShiftAmount,
1579 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1580 Op->ShiftedImm .Val = Val;
1581 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1587 static std::unique_ptr<AArch64Operand>
1588 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1589 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1590 Op->CondCode.Code = Code;
1596 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1598 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1599 Op->FPImm.Val = Val;
1605 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1607 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1608 Op->Barrier.Val = Val;
1614 static std::unique_ptr<AArch64Operand>
1615 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1616 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1617 Op->SysReg.Data = Str.data();
1618 Op->SysReg.Length = Str.size();
1619 Op->SysReg.FeatureBits = FeatureBits;
1625 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1626 SMLoc E, MCContext &Ctx) {
1627 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1628 Op->SysCRImm.Val = Val;
1634 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1636 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1637 Op->Prefetch.Val = Val;
1643 static std::unique_ptr<AArch64Operand>
1644 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1645 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1646 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1647 Op->ShiftExtend.Type = ShOp;
1648 Op->ShiftExtend.Amount = Val;
1649 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1656 } // end anonymous namespace.
1658 void AArch64Operand::print(raw_ostream &OS) const {
1661 OS << "<fpimm " << getFPImm() << "("
1662 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1666 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1668 OS << "<barrier " << Name << ">";
1670 OS << "<barrier invalid #" << getBarrier() << ">";
1674 getImm()->print(OS);
1676 case k_ShiftedImm: {
1677 unsigned Shift = getShiftedImmShift();
1678 OS << "<shiftedimm ";
1679 getShiftedImmVal()->print(OS);
1680 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1684 OS << "<condcode " << getCondCode() << ">";
1687 OS << "<register " << getReg() << ">";
1689 case k_VectorList: {
1690 OS << "<vectorlist ";
1691 unsigned Reg = getVectorListStart();
1692 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1693 OS << Reg + i << " ";
1698 OS << "<vectorindex " << getVectorIndex() << ">";
1701 OS << "<sysreg: " << getSysReg() << '>';
1704 OS << "'" << getToken() << "'";
1707 OS << "c" << getSysCR();
1711 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1713 OS << "<prfop " << Name << ">";
1715 OS << "<prfop invalid #" << getPrefetch() << ">";
1718 case k_ShiftExtend: {
1719 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1720 << getShiftExtendAmount();
1721 if (!hasShiftExtendAmount())
1729 /// @name Auto-generated Match Functions
1732 static unsigned MatchRegisterName(StringRef Name);
1736 static unsigned matchVectorRegName(StringRef Name) {
1737 return StringSwitch<unsigned>(Name)
1738 .Case("v0", AArch64::Q0)
1739 .Case("v1", AArch64::Q1)
1740 .Case("v2", AArch64::Q2)
1741 .Case("v3", AArch64::Q3)
1742 .Case("v4", AArch64::Q4)
1743 .Case("v5", AArch64::Q5)
1744 .Case("v6", AArch64::Q6)
1745 .Case("v7", AArch64::Q7)
1746 .Case("v8", AArch64::Q8)
1747 .Case("v9", AArch64::Q9)
1748 .Case("v10", AArch64::Q10)
1749 .Case("v11", AArch64::Q11)
1750 .Case("v12", AArch64::Q12)
1751 .Case("v13", AArch64::Q13)
1752 .Case("v14", AArch64::Q14)
1753 .Case("v15", AArch64::Q15)
1754 .Case("v16", AArch64::Q16)
1755 .Case("v17", AArch64::Q17)
1756 .Case("v18", AArch64::Q18)
1757 .Case("v19", AArch64::Q19)
1758 .Case("v20", AArch64::Q20)
1759 .Case("v21", AArch64::Q21)
1760 .Case("v22", AArch64::Q22)
1761 .Case("v23", AArch64::Q23)
1762 .Case("v24", AArch64::Q24)
1763 .Case("v25", AArch64::Q25)
1764 .Case("v26", AArch64::Q26)
1765 .Case("v27", AArch64::Q27)
1766 .Case("v28", AArch64::Q28)
1767 .Case("v29", AArch64::Q29)
1768 .Case("v30", AArch64::Q30)
1769 .Case("v31", AArch64::Q31)
1773 static bool isValidVectorKind(StringRef Name) {
1774 return StringSwitch<bool>(Name.lower())
1784 // Accept the width neutral ones, too, for verbose syntax. If those
1785 // aren't used in the right places, the token operand won't match so
1786 // all will work out.
1794 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1795 char &ElementKind) {
1796 assert(isValidVectorKind(Name));
1798 ElementKind = Name.lower()[Name.size() - 1];
1801 if (Name.size() == 2)
1804 // Parse the lane count
1805 Name = Name.drop_front();
1806 while (isdigit(Name.front())) {
1807 NumElements = 10 * NumElements + (Name.front() - '0');
1808 Name = Name.drop_front();
1812 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1814 StartLoc = getLoc();
1815 RegNo = tryParseRegister();
1816 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1817 return (RegNo == (unsigned)-1);
1820 /// tryParseRegister - Try to parse a register name. The token must be an
1821 /// Identifier when called, and if it is a register name the token is eaten and
1822 /// the register is added to the operand list.
1823 int AArch64AsmParser::tryParseRegister() {
1824 const AsmToken &Tok = Parser.getTok();
1825 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1827 std::string lowerCase = Tok.getString().lower();
1828 unsigned RegNum = MatchRegisterName(lowerCase);
1829 // Also handle a few aliases of registers.
1831 RegNum = StringSwitch<unsigned>(lowerCase)
1832 .Case("fp", AArch64::FP)
1833 .Case("lr", AArch64::LR)
1834 .Case("x31", AArch64::XZR)
1835 .Case("w31", AArch64::WZR)
1841 Parser.Lex(); // Eat identifier token.
1845 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1846 /// kind specifier. If it is a register specifier, eat the token and return it.
1847 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1848 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1849 TokError("vector register expected");
1853 StringRef Name = Parser.getTok().getString();
1854 // If there is a kind specifier, it's separated from the register name by
1856 size_t Start = 0, Next = Name.find('.');
1857 StringRef Head = Name.slice(Start, Next);
1858 unsigned RegNum = matchVectorRegName(Head);
1860 if (Next != StringRef::npos) {
1861 Kind = Name.slice(Next, StringRef::npos);
1862 if (!isValidVectorKind(Kind)) {
1863 TokError("invalid vector kind qualifier");
1867 Parser.Lex(); // Eat the register token.
1872 TokError("vector register expected");
1876 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1877 AArch64AsmParser::OperandMatchResultTy
1878 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1881 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1882 Error(S, "Expected cN operand where 0 <= N <= 15");
1883 return MatchOperand_ParseFail;
1886 StringRef Tok = Parser.getTok().getIdentifier();
1887 if (Tok[0] != 'c' && Tok[0] != 'C') {
1888 Error(S, "Expected cN operand where 0 <= N <= 15");
1889 return MatchOperand_ParseFail;
1893 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1894 if (BadNum || CRNum > 15) {
1895 Error(S, "Expected cN operand where 0 <= N <= 15");
1896 return MatchOperand_ParseFail;
1899 Parser.Lex(); // Eat identifier token.
1901 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1902 return MatchOperand_Success;
1905 /// tryParsePrefetch - Try to parse a prefetch operand.
1906 AArch64AsmParser::OperandMatchResultTy
1907 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1909 const AsmToken &Tok = Parser.getTok();
1910 // Either an identifier for named values or a 5-bit immediate.
1911 bool Hash = Tok.is(AsmToken::Hash);
1912 if (Hash || Tok.is(AsmToken::Integer)) {
1914 Parser.Lex(); // Eat hash token.
1915 const MCExpr *ImmVal;
1916 if (getParser().parseExpression(ImmVal))
1917 return MatchOperand_ParseFail;
1919 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1921 TokError("immediate value expected for prefetch operand");
1922 return MatchOperand_ParseFail;
1924 unsigned prfop = MCE->getValue();
1926 TokError("prefetch operand out of range, [0,31] expected");
1927 return MatchOperand_ParseFail;
1930 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1931 return MatchOperand_Success;
1934 if (Tok.isNot(AsmToken::Identifier)) {
1935 TokError("pre-fetch hint expected");
1936 return MatchOperand_ParseFail;
1940 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1942 TokError("pre-fetch hint expected");
1943 return MatchOperand_ParseFail;
1946 Parser.Lex(); // Eat identifier token.
1947 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1948 return MatchOperand_Success;
1951 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
1953 AArch64AsmParser::OperandMatchResultTy
1954 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1958 if (Parser.getTok().is(AsmToken::Hash)) {
1959 Parser.Lex(); // Eat hash token.
1962 if (parseSymbolicImmVal(Expr))
1963 return MatchOperand_ParseFail;
1965 AArch64MCExpr::VariantKind ELFRefKind;
1966 MCSymbolRefExpr::VariantKind DarwinRefKind;
1968 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
1969 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
1970 ELFRefKind == AArch64MCExpr::VK_INVALID) {
1971 // No modifier was specified at all; this is the syntax for an ELF basic
1972 // ADRP relocation (unfortunately).
1974 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
1975 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
1976 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
1978 Error(S, "gotpage label reference not allowed an addend");
1979 return MatchOperand_ParseFail;
1980 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
1981 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
1982 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
1983 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
1984 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
1985 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
1986 // The operand must be an @page or @gotpage qualified symbolref.
1987 Error(S, "page or gotpage label reference expected");
1988 return MatchOperand_ParseFail;
1992 // We have either a label reference possibly with addend or an immediate. The
1993 // addend is a raw value here. The linker will adjust it to only reference the
1995 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1996 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
1998 return MatchOperand_Success;
2001 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2003 AArch64AsmParser::OperandMatchResultTy
2004 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2008 if (Parser.getTok().is(AsmToken::Hash)) {
2009 Parser.Lex(); // Eat hash token.
2012 if (getParser().parseExpression(Expr))
2013 return MatchOperand_ParseFail;
2015 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2016 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2018 return MatchOperand_Success;
2021 /// tryParseFPImm - A floating point immediate expression operand.
2022 AArch64AsmParser::OperandMatchResultTy
2023 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2027 if (Parser.getTok().is(AsmToken::Hash)) {
2028 Parser.Lex(); // Eat '#'
2032 // Handle negation, as that still comes through as a separate token.
2033 bool isNegative = false;
2034 if (Parser.getTok().is(AsmToken::Minus)) {
2038 const AsmToken &Tok = Parser.getTok();
2039 if (Tok.is(AsmToken::Real)) {
2040 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2041 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2042 // If we had a '-' in front, toggle the sign bit.
2043 IntVal ^= (uint64_t)isNegative << 63;
2044 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2045 Parser.Lex(); // Eat the token.
2046 // Check for out of range values. As an exception, we let Zero through,
2047 // as we handle that special case in post-processing before matching in
2048 // order to use the zero register for it.
2049 if (Val == -1 && !RealVal.isZero()) {
2050 TokError("expected compatible register or floating-point constant");
2051 return MatchOperand_ParseFail;
2053 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2054 return MatchOperand_Success;
2056 if (Tok.is(AsmToken::Integer)) {
2058 if (!isNegative && Tok.getString().startswith("0x")) {
2059 Val = Tok.getIntVal();
2060 if (Val > 255 || Val < 0) {
2061 TokError("encoded floating point value out of range");
2062 return MatchOperand_ParseFail;
2065 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2066 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2067 // If we had a '-' in front, toggle the sign bit.
2068 IntVal ^= (uint64_t)isNegative << 63;
2069 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2071 Parser.Lex(); // Eat the token.
2072 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2073 return MatchOperand_Success;
2077 return MatchOperand_NoMatch;
2079 TokError("invalid floating point immediate");
2080 return MatchOperand_ParseFail;
2083 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2084 AArch64AsmParser::OperandMatchResultTy
2085 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2088 if (Parser.getTok().is(AsmToken::Hash))
2089 Parser.Lex(); // Eat '#'
2090 else if (Parser.getTok().isNot(AsmToken::Integer))
2091 // Operand should start from # or should be integer, emit error otherwise.
2092 return MatchOperand_NoMatch;
2095 if (parseSymbolicImmVal(Imm))
2096 return MatchOperand_ParseFail;
2097 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2098 uint64_t ShiftAmount = 0;
2099 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2101 int64_t Val = MCE->getValue();
2102 if (Val > 0xfff && (Val & 0xfff) == 0) {
2103 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2107 SMLoc E = Parser.getTok().getLoc();
2108 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2110 return MatchOperand_Success;
2116 // The optional operand must be "lsl #N" where N is non-negative.
2117 if (!Parser.getTok().is(AsmToken::Identifier) ||
2118 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2119 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2120 return MatchOperand_ParseFail;
2126 if (Parser.getTok().is(AsmToken::Hash)) {
2130 if (Parser.getTok().isNot(AsmToken::Integer)) {
2131 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2132 return MatchOperand_ParseFail;
2135 int64_t ShiftAmount = Parser.getTok().getIntVal();
2137 if (ShiftAmount < 0) {
2138 Error(Parser.getTok().getLoc(), "positive shift amount required");
2139 return MatchOperand_ParseFail;
2141 Parser.Lex(); // Eat the number
2143 SMLoc E = Parser.getTok().getLoc();
2144 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2145 S, E, getContext()));
2146 return MatchOperand_Success;
2149 /// parseCondCodeString - Parse a Condition Code string.
2150 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2151 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2152 .Case("eq", AArch64CC::EQ)
2153 .Case("ne", AArch64CC::NE)
2154 .Case("cs", AArch64CC::HS)
2155 .Case("hs", AArch64CC::HS)
2156 .Case("cc", AArch64CC::LO)
2157 .Case("lo", AArch64CC::LO)
2158 .Case("mi", AArch64CC::MI)
2159 .Case("pl", AArch64CC::PL)
2160 .Case("vs", AArch64CC::VS)
2161 .Case("vc", AArch64CC::VC)
2162 .Case("hi", AArch64CC::HI)
2163 .Case("ls", AArch64CC::LS)
2164 .Case("ge", AArch64CC::GE)
2165 .Case("lt", AArch64CC::LT)
2166 .Case("gt", AArch64CC::GT)
2167 .Case("le", AArch64CC::LE)
2168 .Case("al", AArch64CC::AL)
2169 .Case("nv", AArch64CC::NV)
2170 .Default(AArch64CC::Invalid);
2174 /// parseCondCode - Parse a Condition Code operand.
2175 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2176 bool invertCondCode) {
2178 const AsmToken &Tok = Parser.getTok();
2179 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2181 StringRef Cond = Tok.getString();
2182 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2183 if (CC == AArch64CC::Invalid)
2184 return TokError("invalid condition code");
2185 Parser.Lex(); // Eat identifier token.
2188 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2191 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2195 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2196 /// argument. Parse them if present.
2197 AArch64AsmParser::OperandMatchResultTy
2198 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2199 const AsmToken &Tok = Parser.getTok();
// Match the (case-insensitive) token against the known shift and extend
// mnemonics.
2200 std::string LowerID = Tok.getString().lower();
2201 AArch64_AM::ShiftExtendType ShOp =
2202 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2203 .Case("lsl", AArch64_AM::LSL)
2204 .Case("lsr", AArch64_AM::LSR)
2205 .Case("asr", AArch64_AM::ASR)
2206 .Case("ror", AArch64_AM::ROR)
2207 .Case("msl", AArch64_AM::MSL)
2208 .Case("uxtb", AArch64_AM::UXTB)
2209 .Case("uxth", AArch64_AM::UXTH)
2210 .Case("uxtw", AArch64_AM::UXTW)
2211 .Case("uxtx", AArch64_AM::UXTX)
2212 .Case("sxtb", AArch64_AM::SXTB)
2213 .Case("sxth", AArch64_AM::SXTH)
2214 .Case("sxtw", AArch64_AM::SXTW)
2215 .Case("sxtx", AArch64_AM::SXTX)
2216 .Default(AArch64_AM::InvalidShiftExtend)
// Not a shift/extend specifier at all; let other parsers try this token.
2218 if (ShOp == AArch64_AM::InvalidShiftExtend)
2219 return MatchOperand_NoMatch;
2221 SMLoc S = Tok.getLoc();
// A shift proper (lsl/lsr/asr/ror/msl) requires an immediate amount;
// an extend may omit it, in which case #0 is implied.
2224 bool Hash = getLexer().is(AsmToken::Hash);
2225 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2226 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2227 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2228 ShOp == AArch64_AM::MSL) {
2229 // We expect a number here.
2230 TokError("expected #imm after shift specifier");
2231 return MatchOperand_ParseFail;
2234 // "extend" type operations don't need an immediate, #0 is implicit.
2235 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2237 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2238 return MatchOperand_Success;
2242 Parser.Lex(); // Eat the '#'.
2244 // Make sure we do actually have a number
2245 if (!Parser.getTok().is(AsmToken::Integer)) {
2246 Error(Parser.getTok().getLoc(),
2247 "expected integer shift amount");
2248 return MatchOperand_ParseFail;
2251 const MCExpr *ImmVal;
2252 if (getParser().parseExpression(ImmVal))
2253 return MatchOperand_ParseFail;
// The amount must fold to a constant; symbolic expressions are rejected.
2255 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2257 TokError("expected #imm after shift specifier");
2258 return MatchOperand_ParseFail;
2261 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2262 Operands.push_back(AArch64Operand::CreateShiftExtend(
2263 ShOp, MCE->getValue(), true, S, E, getContext()));
2264 return MatchOperand_Success;
2267 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2268 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2269 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2270 OperandVector &Operands)
2271 if (Name.find('.') != StringRef::npos)
2272 return TokError("invalid operand");
// Rewrite the mnemonic itself to "sys"; the alias's encoding is supplied
// as explicit op1/Cn/Cm/op2 operands below.
2276 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2278 const AsmToken &Tok = Parser.getTok();
2279 StringRef Op = Tok.getString();
2280 SMLoc S = Tok.getLoc();
2282 const MCExpr *Expr = nullptr;
// Emit the four SYS operands (op1 imm, Cn, Cm, op2 imm) for one alias.
2284 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2286 Expr = MCConstantExpr::Create(op1, getContext()); \
2287 Operands.push_back( \
2288 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2289 Operands.push_back( \
2290 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2291 Operands.push_back( \
2292 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2293 Expr = MCConstantExpr::Create(op2, getContext()); \
2294 Operands.push_back( \
2295 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2298 if (Mnemonic == "ic") {
2299 if (!Op.compare_lower("ialluis")) {
2300 // SYS #0, C7, C1, #0
2301 SYS_ALIAS(0, 7, 1, 0);
2302 } else if (!Op.compare_lower("iallu")) {
2303 // SYS #0, C7, C5, #0
2304 SYS_ALIAS(0, 7, 5, 0);
2305 } else if (!Op.compare_lower("ivau")) {
2306 // SYS #3, C7, C5, #1
2307 SYS_ALIAS(3, 7, 5, 1);
2309 return TokError("invalid operand for IC instruction");
2311 } else if (Mnemonic == "dc") {
2312 if (!Op.compare_lower("zva")) {
2313 // SYS #3, C7, C4, #1
2314 SYS_ALIAS(3, 7, 4, 1);
2315 } else if (!Op.compare_lower("ivac")) {
2316 // SYS #0, C7, C6, #1
2317 SYS_ALIAS(0, 7, 6, 1);
2318 } else if (!Op.compare_lower("isw")) {
2319 // SYS #0, C7, C6, #2
2320 SYS_ALIAS(0, 7, 6, 2);
2321 } else if (!Op.compare_lower("cvac")) {
2322 // SYS #3, C7, C10, #1
2323 SYS_ALIAS(3, 7, 10, 1);
2324 } else if (!Op.compare_lower("csw")) {
2325 // SYS #0, C7, C10, #2
2326 SYS_ALIAS(0, 7, 10, 2);
2327 } else if (!Op.compare_lower("cvau")) {
2328 // SYS #3, C7, C11, #1
2329 SYS_ALIAS(3, 7, 11, 1);
2330 } else if (!Op.compare_lower("civac")) {
2331 // SYS #3, C7, C14, #1
2332 SYS_ALIAS(3, 7, 14, 1);
2333 } else if (!Op.compare_lower("cisw")) {
2334 // SYS #0, C7, C14, #2
2335 SYS_ALIAS(0, 7, 14, 2);
2337 return TokError("invalid operand for DC instruction");
2339 } else if (Mnemonic == "at") {
2340 if (!Op.compare_lower("s1e1r")) {
2341 // SYS #0, C7, C8, #0
2342 SYS_ALIAS(0, 7, 8, 0);
2343 } else if (!Op.compare_lower("s1e2r")) {
2344 // SYS #4, C7, C8, #0
2345 SYS_ALIAS(4, 7, 8, 0);
2346 } else if (!Op.compare_lower("s1e3r")) {
2347 // SYS #6, C7, C8, #0
2348 SYS_ALIAS(6, 7, 8, 0);
2349 } else if (!Op.compare_lower("s1e1w")) {
2350 // SYS #0, C7, C8, #1
2351 SYS_ALIAS(0, 7, 8, 1);
2352 } else if (!Op.compare_lower("s1e2w")) {
2353 // SYS #4, C7, C8, #1
2354 SYS_ALIAS(4, 7, 8, 1);
2355 } else if (!Op.compare_lower("s1e3w")) {
2356 // SYS #6, C7, C8, #1
2357 SYS_ALIAS(6, 7, 8, 1);
2358 } else if (!Op.compare_lower("s1e0r")) {
2359 // SYS #0, C7, C8, #2
2360 SYS_ALIAS(0, 7, 8, 2);
2361 } else if (!Op.compare_lower("s1e0w")) {
2362 // SYS #0, C7, C8, #3
2363 SYS_ALIAS(0, 7, 8, 3);
2364 } else if (!Op.compare_lower("s12e1r")) {
2365 // SYS #4, C7, C8, #4
2366 SYS_ALIAS(4, 7, 8, 4);
2367 } else if (!Op.compare_lower("s12e1w")) {
2368 // SYS #4, C7, C8, #5
2369 SYS_ALIAS(4, 7, 8, 5);
2370 } else if (!Op.compare_lower("s12e0r")) {
2371 // SYS #4, C7, C8, #6
2372 SYS_ALIAS(4, 7, 8, 6);
2373 } else if (!Op.compare_lower("s12e0w")) {
2374 // SYS #4, C7, C8, #7
2375 SYS_ALIAS(4, 7, 8, 7);
2377 return TokError("invalid operand for AT instruction");
2379 } else if (Mnemonic == "tlbi") {
2380 if (!Op.compare_lower("vmalle1is")) {
2381 // SYS #0, C8, C3, #0
2382 SYS_ALIAS(0, 8, 3, 0);
2383 } else if (!Op.compare_lower("alle2is")) {
2384 // SYS #4, C8, C3, #0
2385 SYS_ALIAS(4, 8, 3, 0);
2386 } else if (!Op.compare_lower("alle3is")) {
2387 // SYS #6, C8, C3, #0
2388 SYS_ALIAS(6, 8, 3, 0);
2389 } else if (!Op.compare_lower("vae1is")) {
2390 // SYS #0, C8, C3, #1
2391 SYS_ALIAS(0, 8, 3, 1);
2392 } else if (!Op.compare_lower("vae2is")) {
2393 // SYS #4, C8, C3, #1
2394 SYS_ALIAS(4, 8, 3, 1);
2395 } else if (!Op.compare_lower("vae3is")) {
2396 // SYS #6, C8, C3, #1
2397 SYS_ALIAS(6, 8, 3, 1);
2398 } else if (!Op.compare_lower("aside1is")) {
2399 // SYS #0, C8, C3, #2
2400 SYS_ALIAS(0, 8, 3, 2);
2401 } else if (!Op.compare_lower("vaae1is")) {
2402 // SYS #0, C8, C3, #3
2403 SYS_ALIAS(0, 8, 3, 3);
2404 } else if (!Op.compare_lower("alle1is")) {
2405 // SYS #4, C8, C3, #4
2406 SYS_ALIAS(4, 8, 3, 4);
2407 } else if (!Op.compare_lower("vale1is")) {
2408 // SYS #0, C8, C3, #5
2409 SYS_ALIAS(0, 8, 3, 5);
2410 } else if (!Op.compare_lower("vaale1is")) {
2411 // SYS #0, C8, C3, #7
2412 SYS_ALIAS(0, 8, 3, 7);
2413 } else if (!Op.compare_lower("vmalle1")) {
2414 // SYS #0, C8, C7, #0
2415 SYS_ALIAS(0, 8, 7, 0);
2416 } else if (!Op.compare_lower("alle2")) {
2417 // SYS #4, C8, C7, #0
2418 SYS_ALIAS(4, 8, 7, 0);
2419 } else if (!Op.compare_lower("vale2is")) {
2420 // SYS #4, C8, C3, #5
2421 SYS_ALIAS(4, 8, 3, 5);
2422 } else if (!Op.compare_lower("vale3is")) {
2423 // SYS #6, C8, C3, #5
2424 SYS_ALIAS(6, 8, 3, 5);
2425 } else if (!Op.compare_lower("alle3")) {
2426 // SYS #6, C8, C7, #0
2427 SYS_ALIAS(6, 8, 7, 0);
2428 } else if (!Op.compare_lower("vae1")) {
2429 // SYS #0, C8, C7, #1
2430 SYS_ALIAS(0, 8, 7, 1);
2431 } else if (!Op.compare_lower("vae2")) {
2432 // SYS #4, C8, C7, #1
2433 SYS_ALIAS(4, 8, 7, 1);
2434 } else if (!Op.compare_lower("vae3")) {
2435 // SYS #6, C8, C7, #1
2436 SYS_ALIAS(6, 8, 7, 1);
2437 } else if (!Op.compare_lower("aside1")) {
2438 // SYS #0, C8, C7, #2
2439 SYS_ALIAS(0, 8, 7, 2);
2440 } else if (!Op.compare_lower("vaae1")) {
2441 // SYS #0, C8, C7, #3
2442 SYS_ALIAS(0, 8, 7, 3);
2443 } else if (!Op.compare_lower("alle1")) {
2444 // SYS #4, C8, C7, #4
2445 SYS_ALIAS(4, 8, 7, 4);
2446 } else if (!Op.compare_lower("vale1")) {
2447 // SYS #0, C8, C7, #5
2448 SYS_ALIAS(0, 8, 7, 5);
2449 } else if (!Op.compare_lower("vale2")) {
2450 // SYS #4, C8, C7, #5
2451 SYS_ALIAS(4, 8, 7, 5);
2452 } else if (!Op.compare_lower("vale3")) {
2453 // SYS #6, C8, C7, #5
2454 SYS_ALIAS(6, 8, 7, 5);
2455 } else if (!Op.compare_lower("vaale1")) {
2456 // SYS #0, C8, C7, #7
2457 SYS_ALIAS(0, 8, 7, 7);
2458 } else if (!Op.compare_lower("ipas2e1")) {
2459 // SYS #4, C8, C4, #1
2460 SYS_ALIAS(4, 8, 4, 1);
2461 } else if (!Op.compare_lower("ipas2le1")) {
2462 // SYS #4, C8, C4, #5
2463 SYS_ALIAS(4, 8, 4, 5);
2464 } else if (!Op.compare_lower("ipas2e1is")) {
2465 // SYS #4, C8, C0, #1
2466 SYS_ALIAS(4, 8, 0, 1);
2467 } else if (!Op.compare_lower("ipas2le1is")) {
2468 // SYS #4, C8, C0, #5
2469 SYS_ALIAS(4, 8, 0, 5);
2470 } else if (!Op.compare_lower("vmalls12e1")) {
2471 // SYS #4, C8, C7, #6
2472 SYS_ALIAS(4, 8, 7, 6);
2473 } else if (!Op.compare_lower("vmalls12e1is")) {
2474 // SYS #4, C8, C3, #6
2475 SYS_ALIAS(4, 8, 3, 6);
2477 return TokError("invalid operand for TLBI instruction");
2483 Parser.Lex(); // Eat operand.
// Ops whose name contains "all" operate on everything and take no register;
// every other op requires one.
2485 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2486 bool HasRegister = false;
2488 // Check for the optional register operand.
2489 if (getLexer().is(AsmToken::Comma)) {
2490 Parser.Lex(); // Eat comma.
2492 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2493 return TokError("expected register operand");
2498 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2499 Parser.eatToEndOfStatement();
2500 return TokError("unexpected token in argument list");
// Diagnose a missing or spurious register for this particular op.
2503 if (ExpectRegister && !HasRegister) {
2504 return TokError("specified " + Mnemonic + " op requires a register");
2506 else if (!ExpectRegister && HasRegister) {
2507 return TokError("specified " + Mnemonic + " op does not use a register");
2510 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DMB/DSB/ISB): either a #imm in [0, 15] or a named barrier option.
2514 AArch64AsmParser::OperandMatchResultTy
2515 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2516 const AsmToken &Tok = Parser.getTok();
2518 // Can be either a #imm style literal or an option name
2519 bool Hash = Tok.is(AsmToken::Hash);
2520 if (Hash || Tok.is(AsmToken::Integer)) {
2521 // Immediate operand.
2523 Parser.Lex(); // Eat the '#'
2524 const MCExpr *ImmVal;
2525 SMLoc ExprLoc = getLoc();
2526 if (getParser().parseExpression(ImmVal))
2527 return MatchOperand_ParseFail;
// The barrier immediate must fold to a constant in the 4-bit range.
2528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2530 Error(ExprLoc, "immediate value expected for barrier operand");
2531 return MatchOperand_ParseFail;
2533 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2534 Error(ExprLoc, "barrier operand out of range");
2535 return MatchOperand_ParseFail;
2538 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2539 return MatchOperand_Success;
2542 if (Tok.isNot(AsmToken::Identifier)) {
2543 TokError("invalid operand for instruction");
2544 return MatchOperand_ParseFail;
// Named option: map the identifier through the DBarrier name table.
2548 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2550 TokError("invalid barrier option name");
2551 return MatchOperand_ParseFail;
2554 // The only valid named option for ISB is 'sy'
2555 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2556 TokError("'sy' or #imm operand expected");
2557 return MatchOperand_ParseFail;
2561 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2562 Parser.Lex(); // Consume the option
2564 return MatchOperand_Success;
/// tryParseSysReg - Parse a system-register operand as a raw identifier.
/// Name validation happens later against the current feature bits.
2567 AArch64AsmParser::OperandMatchResultTy
2568 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2569 const AsmToken &Tok = Parser.getTok();
2571 if (Tok.isNot(AsmToken::Identifier))
2572 return MatchOperand_NoMatch;
2574 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2575 STI.getFeatureBits(), getContext()));
2576 Parser.Lex(); // Eat identifier
2578 return MatchOperand_Success;
2581 /// tryParseVectorRegister - Parse a vector register operand, an optional
/// kind qualifier (e.g. ".8b"), and an optional "[index]" suffix.
/// Returns false on success (operands pushed), true on failure.
2582 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2583 if (Parser.getTok().isNot(AsmToken::Identifier))
2587 // Check for a vector register specifier first.
2589 int64_t Reg = tryMatchVectorRegister(Kind, false);
2593 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2594 // If there was an explicit qualifier, that goes on as a literal text
// token so the matcher can check the element type/count.
2598 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2600 // If there is an index specifier following the register, parse that too.
2601 if (Parser.getTok().is(AsmToken::LBrac)) {
2602 SMLoc SIdx = getLoc();
2603 Parser.Lex(); // Eat left bracket token.
2605 const MCExpr *ImmVal;
2606 if (getParser().parseExpression(ImmVal))
// The lane index must be a constant expression.
2608 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2610 TokError("immediate value expected for vector index");
2615 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2616 Error(E, "']' expected");
2620 Parser.Lex(); // Eat right bracket token.
2622 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2629 /// parseRegister - Parse a non-vector register operand.
/// Tries a vector register first (for qualified forms), then a scalar.
/// Returns false on success, true on failure.
2630 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2632 // Try for a vector register.
2633 if (!tryParseVectorRegister(Operands))
2636 // Try for a scalar register.
2637 int64_t Reg = tryParseRegister();
2641 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2643 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2644 // as a string token in the instruction itself.
2645 if (getLexer().getKind() == AsmToken::LBrac) {
2646 SMLoc LBracS = getLoc();
2648 const AsmToken &Tok = Parser.getTok();
2649 if (Tok.is(AsmToken::Integer)) {
2650 SMLoc IntS = getLoc();
2651 int64_t Val = Tok.getIntVal();
2654 if (getLexer().getKind() == AsmToken::RBrac) {
2655 SMLoc RBracS = getLoc();
// Push "[", "1", "]" as literal tokens so the matcher sees them as
// part of the instruction spelling rather than as an index operand.
2658 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2660 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2662 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":modifier:" ELF relocation specifier (e.g. ":lo12:sym").
/// On success the result (possibly wrapped in an AArch64MCExpr carrying the
/// relocation kind) is returned through ImmVal; returns false on success.
2672 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2673 bool HasELFModifier = false;
2674 AArch64MCExpr::VariantKind RefKind;
2676 if (Parser.getTok().is(AsmToken::Colon)) {
2677 Parser.Lex(); // Eat ':'
2678 HasELFModifier = true;
2680 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2681 Error(Parser.getTok().getLoc(),
2682 "expect relocation specifier in operand after ':'");
// Map the (lower-cased) specifier name to its MCExpr variant kind.
2686 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2687 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2688 .Case("lo12", AArch64MCExpr::VK_LO12)
2689 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2690 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2691 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2692 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2693 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2694 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2695 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2696 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2697 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2698 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2699 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2700 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2701 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2702 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2703 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2704 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2705 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2706 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2707 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2708 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2709 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2710 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2711 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2712 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2713 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2714 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2715 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2716 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2717 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2718 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2719 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2720 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2721 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2722 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2723 .Default(AArch64MCExpr::VK_INVALID);
2725 if (RefKind == AArch64MCExpr::VK_INVALID) {
2726 Error(Parser.getTok().getLoc(),
2727 "expect relocation specifier in operand after ':'");
2731 Parser.Lex(); // Eat identifier
// The specifier must be terminated by a second ':' before the expression.
2733 if (Parser.getTok().isNot(AsmToken::Colon)) {
2734 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2737 Parser.Lex(); // Eat ':'
2740 if (getParser().parseExpression(ImmVal))
// Wrap the parsed expression with the relocation kind when one was given.
2744 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2749 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Accepts "{Vn.K - Vm.K}" range syntax and "{Vn.K, Vn+1.K, ...}" comma
/// syntax (registers sequential mod 32), plus an optional trailing "[index]".
/// Returns false on success, true on failure.
2750 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2751 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2753 Parser.Lex(); // Eat left bracket token.
2755 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2758 int64_t PrevReg = FirstReg;
// Range form: "{v0.8b - v3.8b}".
2761 if (Parser.getTok().is(AsmToken::Minus)) {
2762 Parser.Lex(); // Eat the minus.
2764 SMLoc Loc = getLoc();
2766 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2769 // Any Kind suffixes must match on all regs in the list.
2770 if (Kind != NextKind)
2771 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap at 32 (e.g. {v30 - v1} spans four registers).
2773 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2775 if (Space == 0 || Space > 3) {
2776 return Error(Loc, "invalid number of vectors");
// Comma form: registers listed one by one.
2782 while (Parser.getTok().is(AsmToken::Comma)) {
2783 Parser.Lex(); // Eat the comma token.
2785 SMLoc Loc = getLoc();
2787 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2790 // Any Kind suffixes must match on all regs in the list.
2791 if (Kind != NextKind)
2792 return Error(Loc, "mismatched register size suffix");
2794 // Registers must be incremental (with wraparound at 31)
2795 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2796 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2797 return Error(Loc, "registers must be sequential");
2804 if (Parser.getTok().isNot(AsmToken::RCurly))
2805 return Error(getLoc(), "'}' expected");
2806 Parser.Lex(); // Eat the '}' token.
2809 return Error(S, "invalid number of vectors");
2811 unsigned NumElements = 0;
2812 char ElementKind = 0;
2814 parseValidVectorKind(Kind, NumElements, ElementKind);
2816 Operands.push_back(AArch64Operand::CreateVectorList(
2817 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2819 // If there is an index specifier following the list, parse that too.
2820 if (Parser.getTok().is(AsmToken::LBrac)) {
2821 SMLoc SIdx = getLoc();
2822 Parser.Lex(); // Eat left bracket token.
2824 const MCExpr *ImmVal;
2825 if (getParser().parseExpression(ImmVal))
// The lane index must be a constant expression.
2827 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2829 TokError("immediate value expected for vector index");
2834 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2835 Error(E, "']' expected");
2839 Parser.Lex(); // Eat right bracket token.
2841 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed by
/// ", #0" (only an absent index or a literal zero is accepted).
2847 AArch64AsmParser::OperandMatchResultTy
2848 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2849 const AsmToken &Tok = Parser.getTok();
2850 if (!Tok.is(AsmToken::Identifier))
2851 return MatchOperand_NoMatch;
2853 unsigned RegNum = MatchRegisterName(Tok.getString().lower());
2855 MCContext &Ctx = getContext();
2856 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
// Only registers in the GPR64sp class qualify for this operand form.
2857 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2858 return MatchOperand_NoMatch;
2861 Parser.Lex(); // Eat register
// No comma: plain register with the #0 implied.
2863 if (Parser.getTok().isNot(AsmToken::Comma)) {
2865 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2866 return MatchOperand_Success;
2868 Parser.Lex(); // Eat comma.
2870 if (Parser.getTok().is(AsmToken::Hash))
2871 Parser.Lex(); // Eat hash
2873 if (Parser.getTok().isNot(AsmToken::Integer)) {
2874 Error(getLoc(), "index must be absent or #0");
2875 return MatchOperand_ParseFail;
// The explicit index, when present, must be the constant 0.
2878 const MCExpr *ImmVal;
2879 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2880 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2881 Error(getLoc(), "index must be absent or #0");
2882 return MatchOperand_ParseFail;
2886 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2887 return MatchOperand_Success;
2890 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
2891 /// the operand regardless of the mnemonic.
2892 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2893 bool invertCondCode) {
2894 // Check if the current operand has a custom associated parser, if so, try to
2895 // custom parse the operand, or fallback to the general approach.
2896 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2897 if (ResTy == MatchOperand_Success)
2899 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2900 // there was a match, but an error occurred, in which case, just return that
2901 // the operand parsing failed.
2902 if (ResTy == MatchOperand_ParseFail)
2905 // Nothing custom, so do general case parsing.
2907 switch (getLexer().getKind()) {
2911 if (parseSymbolicImmVal(Expr))
2912 return Error(S, "invalid operand");
2914 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2915 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2918 case AsmToken::LBrac: {
2919 SMLoc Loc = Parser.getTok().getLoc();
2920 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2922 Parser.Lex(); // Eat '['
2924 // There's no comma after a '[', so we can parse the next operand
// immediately (recursively, without cond-code handling).
2926 return parseOperand(Operands, false, false);
2928 case AsmToken::LCurly:
2929 return parseVectorList(Operands);
2930 case AsmToken::Identifier: {
2931 // If we're expecting a Condition Code operand, then just parse that.
2933 return parseCondCode(Operands, invertCondCode);
2935 // If it's a register name, parse it.
2936 if (!parseRegister(Operands))
2939 // This could be an optional "shift" or "extend" operand.
2940 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2941 // We can only continue if no tokens were eaten.
2942 if (GotShift != MatchOperand_NoMatch)
2945 // This was not a register so parse other operands that start with an
2946 // identifier (like labels) as expressions and create them as immediates.
2947 const MCExpr *IdVal;
2949 if (getParser().parseExpression(IdVal))
2952 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2953 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
2956 case AsmToken::Integer:
2957 case AsmToken::Real:
2958 case AsmToken::Hash: {
2959 // #42 -> immediate.
2961 if (getLexer().is(AsmToken::Hash))
2964 // Parse a negative sign
2965 bool isNegative = false;
2966 if (Parser.getTok().is(AsmToken::Minus)) {
2968 // We need to consume this token only when we have a Real, otherwise
2969 // we let parseSymbolicImmVal take care of it
2970 if (Parser.getLexer().peekTok().is(AsmToken::Real))
2974 // The only Real that should come through here is a literal #0.0 for
2975 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
2976 // so convert the value.
2977 const AsmToken &Tok = Parser.getTok();
2978 if (Tok.is(AsmToken::Real)) {
2979 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2980 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2981 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
2982 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
2983 Mnemonic != "fcmlt")
2984 return TokError("unexpected floating point literal");
2985 else if (IntVal != 0 || isNegative)
2986 return TokError("expected floating-point constant #0.0")
2987 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as literal tokens, matching the instruction spelling.
2990 AArch64Operand::CreateToken("#0", false, S, getContext()));
2992 AArch64Operand::CreateToken(".0", false, S, getContext()));
2996 const MCExpr *ImmVal;
2997 if (parseSymbolicImmVal(ImmVal))
3000 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3001 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3007 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Normalizes branch aliases (bCC -> b.CC), handles the SYS
/// aliases, splits dotted mnemonics into tokens, and reads the operand list.
3009 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3010 StringRef Name, SMLoc NameLoc,
3011 OperandVector &Operands) {
// Rewrite the one-word conditional-branch spellings to the dotted form.
3012 Name = StringSwitch<StringRef>(Name.lower())
3013 .Case("beq", "b.eq")
3014 .Case("bne", "b.ne")
3015 .Case("bhs", "b.hs")
3016 .Case("bcs", "b.cs")
3017 .Case("blo", "b.lo")
3018 .Case("bcc", "b.cc")
3019 .Case("bmi", "b.mi")
3020 .Case("bpl", "b.pl")
3021 .Case("bvs", "b.vs")
3022 .Case("bvc", "b.vc")
3023 .Case("bhi", "b.hi")
3024 .Case("bls", "b.ls")
3025 .Case("bge", "b.ge")
3026 .Case("blt", "b.lt")
3027 .Case("bgt", "b.gt")
3028 .Case("ble", "b.le")
3029 .Case("bal", "b.al")
3030 .Case("bnv", "b.nv")
3033 // Create the leading tokens for the mnemonic, split by '.' characters.
3034 size_t Start = 0, Next = Name.find('.');
3035 StringRef Head = Name.slice(Start, Next);
3037 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3038 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3039 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3040 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3041 Parser.eatToEndOfStatement();
3046 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3049 // Handle condition codes for a branch mnemonic
3050 if (Head == "b" && Next != StringRef::npos) {
3052 Next = Name.find('.', Start + 1);
3053 Head = Name.slice(Start + 1, Next);
3055 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3056 (Head.data() - Name.data()));
3057 AArch64CC::CondCode CC = parseCondCodeString(Head);
3058 if (CC == AArch64CC::Invalid)
3059 return Error(SuffixLoc, "invalid condition code");
3061 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3063 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3066 // Add the remaining tokens in the mnemonic.
3067 while (Next != StringRef::npos) {
3069 Next = Name.find('.', Start + 1);
3070 Head = Name.slice(Start, Next);
3071 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3072 (Head.data() - Name.data()) + 1);
3074 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3077 // Conditional compare instructions have a Condition Code operand, which needs
3078 // to be parsed and an immediate operand created.
3079 bool condCodeFourthOperand =
3080 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3081 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3082 Head == "csinc" || Head == "csinv" || Head == "csneg");
3084 // These instructions are aliases to some of the conditional select
3085 // instructions. However, the condition code is inverted in the aliased
// instruction.
3088 // FIXME: Is this the correct way to handle these? Or should the parser
3089 // generate the aliased instructions directly?
3090 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3091 bool condCodeThirdOperand =
3092 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3094 // Read the remaining operands.
3095 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3096 // Read the first operand.
3097 if (parseOperand(Operands, false, false)) {
3098 Parser.eatToEndOfStatement();
3103 while (getLexer().is(AsmToken::Comma)) {
3104 Parser.Lex(); // Eat the comma.
3106 // Parse and remember the operand.
// N is the operand position; cond-code handling (and inversion) depends
// on which position the mnemonic expects the condition code in.
3107 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3108 (N == 3 && condCodeThirdOperand) ||
3109 (N == 2 && condCodeSecondOperand),
3110 condCodeSecondOperand || condCodeThirdOperand)) {
3111 Parser.eatToEndOfStatement();
3115 // After successfully parsing some operands there are two special cases to
3116 // consider (i.e. notional operands not separated by commas). Both are due
3117 // to memory specifiers:
3118 // + An RBrac will end an address for load/store/prefetch
3119 // + An '!' will indicate a pre-indexed operation.
3121 // It's someone else's responsibility to make sure these tokens are sane
3122 // in the given context!
3123 if (Parser.getTok().is(AsmToken::RBrac)) {
3124 SMLoc Loc = Parser.getTok().getLoc();
3125 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3130 if (Parser.getTok().is(AsmToken::Exclaim)) {
3131 SMLoc Loc = Parser.getTok().getLoc();
3132 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3141 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3142 SMLoc Loc = Parser.getTok().getLoc();
3143 Parser.eatToEndOfStatement();
3144 return Error(Loc, "unexpected token in argument list");
3147 Parser.Lex(); // Consume the EndOfStatement
3151 // FIXME: This entire function is a giant hack to provide us with decent
3152 // operand range validation/diagnostics until TableGen/MC can be extended
3153 // to support autogeneration of this kind of validation.
// Loc holds per-operand source locations so each diagnostic can point at
// the offending operand.
3154 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3155 SmallVectorImpl<SMLoc> &Loc) {
3156 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3157 // Check for indexed addressing modes w/ the base register being the
3158 // same as a destination/source register or pair load where
3159 // the Rt == Rt2. All of those are undefined behaviour.
3160 switch (Inst.getOpcode()) {
3161 case AArch64::LDPSWpre:
3162 case AArch64::LDPWpost:
3163 case AArch64::LDPWpre:
3164 case AArch64::LDPXpost:
3165 case AArch64::LDPXpre: {
// Writeback LDP: operand 0 is the writeback result, 1/2 are Rt/Rt2,
// 3 is the base Rn.
3166 unsigned Rt = Inst.getOperand(1).getReg();
3167 unsigned Rt2 = Inst.getOperand(2).getReg();
3168 unsigned Rn = Inst.getOperand(3).getReg();
3169 if (RI->isSubRegisterEq(Rn, Rt))
3170 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3171 "is also a destination");
3172 if (RI->isSubRegisterEq(Rn, Rt2))
3173 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3174 "is also a destination");
3177 case AArch64::LDPDi:
3178 case AArch64::LDPQi:
3179 case AArch64::LDPSi:
3180 case AArch64::LDPSWi:
3181 case AArch64::LDPWi:
3182 case AArch64::LDPXi: {
// Non-writeback LDP: Rt/Rt2 are operands 0/1; they must differ.
3183 unsigned Rt = Inst.getOperand(0).getReg();
3184 unsigned Rt2 = Inst.getOperand(1).getReg();
3186 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3189 case AArch64::LDPDpost:
3190 case AArch64::LDPDpre:
3191 case AArch64::LDPQpost:
3192 case AArch64::LDPQpre:
3193 case AArch64::LDPSpost:
3194 case AArch64::LDPSpre:
3195 case AArch64::LDPSWpost: {
// FP/SIMD writeback LDP: no base-overlap hazard with FP registers, but
// Rt must still differ from Rt2.
3196 unsigned Rt = Inst.getOperand(1).getReg();
3197 unsigned Rt2 = Inst.getOperand(2).getReg();
3199 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3202 case AArch64::STPDpost:
3203 case AArch64::STPDpre:
3204 case AArch64::STPQpost:
3205 case AArch64::STPQpre:
3206 case AArch64::STPSpost:
3207 case AArch64::STPSpre:
3208 case AArch64::STPWpost:
3209 case AArch64::STPWpre:
3210 case AArch64::STPXpost:
3211 case AArch64::STPXpre: {
3212 unsigned Rt = Inst.getOperand(1).getReg();
3213 unsigned Rt2 = Inst.getOperand(2).getReg();
3214 unsigned Rn = Inst.getOperand(3).getReg();
3215 if (RI->isSubRegisterEq(Rn, Rt))
3216 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3217 "is also a source");
3218 if (RI->isSubRegisterEq(Rn, Rt2))
3219 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3220 "is also a source");
3223 case AArch64::LDRBBpre:
3224 case AArch64::LDRBpre:
3225 case AArch64::LDRHHpre:
3226 case AArch64::LDRHpre:
3227 case AArch64::LDRSBWpre:
3228 case AArch64::LDRSBXpre:
3229 case AArch64::LDRSHWpre:
3230 case AArch64::LDRSHXpre:
3231 case AArch64::LDRSWpre:
3232 case AArch64::LDRWpre:
3233 case AArch64::LDRXpre:
3234 case AArch64::LDRBBpost:
3235 case AArch64::LDRBpost:
3236 case AArch64::LDRHHpost:
3237 case AArch64::LDRHpost:
3238 case AArch64::LDRSBWpost:
3239 case AArch64::LDRSBXpost:
3240 case AArch64::LDRSHWpost:
3241 case AArch64::LDRSHXpost:
3242 case AArch64::LDRSWpost:
3243 case AArch64::LDRWpost:
3244 case AArch64::LDRXpost: {
3245 unsigned Rt = Inst.getOperand(1).getReg();
3246 unsigned Rn = Inst.getOperand(2).getReg();
3247 if (RI->isSubRegisterEq(Rn, Rt))
// NOTE(review): Rt is the load's destination, yet this message says
// "source" — confirm whether the wording is intentional.
3248 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3249 "is also a source");
3252 case AArch64::STRBBpost:
3253 case AArch64::STRBpost:
3254 case AArch64::STRHHpost:
3255 case AArch64::STRHpost:
3256 case AArch64::STRWpost:
3257 case AArch64::STRXpost:
3258 case AArch64::STRBBpre:
3259 case AArch64::STRBpre:
3260 case AArch64::STRHHpre:
3261 case AArch64::STRHpre:
3262 case AArch64::STRWpre:
3263 case AArch64::STRXpre: {
3264 unsigned Rt = Inst.getOperand(1).getReg();
3265 unsigned Rn = Inst.getOperand(2).getReg();
3266 if (RI->isSubRegisterEq(Rn, Rt))
3267 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3268 "is also a source");
3273 // Now check immediate ranges. Separate from the above as there is overlap
3274 // in the instructions being checked and this keeps the nested conditionals
// shallower.
3276 switch (Inst.getOpcode()) {
3277 case AArch64::ADDSWri:
3278 case AArch64::ADDSXri:
3279 case AArch64::ADDWri:
3280 case AArch64::ADDXri:
3281 case AArch64::SUBSWri:
3282 case AArch64::SUBSXri:
3283 case AArch64::SUBWri:
3284 case AArch64::SUBXri: {
3285 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3286 // some slight duplication here.
3287 if (Inst.getOperand(2).isExpr()) {
3288 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3289 AArch64MCExpr::VariantKind ELFRefKind;
3290 MCSymbolRefExpr::VariantKind DarwinRefKind;
3292 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3293 return Error(Loc[2], "invalid immediate expression");
3296 // Only allow these with ADDXri.
3297 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3298 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3299 Inst.getOpcode() == AArch64::ADDXri)
3302 // Only allow these with ADDXri/ADDWri
3303 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3304 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3305 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3306 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3307 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3308 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3309 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3310 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3311 (Inst.getOpcode() == AArch64::ADDXri ||
3312 Inst.getOpcode() == AArch64::ADDWri))
3315 // Don't allow expressions in the immediate field otherwise
3316 return Error(Loc[2], "invalid immediate expression");
3325 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
// Translate a Match_* error code (from the generated matcher or the custom
// operand predicates) into a human-readable diagnostic at Loc.  Always
// reports via Error() and returns its result, so callers can simply
// "return showMatchError(...)".
3327 case Match_MissingFeature:
3329 "instruction requires a CPU feature not currently enabled");
3330 case Match_InvalidOperand:
3331 return Error(Loc, "invalid operand for instruction");
3332 case Match_InvalidSuffix:
3333 return Error(Loc, "invalid type suffix for instruction");
3334 case Match_InvalidCondCode:
3335 return Error(Loc, "expected AArch64 condition code");
// Diagnostics for the optional shift/extend modifiers of add/sub and
// logical instructions.
3336 case Match_AddSubRegExtendSmall:
3338 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3339 case Match_AddSubRegExtendLarge:
3341 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3342 case Match_AddSubSecondSource:
3344 "expected compatible register, symbol or integer in range [0, 4095]");
3345 case Match_LogicalSecondSource:
3346 return Error(Loc, "expected compatible register or logical immediate");
3347 case Match_InvalidMovImm32Shift:
3348 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3349 case Match_InvalidMovImm64Shift:
3350 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3351 case Match_AddSubRegShift32:
3353 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3354 case Match_AddSubRegShift64:
3356 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3357 case Match_InvalidFPImm:
3359 "expected compatible register or floating-point constant");
// Diagnostics for out-of-range memory-operand offsets and extends.
3360 case Match_InvalidMemoryIndexedSImm9:
3361 return Error(Loc, "index must be an integer in range [-256, 255].");
3362 case Match_InvalidMemoryIndexed4SImm7:
3363 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3364 case Match_InvalidMemoryIndexed8SImm7:
3365 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3366 case Match_InvalidMemoryIndexed16SImm7:
3367 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3368 case Match_InvalidMemoryWExtend8:
3370 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3371 case Match_InvalidMemoryWExtend16:
3373 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3374 case Match_InvalidMemoryWExtend32:
3376 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3377 case Match_InvalidMemoryWExtend64:
3379 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3380 case Match_InvalidMemoryWExtend128:
3382 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3383 case Match_InvalidMemoryXExtend8:
3385 "expected 'lsl' or 'sxtx' with optional shift of #0");
3386 case Match_InvalidMemoryXExtend16:
3388 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3389 case Match_InvalidMemoryXExtend32:
3391 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3392 case Match_InvalidMemoryXExtend64:
3394 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3395 case Match_InvalidMemoryXExtend128:
3397 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3398 case Match_InvalidMemoryIndexed1:
3399 return Error(Loc, "index must be an integer in range [0, 4095].");
3400 case Match_InvalidMemoryIndexed2:
3401 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3402 case Match_InvalidMemoryIndexed4:
3403 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3404 case Match_InvalidMemoryIndexed8:
3405 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3406 case Match_InvalidMemoryIndexed16:
3407 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Diagnostics for out-of-range immediates.
3408 case Match_InvalidImm0_7:
3409 return Error(Loc, "immediate must be an integer in range [0, 7].");
3410 case Match_InvalidImm0_15:
3411 return Error(Loc, "immediate must be an integer in range [0, 15].");
3412 case Match_InvalidImm0_31:
3413 return Error(Loc, "immediate must be an integer in range [0, 31].");
3414 case Match_InvalidImm0_63:
3415 return Error(Loc, "immediate must be an integer in range [0, 63].");
3416 case Match_InvalidImm0_127:
3417 return Error(Loc, "immediate must be an integer in range [0, 127].");
3418 case Match_InvalidImm0_65535:
3419 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3420 case Match_InvalidImm1_8:
3421 return Error(Loc, "immediate must be an integer in range [1, 8].");
3422 case Match_InvalidImm1_16:
3423 return Error(Loc, "immediate must be an integer in range [1, 16].");
3424 case Match_InvalidImm1_32:
3425 return Error(Loc, "immediate must be an integer in range [1, 32].");
3426 case Match_InvalidImm1_64:
3427 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Diagnostics for vector lane indices (per element size B/H/S/D).
3428 case Match_InvalidIndex1:
3429 return Error(Loc, "expected lane specifier '[1]'");
3430 case Match_InvalidIndexB:
3431 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3432 case Match_InvalidIndexH:
3433 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3434 case Match_InvalidIndexS:
3435 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3436 case Match_InvalidIndexD:
3437 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3438 case Match_InvalidLabel:
3439 return Error(Loc, "expected label or encodable integer pc offset");
3441 return Error(Loc, "expected readable system register");
3443 return Error(Loc, "expected writable system register or pstate");
3444 case Match_MnemonicFail:
3445 return Error(Loc, "unrecognized instruction mnemonic");
// NOTE(review): assert(0 && ...) here is inconsistent with the
// llvm_unreachable(...) idiom used elsewhere in this file; consider
// switching for consistency (no behavior change in release builds).
3447 assert(0 && "unexpected error code!");
3448 return Error(Loc, "invalid instruction format");
3452 static const char *getSubtargetFeatureName(unsigned Val);
3454 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3455 OperandVector &Operands,
3457 unsigned &ErrorInfo,
3458 bool MatchingInlineAsm) {
// Entry point from the generic AsmParser: rewrite alias mnemonics the
// tablegen'd matcher cannot express, run the generated matcher (short-form
// NEON table first, then long-form), then either emit the instruction or
// report a diagnostic derived from the match failure.
3459 assert(!Operands.empty() && "Unexpect empty operand list!");
3460 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3461 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3463 StringRef Tok = Op.getToken();
3464 unsigned NumOperands = Operands.size();
// Rewrite "lsl Rd, Rn, #imm" into its canonical UBFM form: immr/imms are
// computed from the shift amount, with the 32- vs 64-bit masks chosen by
// the register class of the source operand.
3466 if (NumOperands == 4 && Tok == "lsl") {
3467 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3468 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3469 if (Op2.isReg() && Op3.isImm()) {
3470 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3472 uint64_t Op3Val = Op3CE->getValue();
3473 uint64_t NewOp3Val = 0;
3474 uint64_t NewOp4Val = 0;
3475 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3477 NewOp3Val = (32 - Op3Val) & 0x1f;
3478 NewOp4Val = 31 - Op3Val;
3480 NewOp3Val = (64 - Op3Val) & 0x3f;
3481 NewOp4Val = 63 - Op3Val;
3484 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3485 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
// Replace the mnemonic and append/rewrite the immediates in place so the
// generated matcher sees a plain "ubfm Rd, Rn, #immr, #imms".
3487 Operands[0] = AArch64Operand::CreateToken(
3488 "ubfm", false, Op.getStartLoc(), getContext());
3489 Operands.push_back(AArch64Operand::CreateImm(
3490 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3491 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3492 Op3.getEndLoc(), getContext());
3495 } else if (NumOperands == 5) {
3496 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3497 // UBFIZ -> UBFM aliases.
3498 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3499 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3500 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3501 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3503 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3504 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3505 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3507 if (Op3CE && Op4CE) {
3508 uint64_t Op3Val = Op3CE->getValue();
3509 uint64_t Op4Val = Op4CE->getValue();
3511 uint64_t RegWidth = 0;
3512 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb/width against the destination register width before
// converting; out-of-range values get a dedicated diagnostic here rather
// than a generic matcher failure.
3518 if (Op3Val >= RegWidth)
3519 return Error(Op3.getStartLoc(),
3520 "expected integer in range [0, 31]");
3521 if (Op4Val < 1 || Op4Val > RegWidth)
3522 return Error(Op4.getStartLoc(),
3523 "expected integer in range [1, 32]");
3525 uint64_t NewOp3Val = 0;
3526 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3528 NewOp3Val = (32 - Op3Val) & 0x1f;
3530 NewOp3Val = (64 - Op3Val) & 0x3f;
3532 uint64_t NewOp4Val = Op4Val - 1;
3534 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3535 return Error(Op4.getStartLoc(),
3536 "requested insert overflows register");
3538 const MCExpr *NewOp3 =
3539 MCConstantExpr::Create(NewOp3Val, getContext());
3540 const MCExpr *NewOp4 =
3541 MCConstantExpr::Create(NewOp4Val, getContext());
3542 Operands[3] = AArch64Operand::CreateImm(
3543 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3544 Operands[4] = AArch64Operand::CreateImm(
3545 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// Swap the alias mnemonic for the underlying *BFM instruction.
3547 Operands[0] = AArch64Operand::CreateToken(
3548 "bfm", false, Op.getStartLoc(), getContext());
3549 else if (Tok == "sbfiz")
3550 Operands[0] = AArch64Operand::CreateToken(
3551 "sbfm", false, Op.getStartLoc(), getContext());
3552 else if (Tok == "ubfiz")
3553 Operands[0] = AArch64Operand::CreateToken(
3554 "ubfm", false, Op.getStartLoc(), getContext());
3556 llvm_unreachable("No valid mnemonic for alias?");
3560 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3561 // UBFX -> UBFM aliases.
3562 } else if (NumOperands == 5 &&
3563 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3564 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3565 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3566 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3568 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3569 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3570 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3572 if (Op3CE && Op4CE) {
3573 uint64_t Op3Val = Op3CE->getValue();
3574 uint64_t Op4Val = Op4CE->getValue();
3576 uint64_t RegWidth = 0;
3577 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3583 if (Op3Val >= RegWidth)
3584 return Error(Op3.getStartLoc(),
3585 "expected integer in range [0, 31]");
3586 if (Op4Val < 1 || Op4Val > RegWidth)
3587 return Error(Op4.getStartLoc(),
3588 "expected integer in range [1, 32]");
// For the extract aliases, imms = lsb + width - 1; reject extracts that
// would read past the top of the register.
3590 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3592 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3593 return Error(Op4.getStartLoc(),
3594 "requested extract overflows register");
3596 const MCExpr *NewOp4 =
3597 MCConstantExpr::Create(NewOp4Val, getContext());
3598 Operands[4] = AArch64Operand::CreateImm(
3599 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3601 Operands[0] = AArch64Operand::CreateToken(
3602 "bfm", false, Op.getStartLoc(), getContext());
3603 else if (Tok == "sbfx")
3604 Operands[0] = AArch64Operand::CreateToken(
3605 "sbfm", false, Op.getStartLoc(), getContext());
3606 else if (Tok == "ubfx")
3607 Operands[0] = AArch64Operand::CreateToken(
3608 "ubfm", false, Op.getStartLoc(), getContext());
3610 llvm_unreachable("No valid mnemonic for alias?");
3615 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3616 // InstAlias can't quite handle this since the reg classes aren't
3618 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3619 // The source register can be Wn here, but the matcher expects a
3620 // GPR64. Twiddle it here if necessary.
3621 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3623 unsigned Reg = getXRegFromWReg(Op.getReg());
3624 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3625 Op.getEndLoc(), getContext());
3628 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3629 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3630 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3632 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3634 // The source register can be Wn here, but the matcher expects a
3635 // GPR64. Twiddle it here if necessary.
3636 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3638 unsigned Reg = getXRegFromWReg(Op.getReg());
3639 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3640 Op.getEndLoc(), getContext());
3644 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3645 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3646 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3648 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3650 // The source register can be Wn here, but the matcher expects a
3651 // GPR32. Twiddle it here if necessary.
3652 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3654 unsigned Reg = getWRegFromXReg(Op.getReg());
3655 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3656 Op.getEndLoc(), getContext());
3661 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3662 if (NumOperands == 3 && Tok == "fmov") {
3663 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3664 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 is the sentinel used for the #0.0 literal
// here; the immediate operand is replaced with the matching zero register.
3665 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3667 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3671 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3672 Op.getEndLoc(), getContext());
3677 // First try to match against the secondary set of tables containing the
3678 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3679 unsigned MatchResult =
3680 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3682 // If that fails, try against the alternate table containing long-form NEON:
3683 // "fadd v0.2s, v1.2s, v2.2s"
3684 if (MatchResult != Match_Success)
3686 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
// Dispatch on the final match result: emit on success, otherwise produce
// the most specific diagnostic we can.
3688 switch (MatchResult) {
3689 case Match_Success: {
3690 // Perform range checking and other semantic validations
3691 SmallVector<SMLoc, 8> OperandLocs;
3692 NumOperands = Operands.size();
3693 for (unsigned i = 1; i < NumOperands; ++i)
3694 OperandLocs.push_back(Operands[i]->getStartLoc());
3695 if (validateInstruction(Inst, OperandLocs))
3699 Out.EmitInstruction(Inst, STI);
3702 case Match_MissingFeature: {
3703 assert(ErrorInfo && "Unknown missing feature!");
3704 // Special case the error message for the very common case where only
3705 // a single subtarget feature is missing (neon, e.g.).
3706 std::string Msg = "instruction requires:";
// Walk the ErrorInfo bitmask and append the name of every missing
// subtarget feature to the diagnostic.
3708 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3709 if (ErrorInfo & Mask) {
3711 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3715 return Error(IDLoc, Msg);
3717 case Match_MnemonicFail:
3718 return showMatchError(IDLoc, MatchResult);
3719 case Match_InvalidOperand: {
// ErrorInfo is the index of the offending operand when known (~0U means
// unknown); prefer that operand's location for the diagnostic.
3720 SMLoc ErrorLoc = IDLoc;
3721 if (ErrorInfo != ~0U) {
3722 if (ErrorInfo >= Operands.size())
3723 return Error(IDLoc, "too few operands for instruction");
3725 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3726 if (ErrorLoc == SMLoc())
3729 // If the match failed on a suffix token operand, tweak the diagnostic
3731 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3732 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3733 MatchResult = Match_InvalidSuffix;
3735 return showMatchError(ErrorLoc, MatchResult);
// All of the range/lane/extend diagnostics below share the same handling:
// point at the offending operand and defer the message to showMatchError.
3737 case Match_InvalidMemoryIndexed1:
3738 case Match_InvalidMemoryIndexed2:
3739 case Match_InvalidMemoryIndexed4:
3740 case Match_InvalidMemoryIndexed8:
3741 case Match_InvalidMemoryIndexed16:
3742 case Match_InvalidCondCode:
3743 case Match_AddSubRegExtendSmall:
3744 case Match_AddSubRegExtendLarge:
3745 case Match_AddSubSecondSource:
3746 case Match_LogicalSecondSource:
3747 case Match_AddSubRegShift32:
3748 case Match_AddSubRegShift64:
3749 case Match_InvalidMovImm32Shift:
3750 case Match_InvalidMovImm64Shift:
3751 case Match_InvalidFPImm:
3752 case Match_InvalidMemoryWExtend8:
3753 case Match_InvalidMemoryWExtend16:
3754 case Match_InvalidMemoryWExtend32:
3755 case Match_InvalidMemoryWExtend64:
3756 case Match_InvalidMemoryWExtend128:
3757 case Match_InvalidMemoryXExtend8:
3758 case Match_InvalidMemoryXExtend16:
3759 case Match_InvalidMemoryXExtend32:
3760 case Match_InvalidMemoryXExtend64:
3761 case Match_InvalidMemoryXExtend128:
3762 case Match_InvalidMemoryIndexed4SImm7:
3763 case Match_InvalidMemoryIndexed8SImm7:
3764 case Match_InvalidMemoryIndexed16SImm7:
3765 case Match_InvalidMemoryIndexedSImm9:
3766 case Match_InvalidImm0_7:
3767 case Match_InvalidImm0_15:
3768 case Match_InvalidImm0_31:
3769 case Match_InvalidImm0_63:
3770 case Match_InvalidImm0_127:
3771 case Match_InvalidImm0_65535:
3772 case Match_InvalidImm1_8:
3773 case Match_InvalidImm1_16:
3774 case Match_InvalidImm1_32:
3775 case Match_InvalidImm1_64:
3776 case Match_InvalidIndex1:
3777 case Match_InvalidIndexB:
3778 case Match_InvalidIndexH:
3779 case Match_InvalidIndexS:
3780 case Match_InvalidIndexD:
3781 case Match_InvalidLabel:
3784 if (ErrorInfo >= Operands.size())
3785 return Error(IDLoc, "too few operands for instruction");
3786 // Any time we get here, there's nothing fancy to do. Just get the
3787 // operand SMLoc and display the diagnostic.
3788 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3789 if (ErrorLoc == SMLoc())
3791 return showMatchError(ErrorLoc, MatchResult);
3795 llvm_unreachable("Implement any new match types added!");
3799 /// ParseDirective parses the AArch64-specific directives.  Returns the
/// result of the matching directive handler; .hword/.word/.xword emit 2-,
/// 4- and 8-byte values respectively, .tlsdesccall is handled separately,
/// and anything else is forwarded to parseDirectiveLOH.
3800 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3801 StringRef IDVal = DirectiveID.getIdentifier();
3802 SMLoc Loc = DirectiveID.getLoc();
3803 if (IDVal == ".hword")
3804 return parseDirectiveWord(2, Loc);
3805 if (IDVal == ".word")
3806 return parseDirectiveWord(4, Loc);
3807 if (IDVal == ".xword")
3808 return parseDirectiveWord(8, Loc);
3809 if (IDVal == ".tlsdesccall")
3810 return parseDirectiveTLSDescCall(Loc);
3812 return parseDirectiveLOH(IDVal, Loc);
3815 /// parseDirectiveWord
3816 /// ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a Size-byte value via the
/// streamer.  An empty operand list (immediate end-of-statement) is legal.
3817 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3818 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3820 const MCExpr *Value;
3821 if (getParser().parseExpression(Value))
3824 getParser().getStreamer().EmitValue(Value, Size);
3826 if (getLexer().is(AsmToken::EndOfStatement))
// A comma must separate consecutive expressions.
3829 // FIXME: Improve diagnostic.
3830 if (getLexer().isNot(AsmToken::Comma))
3831 return Error(L, "unexpected token in directive")
3840 // parseDirectiveTLSDescCall:
3841 // ::= .tlsdesccall symbol
// Parses the symbol operand, wraps it in a VK_TLSDESC AArch64MCExpr, and
// emits a TLSDESCCALL pseudo-instruction referencing it so the relocation
// is recorded at this point in the stream.
3842 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3844 if (getParser().parseIdentifier(Name))
3845 return Error(L, "expected symbol after directive");
3847 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3848 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3849 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3852 Inst.setOpcode(AArch64::TLSDESCCALL);
3853 Inst.addOperand(MCOperand::CreateExpr(Expr));
3855 getParser().getStreamer().EmitInstruction(Inst, STI);
3859 /// ::= .loh <lohName | lohId> label1, ..., labelN
3860 /// The number of arguments depends on the loh identifier.
/// Parses a Mach-O linker-optimization-hint directive: the LOH kind may be
/// given by name or by numeric id, followed by the kind-specific number of
/// comma-separated label arguments.
3861 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3862 if (IDVal != MCLOHDirectiveName())
3865 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3866 if (getParser().getTok().isNot(AsmToken::Integer))
3867 return TokError("expected an identifier or a number in directive");
3868 // We successfully got a numeric value for the identifier.
3869 // Check that it is valid.
3870 int64_t Id = getParser().getTok().getIntVal();
3871 Kind = (MCLOHType)Id;
3872 // Check that Id does not overflow MCLOHType.
3873 if (!isValidMCLOHType(Kind) || Id != Kind)
3874 return TokError("invalid numeric identifier in directive");
3876 StringRef Name = getTok().getIdentifier();
3877 // We successfully parsed an identifier.
3878 // Check that it is a recognized one.
3879 int Id = MCLOHNameToId(Name);
3882 return TokError("invalid identifier in directive");
3883 Kind = (MCLOHType)Id;
3885 // Consume the identifier.
3887 // Get the number of arguments of this LOH.
3888 int NbArgs = MCLOHIdToNbArgs(Kind);
3890 assert(NbArgs != -1 && "Invalid number of arguments");
// Collect exactly NbArgs comma-separated label symbols.
3892 SmallVector<MCSymbol *, 3> Args;
3893 for (int Idx = 0; Idx < NbArgs; ++Idx) {
3895 if (getParser().parseIdentifier(Name))
3896 return TokError("expected identifier in directive");
3897 Args.push_back(getContext().GetOrCreateSymbol(Name));
3899 if (Idx + 1 == NbArgs)
3901 if (getLexer().isNot(AsmToken::Comma))
3902 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3905 if (getLexer().isNot(AsmToken::EndOfStatement))
3906 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3908 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
3913 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
3914 AArch64MCExpr::VariantKind &ELFRefKind,
3915 MCSymbolRefExpr::VariantKind &DarwinRefKind,
// Decompose Expr into an optional AArch64MCExpr (ELF) wrapper, a symbol
// reference (whose kind carries the Darwin variant), and an optional
// constant addend.  Outputs default to VK_INVALID / VK_None / no addend.
3917 ELFRefKind = AArch64MCExpr::VK_INVALID;
3918 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off an AArch64MCExpr wrapper, if present, and classify its kind.
3921 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
3922 ELFRefKind = AE->getKind();
3923 Expr = AE->getSubExpr();
3926 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
3928 // It's a simple symbol reference with no addend.
3929 DarwinRefKind = SE->getKind();
// Otherwise expect "symbol +/- constant" as a binary expression.
3933 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
3937 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
3940 DarwinRefKind = SE->getKind();
3942 if (BE->getOpcode() != MCBinaryExpr::Add &&
3943 BE->getOpcode() != MCBinaryExpr::Sub)
3946 // See if the addend is a constant, otherwise there's more going
3947 // on here than we can deal with.
3948 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
3952 Addend = AddendExpr->getValue();
3953 if (BE->getOpcode() == MCBinaryExpr::Sub)
3956 // It's some symbol reference + a constant addend, but really
3957 // shouldn't use both Darwin and ELF syntax.
3958 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
3959 DarwinRefKind == MCSymbolRefExpr::VK_None;
3962 /// Force static initialization.
// Registers this parser for both endiannesses of the AArch64 target and
// for the legacy ARM64 target names.
3963 extern "C" void LLVMInitializeAArch64AsmParser() {
3964 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
3965 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
3967 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
3968 RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
3971 #define GET_REGISTER_MATCHER
3972 #define GET_SUBTARGET_FEATURE_NAME
3973 #define GET_MATCHER_IMPLEMENTATION
3974 #include "AArch64GenAsmMatcher.inc"
3976 // Define this matcher function after the auto-generated include so we
3977 // have the match class enum definitions.
// Custom operand-class validation hook called by the generated matcher.
// Returns Match_Success only when the parsed operand is a constant
// immediate equal to the value the match class expects; everything else
// is Match_InvalidOperand.
3978 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
3980 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
3981 // If the kind is a token for a literal immediate, check if our asm
3982 // operand matches. This is for InstAliases which have a fixed-value
3983 // immediate in the syntax.
3984 int64_t ExpectedVal;
3987 return Match_InvalidOperand;
3988 return Match_InvalidOperand;
// Only constant-expression immediates can satisfy a fixed-value class.
4029 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4032 return Match_InvalidOperand;
4033 if (CE->getValue() == ExpectedVal)
4034 return Match_Success;
4035 return Match_InvalidOperand;