1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class AArch64AsmParser : public MCTargetAsmParser {
42 StringRef Mnemonic; ///< Instruction mnemonic.
46 MCAsmParser &getParser() const { return Parser; }
47 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
49 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
51 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
52 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
53 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
54 int tryParseRegister();
55 int tryMatchVectorRegister(StringRef &Kind, bool expected);
56 bool parseRegister(OperandVector &Operands);
57 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
58 bool parseVectorList(OperandVector &Operands);
59 bool parseOperand(OperandVector &Operands, bool isCondCode,
62 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
63 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
64 bool showMatchError(SMLoc Loc, unsigned ErrCode);
66 bool parseDirectiveWord(unsigned Size, SMLoc L);
67 bool parseDirectiveTLSDescCall(SMLoc L);
69 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
71 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
72 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
73 OperandVector &Operands, MCStreamer &Out,
75 bool MatchingInlineAsm) override;
76 /// @name Auto-generated Match Functions
79 #define GET_ASSEMBLER_HEADER
80 #include "AArch64GenAsmMatcher.inc"
84 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
85 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
86 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
87 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
88 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
89 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
90 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
91 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
92 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
93 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
94 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
95 bool tryParseVectorRegister(OperandVector &Operands);
98 enum AArch64MatchResultTy {
99 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
100 #define GET_OPERAND_DIAGNOSTIC_TYPES
101 #include "AArch64GenAsmMatcher.inc"
103 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
104 const MCInstrInfo &MII,
105 const MCTargetOptions &Options)
106 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
107 MCAsmParserExtension::Initialize(_Parser);
109 // Initialize the set of available features.
110 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
113 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
114 SMLoc NameLoc, OperandVector &Operands) override;
115 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
116 bool ParseDirective(AsmToken DirectiveID) override;
117 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
118 unsigned Kind) override;
120 static bool classifySymbolRef(const MCExpr *Expr,
121 AArch64MCExpr::VariantKind &ELFRefKind,
122 MCSymbolRefExpr::VariantKind &DarwinRefKind,
125 } // end anonymous namespace
129 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
131 class AArch64Operand : public MCParsedAsmOperand {
149 SMLoc StartLoc, EndLoc;
154 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
162 struct VectorListOp {
165 unsigned NumElements;
166 unsigned ElementKind;
169 struct VectorIndexOp {
177 struct ShiftedImmOp {
179 unsigned ShiftAmount;
183 AArch64CC::CondCode Code;
187 unsigned Val; // Encoded 8-bit representation.
191 unsigned Val; // Not the enum since not all values have names.
197 uint64_t FeatureBits; // We need to pass through information about which
198 // core we are compiling for so that the SysReg
199 // Mappers can appropriately conditionalize.
210 struct ShiftExtendOp {
211 AArch64_AM::ShiftExtendType Type;
213 bool HasExplicitAmount;
223 struct VectorListOp VectorList;
224 struct VectorIndexOp VectorIndex;
226 struct ShiftedImmOp ShiftedImm;
227 struct CondCodeOp CondCode;
228 struct FPImmOp FPImm;
229 struct BarrierOp Barrier;
230 struct SysRegOp SysReg;
231 struct SysCRImmOp SysCRImm;
232 struct PrefetchOp Prefetch;
233 struct ShiftExtendOp ShiftExtend;
236 // Keep the MCContext around as the MCExprs may need manipulated during
237 // the add<>Operands() calls.
241 AArch64Operand(KindTy K, MCContext &_Ctx)
242 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
244 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
246 StartLoc = o.StartLoc;
256 ShiftedImm = o.ShiftedImm;
259 CondCode = o.CondCode;
271 VectorList = o.VectorList;
274 VectorIndex = o.VectorIndex;
280 SysCRImm = o.SysCRImm;
283 Prefetch = o.Prefetch;
286 ShiftExtend = o.ShiftExtend;
291 /// getStartLoc - Get the location of the first token of this operand.
292 SMLoc getStartLoc() const override { return StartLoc; }
293 /// getEndLoc - Get the location of the last token of this operand.
294 SMLoc getEndLoc() const override { return EndLoc; }
296 StringRef getToken() const {
297 assert(Kind == k_Token && "Invalid access!");
298 return StringRef(Tok.Data, Tok.Length);
301 bool isTokenSuffix() const {
302 assert(Kind == k_Token && "Invalid access!");
306 const MCExpr *getImm() const {
307 assert(Kind == k_Immediate && "Invalid access!");
311 const MCExpr *getShiftedImmVal() const {
312 assert(Kind == k_ShiftedImm && "Invalid access!");
313 return ShiftedImm.Val;
316 unsigned getShiftedImmShift() const {
317 assert(Kind == k_ShiftedImm && "Invalid access!");
318 return ShiftedImm.ShiftAmount;
321 AArch64CC::CondCode getCondCode() const {
322 assert(Kind == k_CondCode && "Invalid access!");
323 return CondCode.Code;
326 unsigned getFPImm() const {
327 assert(Kind == k_FPImm && "Invalid access!");
331 unsigned getBarrier() const {
332 assert(Kind == k_Barrier && "Invalid access!");
336 unsigned getReg() const override {
337 assert(Kind == k_Register && "Invalid access!");
341 unsigned getVectorListStart() const {
342 assert(Kind == k_VectorList && "Invalid access!");
343 return VectorList.RegNum;
346 unsigned getVectorListCount() const {
347 assert(Kind == k_VectorList && "Invalid access!");
348 return VectorList.Count;
351 unsigned getVectorIndex() const {
352 assert(Kind == k_VectorIndex && "Invalid access!");
353 return VectorIndex.Val;
356 StringRef getSysReg() const {
357 assert(Kind == k_SysReg && "Invalid access!");
358 return StringRef(SysReg.Data, SysReg.Length);
361 uint64_t getSysRegFeatureBits() const {
362 assert(Kind == k_SysReg && "Invalid access!");
363 return SysReg.FeatureBits;
366 unsigned getSysCR() const {
367 assert(Kind == k_SysCR && "Invalid access!");
371 unsigned getPrefetch() const {
372 assert(Kind == k_Prefetch && "Invalid access!");
376 AArch64_AM::ShiftExtendType getShiftExtendType() const {
377 assert(Kind == k_ShiftExtend && "Invalid access!");
378 return ShiftExtend.Type;
381 unsigned getShiftExtendAmount() const {
382 assert(Kind == k_ShiftExtend && "Invalid access!");
383 return ShiftExtend.Amount;
386 bool hasShiftExtendAmount() const {
387 assert(Kind == k_ShiftExtend && "Invalid access!");
388 return ShiftExtend.HasExplicitAmount;
391 bool isImm() const override { return Kind == k_Immediate; }
392 bool isMem() const override { return false; }
393 bool isSImm9() const {
396 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
399 int64_t Val = MCE->getValue();
400 return (Val >= -256 && Val < 256);
402 bool isSImm7s4() const {
405 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
408 int64_t Val = MCE->getValue();
409 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
411 bool isSImm7s8() const {
414 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
417 int64_t Val = MCE->getValue();
418 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
420 bool isSImm7s16() const {
423 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
426 int64_t Val = MCE->getValue();
427 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
430 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
431 AArch64MCExpr::VariantKind ELFRefKind;
432 MCSymbolRefExpr::VariantKind DarwinRefKind;
434 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
436 // If we don't understand the expression, assume the best and
437 // let the fixup and relocation code deal with it.
441 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
442 ELFRefKind == AArch64MCExpr::VK_LO12 ||
443 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
444 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
445 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
446 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
447 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
448 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
449 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
450 // Note that we don't range-check the addend. It's adjusted modulo page
451 // size when converted, so there is no "out of range" condition when using
453 return Addend >= 0 && (Addend % Scale) == 0;
454 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
455 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
456 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
463 template <int Scale> bool isUImm12Offset() const {
467 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
469 return isSymbolicUImm12Offset(getImm(), Scale);
471 int64_t Val = MCE->getValue();
472 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
475 bool isImm0_7() const {
478 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
481 int64_t Val = MCE->getValue();
482 return (Val >= 0 && Val < 8);
484 bool isImm1_8() const {
487 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
490 int64_t Val = MCE->getValue();
491 return (Val > 0 && Val < 9);
493 bool isImm0_15() const {
496 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
499 int64_t Val = MCE->getValue();
500 return (Val >= 0 && Val < 16);
502 bool isImm1_16() const {
505 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
508 int64_t Val = MCE->getValue();
509 return (Val > 0 && Val < 17);
511 bool isImm0_31() const {
514 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
517 int64_t Val = MCE->getValue();
518 return (Val >= 0 && Val < 32);
520 bool isImm1_31() const {
523 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
526 int64_t Val = MCE->getValue();
527 return (Val >= 1 && Val < 32);
529 bool isImm1_32() const {
532 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
535 int64_t Val = MCE->getValue();
536 return (Val >= 1 && Val < 33);
538 bool isImm0_63() const {
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 int64_t Val = MCE->getValue();
545 return (Val >= 0 && Val < 64);
547 bool isImm1_63() const {
550 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
553 int64_t Val = MCE->getValue();
554 return (Val >= 1 && Val < 64);
556 bool isImm1_64() const {
559 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
562 int64_t Val = MCE->getValue();
563 return (Val >= 1 && Val < 65);
565 bool isImm0_127() const {
568 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
571 int64_t Val = MCE->getValue();
572 return (Val >= 0 && Val < 128);
574 bool isImm0_255() const {
577 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
580 int64_t Val = MCE->getValue();
581 return (Val >= 0 && Val < 256);
583 bool isImm0_65535() const {
586 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
589 int64_t Val = MCE->getValue();
590 return (Val >= 0 && Val < 65536);
592 bool isImm32_63() const {
595 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
598 int64_t Val = MCE->getValue();
599 return (Val >= 32 && Val < 64);
601 bool isLogicalImm32() const {
604 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
607 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 32);
609 bool isLogicalImm64() const {
612 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
615 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
617 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
618 bool isAddSubImm() const {
619 if (!isShiftedImm() && !isImm())
624 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
625 if (isShiftedImm()) {
626 unsigned Shift = ShiftedImm.ShiftAmount;
627 Expr = ShiftedImm.Val;
628 if (Shift != 0 && Shift != 12)
634 AArch64MCExpr::VariantKind ELFRefKind;
635 MCSymbolRefExpr::VariantKind DarwinRefKind;
637 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
638 DarwinRefKind, Addend)) {
639 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
640 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
641 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
642 || ELFRefKind == AArch64MCExpr::VK_LO12
643 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
644 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
645 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
646 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
647 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
648 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
649 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
652 // Otherwise it should be a real immediate in range:
653 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
654 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
656 bool isCondCode() const { return Kind == k_CondCode; }
657 bool isSIMDImmType10() const {
660 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
663 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
665 bool isBranchTarget26() const {
668 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
671 int64_t Val = MCE->getValue();
674 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
676 bool isPCRelLabel19() const {
679 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
682 int64_t Val = MCE->getValue();
685 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
687 bool isBranchTarget14() const {
690 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
693 int64_t Val = MCE->getValue();
696 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
700 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
704 AArch64MCExpr::VariantKind ELFRefKind;
705 MCSymbolRefExpr::VariantKind DarwinRefKind;
707 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
708 DarwinRefKind, Addend)) {
711 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
714 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
715 if (ELFRefKind == AllowedModifiers[i])
722 bool isMovZSymbolG3() const {
723 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
724 return isMovWSymbol(Variants);
727 bool isMovZSymbolG2() const {
728 static AArch64MCExpr::VariantKind Variants[] = {
729 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
730 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
731 return isMovWSymbol(Variants);
734 bool isMovZSymbolG1() const {
735 static AArch64MCExpr::VariantKind Variants[] = {
736 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
737 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
738 AArch64MCExpr::VK_DTPREL_G1,
740 return isMovWSymbol(Variants);
743 bool isMovZSymbolG0() const {
744 static AArch64MCExpr::VariantKind Variants[] = {
745 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
746 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
747 return isMovWSymbol(Variants);
750 bool isMovKSymbolG3() const {
751 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
752 return isMovWSymbol(Variants);
755 bool isMovKSymbolG2() const {
756 static AArch64MCExpr::VariantKind Variants[] = {
757 AArch64MCExpr::VK_ABS_G2_NC};
758 return isMovWSymbol(Variants);
761 bool isMovKSymbolG1() const {
762 static AArch64MCExpr::VariantKind Variants[] = {
763 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
764 AArch64MCExpr::VK_DTPREL_G1_NC
766 return isMovWSymbol(Variants);
769 bool isMovKSymbolG0() const {
770 static AArch64MCExpr::VariantKind Variants[] = {
771 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
772 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
774 return isMovWSymbol(Variants);
777 template<int RegWidth, int Shift>
778 bool isMOVZMovAlias() const {
779 if (!isImm()) return false;
781 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
782 if (!CE) return false;
783 uint64_t Value = CE->getValue();
786 Value &= 0xffffffffULL;
788 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
789 if (Value == 0 && Shift != 0)
792 return (Value & ~(0xffffULL << Shift)) == 0;
795 template<int RegWidth, int Shift>
796 bool isMOVNMovAlias() const {
797 if (!isImm()) return false;
799 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800 if (!CE) return false;
801 uint64_t Value = CE->getValue();
803 // MOVZ takes precedence over MOVN.
804 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
805 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
810 Value &= 0xffffffffULL;
812 return (Value & ~(0xffffULL << Shift)) == 0;
815 bool isFPImm() const { return Kind == k_FPImm; }
816 bool isBarrier() const { return Kind == k_Barrier; }
817 bool isSysReg() const { return Kind == k_SysReg; }
818 bool isMRSSystemRegister() const {
819 if (!isSysReg()) return false;
821 bool IsKnownRegister;
822 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
823 Mapper.fromString(getSysReg(), IsKnownRegister);
825 return IsKnownRegister;
827 bool isMSRSystemRegister() const {
828 if (!isSysReg()) return false;
830 bool IsKnownRegister;
831 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
832 Mapper.fromString(getSysReg(), IsKnownRegister);
834 return IsKnownRegister;
836 bool isSystemPStateField() const {
837 if (!isSysReg()) return false;
839 bool IsKnownRegister;
840 AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
842 return IsKnownRegister;
844 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
845 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
846 bool isVectorRegLo() const {
847 return Kind == k_Register && Reg.isVector &&
848 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
851 bool isGPR32as64() const {
852 return Kind == k_Register && !Reg.isVector &&
853 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
856 bool isGPR64sp0() const {
857 return Kind == k_Register && !Reg.isVector &&
858 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
861 /// Is this a vector list with the type implicit (presumably attached to the
862 /// instruction itself)?
863 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
864 return Kind == k_VectorList && VectorList.Count == NumRegs &&
865 !VectorList.ElementKind;
868 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
869 bool isTypedVectorList() const {
870 if (Kind != k_VectorList)
872 if (VectorList.Count != NumRegs)
874 if (VectorList.ElementKind != ElementKind)
876 return VectorList.NumElements == NumElements;
879 bool isVectorIndex1() const {
880 return Kind == k_VectorIndex && VectorIndex.Val == 1;
882 bool isVectorIndexB() const {
883 return Kind == k_VectorIndex && VectorIndex.Val < 16;
885 bool isVectorIndexH() const {
886 return Kind == k_VectorIndex && VectorIndex.Val < 8;
888 bool isVectorIndexS() const {
889 return Kind == k_VectorIndex && VectorIndex.Val < 4;
891 bool isVectorIndexD() const {
892 return Kind == k_VectorIndex && VectorIndex.Val < 2;
894 bool isToken() const override { return Kind == k_Token; }
895 bool isTokenEqual(StringRef Str) const {
896 return Kind == k_Token && getToken() == Str;
898 bool isSysCR() const { return Kind == k_SysCR; }
899 bool isPrefetch() const { return Kind == k_Prefetch; }
900 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
901 bool isShifter() const {
902 if (!isShiftExtend())
905 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
906 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
907 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
908 ST == AArch64_AM::MSL);
910 bool isExtend() const {
911 if (!isShiftExtend())
914 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
915 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
916 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
917 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
918 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
919 ET == AArch64_AM::LSL) &&
920 getShiftExtendAmount() <= 4;
923 bool isExtend64() const {
926 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
927 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
928 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
930 bool isExtendLSL64() const {
933 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
934 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
935 ET == AArch64_AM::LSL) &&
936 getShiftExtendAmount() <= 4;
939 template<int Width> bool isMemXExtend() const {
942 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
943 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
944 (getShiftExtendAmount() == Log2_32(Width / 8) ||
945 getShiftExtendAmount() == 0);
948 template<int Width> bool isMemWExtend() const {
951 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
952 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
953 (getShiftExtendAmount() == Log2_32(Width / 8) ||
954 getShiftExtendAmount() == 0);
957 template <unsigned width>
958 bool isArithmeticShifter() const {
962 // An arithmetic shifter is LSL, LSR, or ASR.
963 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
964 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
965 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
968 template <unsigned width>
969 bool isLogicalShifter() const {
973 // A logical shifter is LSL, LSR, ASR or ROR.
974 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
975 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
976 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
977 getShiftExtendAmount() < width;
980 bool isMovImm32Shifter() const {
984 // A MOVi shifter is LSL of 0, 16, 32, or 48.
985 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
986 if (ST != AArch64_AM::LSL)
988 uint64_t Val = getShiftExtendAmount();
989 return (Val == 0 || Val == 16);
992 bool isMovImm64Shifter() const {
996 // A MOVi shifter is LSL of 0 or 16.
997 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
998 if (ST != AArch64_AM::LSL)
1000 uint64_t Val = getShiftExtendAmount();
1001 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1004 bool isLogicalVecShifter() const {
1008 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1009 unsigned Shift = getShiftExtendAmount();
1010 return getShiftExtendType() == AArch64_AM::LSL &&
1011 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1014 bool isLogicalVecHalfWordShifter() const {
1015 if (!isLogicalVecShifter())
1018 // A logical vector shifter is a left shift by 0 or 8.
1019 unsigned Shift = getShiftExtendAmount();
1020 return getShiftExtendType() == AArch64_AM::LSL &&
1021 (Shift == 0 || Shift == 8);
1024 bool isMoveVecShifter() const {
1025 if (!isShiftExtend())
1028 // A logical vector shifter is a left shift by 8 or 16.
1029 unsigned Shift = getShiftExtendAmount();
1030 return getShiftExtendType() == AArch64_AM::MSL &&
1031 (Shift == 8 || Shift == 16);
1034 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1035 // to LDUR/STUR when the offset is not legal for the former but is for
1036 // the latter. As such, in addition to checking for being a legal unscaled
1037 // address, also check that it is not a legal scaled address. This avoids
1038 // ambiguity in the matcher.
1040 bool isSImm9OffsetFB() const {
1041 return isSImm9() && !isUImm12Offset<Width / 8>();
1044 bool isAdrpLabel() const {
1045 // Validation was handled during parsing, so we just sanity check that
1046 // something didn't go haywire.
1050 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1051 int64_t Val = CE->getValue();
1052 int64_t Min = - (4096 * (1LL << (21 - 1)));
1053 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1054 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1060 bool isAdrLabel() const {
1061 // Validation was handled during parsing, so we just sanity check that
1062 // something didn't go haywire.
1066 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1067 int64_t Val = CE->getValue();
1068 int64_t Min = - (1LL << (21 - 1));
1069 int64_t Max = ((1LL << (21 - 1)) - 1);
1070 return Val >= Min && Val <= Max;
1076 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1077 // Add as immediates when possible. Null MCExpr = 0.
1079 Inst.addOperand(MCOperand::CreateImm(0));
1080 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1081 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1083 Inst.addOperand(MCOperand::CreateExpr(Expr));
1086 void addRegOperands(MCInst &Inst, unsigned N) const {
1087 assert(N == 1 && "Invalid number of operands!");
1088 Inst.addOperand(MCOperand::CreateReg(getReg()));
1091 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1092 assert(N == 1 && "Invalid number of operands!");
1094 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1096 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1097 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1098 RI->getEncodingValue(getReg()));
1100 Inst.addOperand(MCOperand::CreateReg(Reg));
1103 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1104 assert(N == 1 && "Invalid number of operands!");
1106 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1107 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1110 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1111 assert(N == 1 && "Invalid number of operands!");
1113 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1114 Inst.addOperand(MCOperand::CreateReg(getReg()));
1117 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1118 assert(N == 1 && "Invalid number of operands!");
1119 Inst.addOperand(MCOperand::CreateReg(getReg()));
1122 template <unsigned NumRegs>
1123 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1124 assert(N == 1 && "Invalid number of operands!");
1125 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1126 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1127 unsigned FirstReg = FirstRegs[NumRegs - 1];
1130 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1133 template <unsigned NumRegs>
1134 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1135 assert(N == 1 && "Invalid number of operands!");
1136 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1137 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1138 unsigned FirstReg = FirstRegs[NumRegs - 1];
1141 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1144 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1149 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1150 assert(N == 1 && "Invalid number of operands!");
1151 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1154 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1156 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1159 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1164 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1165 assert(N == 1 && "Invalid number of operands!");
1166 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1169 void addImmOperands(MCInst &Inst, unsigned N) const {
1170 assert(N == 1 && "Invalid number of operands!");
1171 // If this is a pageoff symrefexpr with an addend, adjust the addend
1172 // to be only the page-offset portion. Otherwise, just add the expr
1174 addExpr(Inst, getImm());
1177 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1178 assert(N == 2 && "Invalid number of operands!");
1179 if (isShiftedImm()) {
1180 addExpr(Inst, getShiftedImmVal());
1181 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1183 addExpr(Inst, getImm());
1184 Inst.addOperand(MCOperand::CreateImm(0));
1188 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1189 assert(N == 1 && "Invalid number of operands!");
1190 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1193 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1194 assert(N == 1 && "Invalid number of operands!");
1195 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1197 addExpr(Inst, getImm());
1199 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1202 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1203 addImmOperands(Inst, N);
1207 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1208 assert(N == 1 && "Invalid number of operands!");
1209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1212 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1215 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1218 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1219 assert(N == 1 && "Invalid number of operands!");
1220 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1221 assert(MCE && "Invalid constant immediate operand!");
1222 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1225 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1226 assert(N == 1 && "Invalid number of operands!");
1227 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1228 assert(MCE && "Invalid constant immediate operand!");
1229 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1232 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1233 assert(N == 1 && "Invalid number of operands!");
1234 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1235 assert(MCE && "Invalid constant immediate operand!");
1236 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1239 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1240 assert(N == 1 && "Invalid number of operands!");
1241 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1242 assert(MCE && "Invalid constant immediate operand!");
1243 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1246 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1249 assert(MCE && "Invalid constant immediate operand!");
1250 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1253 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1254 assert(N == 1 && "Invalid number of operands!");
1255 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1256 assert(MCE && "Invalid constant immediate operand!");
1257 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1260 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1263 assert(MCE && "Invalid constant immediate operand!");
1264 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1267 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1268 assert(N == 1 && "Invalid number of operands!");
1269 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1270 assert(MCE && "Invalid constant immediate operand!");
1271 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1274 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1275 assert(N == 1 && "Invalid number of operands!");
1276 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1277 assert(MCE && "Invalid constant immediate operand!");
1278 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1281 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1282 assert(N == 1 && "Invalid number of operands!");
1283 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1284 assert(MCE && "Invalid constant immediate operand!");
1285 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1288 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1289 assert(N == 1 && "Invalid number of operands!");
1290 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1291 assert(MCE && "Invalid constant immediate operand!");
1292 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1295 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1298 assert(MCE && "Invalid constant immediate operand!");
1299 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1302 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1303 assert(N == 1 && "Invalid number of operands!");
1304 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1305 assert(MCE && "Invalid constant immediate operand!");
1306 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1309 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1312 assert(MCE && "Invalid constant immediate operand!");
1313 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1316 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1319 assert(MCE && "Invalid constant immediate operand!");
1320 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1323 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1324 assert(N == 1 && "Invalid number of operands!");
1325 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1326 assert(MCE && "Invalid constant immediate operand!");
1327 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1330 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1331 assert(N == 1 && "Invalid number of operands!");
1332 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1333 assert(MCE && "Invalid constant immediate operand!");
1334 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1337 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1340 assert(MCE && "Invalid constant immediate operand!");
1341 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1344 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1345 assert(N == 1 && "Invalid number of operands!");
1346 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1347 assert(MCE && "Invalid logical immediate operand!");
1348 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1349 Inst.addOperand(MCOperand::CreateImm(encoding));
1352 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1355 assert(MCE && "Invalid logical immediate operand!");
1356 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1357 Inst.addOperand(MCOperand::CreateImm(encoding));
1360 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1361 assert(N == 1 && "Invalid number of operands!");
1362 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1363 assert(MCE && "Invalid immediate operand!");
1364 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1365 Inst.addOperand(MCOperand::CreateImm(encoding));
1368 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1369 // Branch operands don't encode the low bits, so shift them off
1370 // here. If it's a label, however, just put it on directly as there's
1371 // not enough information now to do anything.
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1375 addExpr(Inst, getImm());
1378 assert(MCE && "Invalid constant immediate operand!");
1379 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1382 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1383 // Branch operands don't encode the low bits, so shift them off
1384 // here. If it's a label, however, just put it on directly as there's
1385 // not enough information now to do anything.
1386 assert(N == 1 && "Invalid number of operands!");
1387 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1389 addExpr(Inst, getImm());
1392 assert(MCE && "Invalid constant immediate operand!");
1393 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1396 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1397 // Branch operands don't encode the low bits, so shift them off
1398 // here. If it's a label, however, just put it on directly as there's
1399 // not enough information now to do anything.
1400 assert(N == 1 && "Invalid number of operands!");
1401 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1403 addExpr(Inst, getImm());
1406 assert(MCE && "Invalid constant immediate operand!");
1407 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1410 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1411 assert(N == 1 && "Invalid number of operands!");
1412 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1415 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1416 assert(N == 1 && "Invalid number of operands!");
1417 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1420 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1421 assert(N == 1 && "Invalid number of operands!");
1424 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1425 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1427 Inst.addOperand(MCOperand::CreateImm(Bits));
1430 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1431 assert(N == 1 && "Invalid number of operands!");
1434 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1435 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1437 Inst.addOperand(MCOperand::CreateImm(Bits));
1440 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1441 assert(N == 1 && "Invalid number of operands!");
1445 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1447 Inst.addOperand(MCOperand::CreateImm(Bits));
1450 void addSysCROperands(MCInst &Inst, unsigned N) const {
1451 assert(N == 1 && "Invalid number of operands!");
1452 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1455 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1456 assert(N == 1 && "Invalid number of operands!");
1457 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1460 void addShifterOperands(MCInst &Inst, unsigned N) const {
1461 assert(N == 1 && "Invalid number of operands!");
1463 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1464 Inst.addOperand(MCOperand::CreateImm(Imm));
1467 void addExtendOperands(MCInst &Inst, unsigned N) const {
1468 assert(N == 1 && "Invalid number of operands!");
1469 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1470 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1471 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1472 Inst.addOperand(MCOperand::CreateImm(Imm));
1475 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1476 assert(N == 1 && "Invalid number of operands!");
1477 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1478 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1479 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1480 Inst.addOperand(MCOperand::CreateImm(Imm));
1483 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1484 assert(N == 2 && "Invalid number of operands!");
1485 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1486 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1487 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1488 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1491 // For 8-bit load/store instructions with a register offset, both the
1492 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1493 // they're disambiguated by whether the shift was explicit or implicit rather
1495 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1496 assert(N == 2 && "Invalid number of operands!");
1497 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1498 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1499 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1500 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1504 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1505 assert(N == 1 && "Invalid number of operands!");
1507 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1508 uint64_t Value = CE->getValue();
1509 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1513 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1514 assert(N == 1 && "Invalid number of operands!");
1516 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1517 uint64_t Value = CE->getValue();
1518 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1521 void print(raw_ostream &OS) const override;
1523 static std::unique_ptr<AArch64Operand>
1524 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1525 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1526 Op->Tok.Data = Str.data();
1527 Op->Tok.Length = Str.size();
1528 Op->Tok.IsSuffix = IsSuffix;
1534 static std::unique_ptr<AArch64Operand>
1535 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1536 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1537 Op->Reg.RegNum = RegNum;
1538 Op->Reg.isVector = isVector;
1544 static std::unique_ptr<AArch64Operand>
1545 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1546 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1547 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1548 Op->VectorList.RegNum = RegNum;
1549 Op->VectorList.Count = Count;
1550 Op->VectorList.NumElements = NumElements;
1551 Op->VectorList.ElementKind = ElementKind;
1557 static std::unique_ptr<AArch64Operand>
1558 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1559 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1560 Op->VectorIndex.Val = Idx;
1566 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1567 SMLoc E, MCContext &Ctx) {
1568 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1575 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1576 unsigned ShiftAmount,
1579 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1580 Op->ShiftedImm .Val = Val;
1581 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1587 static std::unique_ptr<AArch64Operand>
1588 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1589 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1590 Op->CondCode.Code = Code;
1596 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1598 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1599 Op->FPImm.Val = Val;
1605 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1607 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1608 Op->Barrier.Val = Val;
1614 static std::unique_ptr<AArch64Operand>
1615 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1616 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1617 Op->SysReg.Data = Str.data();
1618 Op->SysReg.Length = Str.size();
1619 Op->SysReg.FeatureBits = FeatureBits;
1625 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1626 SMLoc E, MCContext &Ctx) {
1627 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1628 Op->SysCRImm.Val = Val;
1634 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1636 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1637 Op->Prefetch.Val = Val;
1643 static std::unique_ptr<AArch64Operand>
1644 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1645 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1646 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1647 Op->ShiftExtend.Type = ShOp;
1648 Op->ShiftExtend.Amount = Val;
1649 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1656 } // end anonymous namespace.
1658 void AArch64Operand::print(raw_ostream &OS) const {
1661 OS << "<fpimm " << getFPImm() << "("
1662 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1666 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1668 OS << "<barrier " << Name << ">";
1670 OS << "<barrier invalid #" << getBarrier() << ">";
1674 getImm()->print(OS);
1676 case k_ShiftedImm: {
1677 unsigned Shift = getShiftedImmShift();
1678 OS << "<shiftedimm ";
1679 getShiftedImmVal()->print(OS);
1680 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1684 OS << "<condcode " << getCondCode() << ">";
1687 OS << "<register " << getReg() << ">";
1689 case k_VectorList: {
1690 OS << "<vectorlist ";
1691 unsigned Reg = getVectorListStart();
1692 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1693 OS << Reg + i << " ";
1698 OS << "<vectorindex " << getVectorIndex() << ">";
1701 OS << "<sysreg: " << getSysReg() << '>';
1704 OS << "'" << getToken() << "'";
1707 OS << "c" << getSysCR();
1711 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1713 OS << "<prfop " << Name << ">";
1715 OS << "<prfop invalid #" << getPrefetch() << ">";
1718 case k_ShiftExtend: {
1719 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1720 << getShiftExtendAmount();
1721 if (!hasShiftExtendAmount())
1729 /// @name Auto-generated Match Functions
1732 static unsigned MatchRegisterName(StringRef Name);
1736 static unsigned matchVectorRegName(StringRef Name) {
1737 return StringSwitch<unsigned>(Name)
1738 .Case("v0", AArch64::Q0)
1739 .Case("v1", AArch64::Q1)
1740 .Case("v2", AArch64::Q2)
1741 .Case("v3", AArch64::Q3)
1742 .Case("v4", AArch64::Q4)
1743 .Case("v5", AArch64::Q5)
1744 .Case("v6", AArch64::Q6)
1745 .Case("v7", AArch64::Q7)
1746 .Case("v8", AArch64::Q8)
1747 .Case("v9", AArch64::Q9)
1748 .Case("v10", AArch64::Q10)
1749 .Case("v11", AArch64::Q11)
1750 .Case("v12", AArch64::Q12)
1751 .Case("v13", AArch64::Q13)
1752 .Case("v14", AArch64::Q14)
1753 .Case("v15", AArch64::Q15)
1754 .Case("v16", AArch64::Q16)
1755 .Case("v17", AArch64::Q17)
1756 .Case("v18", AArch64::Q18)
1757 .Case("v19", AArch64::Q19)
1758 .Case("v20", AArch64::Q20)
1759 .Case("v21", AArch64::Q21)
1760 .Case("v22", AArch64::Q22)
1761 .Case("v23", AArch64::Q23)
1762 .Case("v24", AArch64::Q24)
1763 .Case("v25", AArch64::Q25)
1764 .Case("v26", AArch64::Q26)
1765 .Case("v27", AArch64::Q27)
1766 .Case("v28", AArch64::Q28)
1767 .Case("v29", AArch64::Q29)
1768 .Case("v30", AArch64::Q30)
1769 .Case("v31", AArch64::Q31)
1773 static bool isValidVectorKind(StringRef Name) {
1774 return StringSwitch<bool>(Name.lower())
1784 // Accept the width neutral ones, too, for verbose syntax. If those
1785 // aren't used in the right places, the token operand won't match so
1786 // all will work out.
1794 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1795 char &ElementKind) {
1796 assert(isValidVectorKind(Name));
1798 ElementKind = Name.lower()[Name.size() - 1];
1801 if (Name.size() == 2)
1804 // Parse the lane count
1805 Name = Name.drop_front();
1806 while (isdigit(Name.front())) {
1807 NumElements = 10 * NumElements + (Name.front() - '0');
1808 Name = Name.drop_front();
1812 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1814 StartLoc = getLoc();
1815 RegNo = tryParseRegister();
1816 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1817 return (RegNo == (unsigned)-1);
1820 /// tryParseRegister - Try to parse a register name. The token must be an
1821 /// Identifier when called, and if it is a register name the token is eaten and
1822 /// the register is added to the operand list.
1823 int AArch64AsmParser::tryParseRegister() {
1824 const AsmToken &Tok = Parser.getTok();
1825 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1827 std::string lowerCase = Tok.getString().lower();
1828 unsigned RegNum = MatchRegisterName(lowerCase);
1829 // Also handle a few aliases of registers.
1831 RegNum = StringSwitch<unsigned>(lowerCase)
1832 .Case("fp", AArch64::FP)
1833 .Case("lr", AArch64::LR)
1834 .Case("x31", AArch64::XZR)
1835 .Case("w31", AArch64::WZR)
1841 Parser.Lex(); // Eat identifier token.
1845 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1846 /// kind specifier. If it is a register specifier, eat the token and return it.
1847 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1848 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1849 TokError("vector register expected");
1853 StringRef Name = Parser.getTok().getString();
1854 // If there is a kind specifier, it's separated from the register name by
1856 size_t Start = 0, Next = Name.find('.');
1857 StringRef Head = Name.slice(Start, Next);
1858 unsigned RegNum = matchVectorRegName(Head);
1860 if (Next != StringRef::npos) {
1861 Kind = Name.slice(Next, StringRef::npos);
1862 if (!isValidVectorKind(Kind)) {
1863 TokError("invalid vector kind qualifier");
1867 Parser.Lex(); // Eat the register token.
1872 TokError("vector register expected");
1876 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1877 AArch64AsmParser::OperandMatchResultTy
1878 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1881 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1882 Error(S, "Expected cN operand where 0 <= N <= 15");
1883 return MatchOperand_ParseFail;
1886 StringRef Tok = Parser.getTok().getIdentifier();
1887 if (Tok[0] != 'c' && Tok[0] != 'C') {
1888 Error(S, "Expected cN operand where 0 <= N <= 15");
1889 return MatchOperand_ParseFail;
1893 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1894 if (BadNum || CRNum > 15) {
1895 Error(S, "Expected cN operand where 0 <= N <= 15");
1896 return MatchOperand_ParseFail;
1899 Parser.Lex(); // Eat identifier token.
1901 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1902 return MatchOperand_Success;
1905 /// tryParsePrefetch - Try to parse a prefetch operand.
1906 AArch64AsmParser::OperandMatchResultTy
1907 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1909 const AsmToken &Tok = Parser.getTok();
1910 // Either an identifier for named values or a 5-bit immediate.
1911 bool Hash = Tok.is(AsmToken::Hash);
1912 if (Hash || Tok.is(AsmToken::Integer)) {
1914 Parser.Lex(); // Eat hash token.
1915 const MCExpr *ImmVal;
1916 if (getParser().parseExpression(ImmVal))
1917 return MatchOperand_ParseFail;
1919 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1921 TokError("immediate value expected for prefetch operand");
1922 return MatchOperand_ParseFail;
1924 unsigned prfop = MCE->getValue();
1926 TokError("prefetch operand out of range, [0,31] expected");
1927 return MatchOperand_ParseFail;
1930 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1931 return MatchOperand_Success;
1934 if (Tok.isNot(AsmToken::Identifier)) {
1935 TokError("pre-fetch hint expected");
1936 return MatchOperand_ParseFail;
1940 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1942 TokError("pre-fetch hint expected");
1943 return MatchOperand_ParseFail;
1946 Parser.Lex(); // Eat identifier token.
1947 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1948 return MatchOperand_Success;
1951 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
1953 AArch64AsmParser::OperandMatchResultTy
1954 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1958 if (Parser.getTok().is(AsmToken::Hash)) {
1959 Parser.Lex(); // Eat hash token.
1962 if (parseSymbolicImmVal(Expr))
1963 return MatchOperand_ParseFail;
1965 AArch64MCExpr::VariantKind ELFRefKind;
1966 MCSymbolRefExpr::VariantKind DarwinRefKind;
1968 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
1969 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
1970 ELFRefKind == AArch64MCExpr::VK_INVALID) {
1971 // No modifier was specified at all; this is the syntax for an ELF basic
1972 // ADRP relocation (unfortunately).
1974 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
1975 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
1976 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
1978 Error(S, "gotpage label reference not allowed an addend");
1979 return MatchOperand_ParseFail;
1980 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
1981 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
1982 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
1983 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
1984 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
1985 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
1986 // The operand must be an @page or @gotpage qualified symbolref.
1987 Error(S, "page or gotpage label reference expected");
1988 return MatchOperand_ParseFail;
1992 // We have either a label reference possibly with addend or an immediate. The
1993 // addend is a raw value here. The linker will adjust it to only reference the
1995 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1996 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
1998 return MatchOperand_Success;
2001 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2003 AArch64AsmParser::OperandMatchResultTy
2004 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2008 if (Parser.getTok().is(AsmToken::Hash)) {
2009 Parser.Lex(); // Eat hash token.
2012 if (getParser().parseExpression(Expr))
2013 return MatchOperand_ParseFail;
2015 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2016 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2018 return MatchOperand_Success;
2021 /// tryParseFPImm - A floating point immediate expression operand.
2022 AArch64AsmParser::OperandMatchResultTy
2023 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2027 if (Parser.getTok().is(AsmToken::Hash)) {
2028 Parser.Lex(); // Eat '#'
2032 // Handle negation, as that still comes through as a separate token.
2033 bool isNegative = false;
2034 if (Parser.getTok().is(AsmToken::Minus)) {
2038 const AsmToken &Tok = Parser.getTok();
2039 if (Tok.is(AsmToken::Real)) {
2040 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2041 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2042 // If we had a '-' in front, toggle the sign bit.
2043 IntVal ^= (uint64_t)isNegative << 63;
2044 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2045 Parser.Lex(); // Eat the token.
2046 // Check for out of range values. As an exception, we let Zero through,
2047 // as we handle that special case in post-processing before matching in
2048 // order to use the zero register for it.
2049 if (Val == -1 && !RealVal.isZero()) {
2050 TokError("expected compatible register or floating-point constant");
2051 return MatchOperand_ParseFail;
2053 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2054 return MatchOperand_Success;
2056 if (Tok.is(AsmToken::Integer)) {
2058 if (!isNegative && Tok.getString().startswith("0x")) {
2059 Val = Tok.getIntVal();
2060 if (Val > 255 || Val < 0) {
2061 TokError("encoded floating point value out of range");
2062 return MatchOperand_ParseFail;
2065 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2066 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2067 // If we had a '-' in front, toggle the sign bit.
2068 IntVal ^= (uint64_t)isNegative << 63;
2069 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2071 Parser.Lex(); // Eat the token.
2072 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2073 return MatchOperand_Success;
2077 return MatchOperand_NoMatch;
2079 TokError("invalid floating point immediate");
2080 return MatchOperand_ParseFail;
2083 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2084 AArch64AsmParser::OperandMatchResultTy
2085 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2088 if (Parser.getTok().is(AsmToken::Hash))
2089 Parser.Lex(); // Eat '#'
2090 else if (Parser.getTok().isNot(AsmToken::Integer))
2091 // Operand should start from # or should be integer, emit error otherwise.
2092 return MatchOperand_NoMatch;
2095 if (parseSymbolicImmVal(Imm))
2096 return MatchOperand_ParseFail;
2097 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2098 uint64_t ShiftAmount = 0;
2099 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2101 int64_t Val = MCE->getValue();
2102 if (Val > 0xfff && (Val & 0xfff) == 0) {
2103 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2107 SMLoc E = Parser.getTok().getLoc();
2108 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2110 return MatchOperand_Success;
2116 // The optional operand must be "lsl #N" where N is non-negative.
2117 if (!Parser.getTok().is(AsmToken::Identifier) ||
2118 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2119 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2120 return MatchOperand_ParseFail;
2126 if (Parser.getTok().is(AsmToken::Hash)) {
2130 if (Parser.getTok().isNot(AsmToken::Integer)) {
2131 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2132 return MatchOperand_ParseFail;
2135 int64_t ShiftAmount = Parser.getTok().getIntVal();
2137 if (ShiftAmount < 0) {
2138 Error(Parser.getTok().getLoc(), "positive shift amount required");
2139 return MatchOperand_ParseFail;
2141 Parser.Lex(); // Eat the number
2143 SMLoc E = Parser.getTok().getLoc();
2144 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2145 S, E, getContext()));
2146 return MatchOperand_Success;
2149 /// parseCondCodeString - Parse a Condition Code string.
2150 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2151 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2152 .Case("eq", AArch64CC::EQ)
2153 .Case("ne", AArch64CC::NE)
2154 .Case("cs", AArch64CC::HS)
2155 .Case("hs", AArch64CC::HS)
2156 .Case("cc", AArch64CC::LO)
2157 .Case("lo", AArch64CC::LO)
2158 .Case("mi", AArch64CC::MI)
2159 .Case("pl", AArch64CC::PL)
2160 .Case("vs", AArch64CC::VS)
2161 .Case("vc", AArch64CC::VC)
2162 .Case("hi", AArch64CC::HI)
2163 .Case("ls", AArch64CC::LS)
2164 .Case("ge", AArch64CC::GE)
2165 .Case("lt", AArch64CC::LT)
2166 .Case("gt", AArch64CC::GT)
2167 .Case("le", AArch64CC::LE)
2168 .Case("al", AArch64CC::AL)
2169 .Case("nv", AArch64CC::NV)
2170 .Default(AArch64CC::Invalid);
2174 /// parseCondCode - Parse a Condition Code operand.
2175 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2176 bool invertCondCode) {
2178 const AsmToken &Tok = Parser.getTok();
2179 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2181 StringRef Cond = Tok.getString();
2182 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2183 if (CC == AArch64CC::Invalid)
2184 return TokError("invalid condition code");
2185 Parser.Lex(); // Eat identifier token.
2187 if (invertCondCode) {
2188 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2189 return TokError("condition codes AL and NV are invalid for this instruction");
2190 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2194 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2198 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2199 /// them if present.
2200 AArch64AsmParser::OperandMatchResultTy
2201 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2202 const AsmToken &Tok = Parser.getTok();
2203 std::string LowerID = Tok.getString().lower();
2204 AArch64_AM::ShiftExtendType ShOp =
2205 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2206 .Case("lsl", AArch64_AM::LSL)
2207 .Case("lsr", AArch64_AM::LSR)
2208 .Case("asr", AArch64_AM::ASR)
2209 .Case("ror", AArch64_AM::ROR)
2210 .Case("msl", AArch64_AM::MSL)
2211 .Case("uxtb", AArch64_AM::UXTB)
2212 .Case("uxth", AArch64_AM::UXTH)
2213 .Case("uxtw", AArch64_AM::UXTW)
2214 .Case("uxtx", AArch64_AM::UXTX)
2215 .Case("sxtb", AArch64_AM::SXTB)
2216 .Case("sxth", AArch64_AM::SXTH)
2217 .Case("sxtw", AArch64_AM::SXTW)
2218 .Case("sxtx", AArch64_AM::SXTX)
2219 .Default(AArch64_AM::InvalidShiftExtend);
2221 if (ShOp == AArch64_AM::InvalidShiftExtend)
2222 return MatchOperand_NoMatch;
2224 SMLoc S = Tok.getLoc();
2227 bool Hash = getLexer().is(AsmToken::Hash);
2228 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2229 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2230 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2231 ShOp == AArch64_AM::MSL) {
2232 // We expect a number here.
2233 TokError("expected #imm after shift specifier");
2234 return MatchOperand_ParseFail;
2237 // "extend" type operatoins don't need an immediate, #0 is implicit.
2238 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2240 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2241 return MatchOperand_Success;
2245 Parser.Lex(); // Eat the '#'.
2247 // Make sure we do actually have a number
2248 if (!Parser.getTok().is(AsmToken::Integer)) {
2249 Error(Parser.getTok().getLoc(),
2250 "expected integer shift amount");
2251 return MatchOperand_ParseFail;
2254 const MCExpr *ImmVal;
2255 if (getParser().parseExpression(ImmVal))
2256 return MatchOperand_ParseFail;
2258 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2260 TokError("expected #imm after shift specifier");
2261 return MatchOperand_ParseFail;
2264 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2265 Operands.push_back(AArch64Operand::CreateShiftExtend(
2266 ShOp, MCE->getValue(), true, S, E, getContext()));
2267 return MatchOperand_Success;
2270 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2271 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2272 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2273 OperandVector &Operands) {
2274 if (Name.find('.') != StringRef::npos)
2275 return TokError("invalid operand");
2279 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2281 const AsmToken &Tok = Parser.getTok();
2282 StringRef Op = Tok.getString();
2283 SMLoc S = Tok.getLoc();
2285 const MCExpr *Expr = nullptr;
2287 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2289 Expr = MCConstantExpr::Create(op1, getContext()); \
2290 Operands.push_back( \
2291 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2292 Operands.push_back( \
2293 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2294 Operands.push_back( \
2295 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2296 Expr = MCConstantExpr::Create(op2, getContext()); \
2297 Operands.push_back( \
2298 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2301 if (Mnemonic == "ic") {
2302 if (!Op.compare_lower("ialluis")) {
2303 // SYS #0, C7, C1, #0
2304 SYS_ALIAS(0, 7, 1, 0);
2305 } else if (!Op.compare_lower("iallu")) {
2306 // SYS #0, C7, C5, #0
2307 SYS_ALIAS(0, 7, 5, 0);
2308 } else if (!Op.compare_lower("ivau")) {
2309 // SYS #3, C7, C5, #1
2310 SYS_ALIAS(3, 7, 5, 1);
2312 return TokError("invalid operand for IC instruction");
2314 } else if (Mnemonic == "dc") {
2315 if (!Op.compare_lower("zva")) {
2316 // SYS #3, C7, C4, #1
2317 SYS_ALIAS(3, 7, 4, 1);
2318 } else if (!Op.compare_lower("ivac")) {
2319 // SYS #3, C7, C6, #1
2320 SYS_ALIAS(0, 7, 6, 1);
2321 } else if (!Op.compare_lower("isw")) {
2322 // SYS #0, C7, C6, #2
2323 SYS_ALIAS(0, 7, 6, 2);
2324 } else if (!Op.compare_lower("cvac")) {
2325 // SYS #3, C7, C10, #1
2326 SYS_ALIAS(3, 7, 10, 1);
2327 } else if (!Op.compare_lower("csw")) {
2328 // SYS #0, C7, C10, #2
2329 SYS_ALIAS(0, 7, 10, 2);
2330 } else if (!Op.compare_lower("cvau")) {
2331 // SYS #3, C7, C11, #1
2332 SYS_ALIAS(3, 7, 11, 1);
2333 } else if (!Op.compare_lower("civac")) {
2334 // SYS #3, C7, C14, #1
2335 SYS_ALIAS(3, 7, 14, 1);
2336 } else if (!Op.compare_lower("cisw")) {
2337 // SYS #0, C7, C14, #2
2338 SYS_ALIAS(0, 7, 14, 2);
2340 return TokError("invalid operand for DC instruction");
2342 } else if (Mnemonic == "at") {
2343 if (!Op.compare_lower("s1e1r")) {
2344 // SYS #0, C7, C8, #0
2345 SYS_ALIAS(0, 7, 8, 0);
2346 } else if (!Op.compare_lower("s1e2r")) {
2347 // SYS #4, C7, C8, #0
2348 SYS_ALIAS(4, 7, 8, 0);
2349 } else if (!Op.compare_lower("s1e3r")) {
2350 // SYS #6, C7, C8, #0
2351 SYS_ALIAS(6, 7, 8, 0);
2352 } else if (!Op.compare_lower("s1e1w")) {
2353 // SYS #0, C7, C8, #1
2354 SYS_ALIAS(0, 7, 8, 1);
2355 } else if (!Op.compare_lower("s1e2w")) {
2356 // SYS #4, C7, C8, #1
2357 SYS_ALIAS(4, 7, 8, 1);
2358 } else if (!Op.compare_lower("s1e3w")) {
2359 // SYS #6, C7, C8, #1
2360 SYS_ALIAS(6, 7, 8, 1);
2361 } else if (!Op.compare_lower("s1e0r")) {
2362 // SYS #0, C7, C8, #3
2363 SYS_ALIAS(0, 7, 8, 2);
2364 } else if (!Op.compare_lower("s1e0w")) {
2365 // SYS #0, C7, C8, #3
2366 SYS_ALIAS(0, 7, 8, 3);
2367 } else if (!Op.compare_lower("s12e1r")) {
2368 // SYS #4, C7, C8, #4
2369 SYS_ALIAS(4, 7, 8, 4);
2370 } else if (!Op.compare_lower("s12e1w")) {
2371 // SYS #4, C7, C8, #5
2372 SYS_ALIAS(4, 7, 8, 5);
2373 } else if (!Op.compare_lower("s12e0r")) {
2374 // SYS #4, C7, C8, #6
2375 SYS_ALIAS(4, 7, 8, 6);
2376 } else if (!Op.compare_lower("s12e0w")) {
2377 // SYS #4, C7, C8, #7
2378 SYS_ALIAS(4, 7, 8, 7);
2380 return TokError("invalid operand for AT instruction");
2382 } else if (Mnemonic == "tlbi") {
2383 if (!Op.compare_lower("vmalle1is")) {
2384 // SYS #0, C8, C3, #0
2385 SYS_ALIAS(0, 8, 3, 0);
2386 } else if (!Op.compare_lower("alle2is")) {
2387 // SYS #4, C8, C3, #0
2388 SYS_ALIAS(4, 8, 3, 0);
2389 } else if (!Op.compare_lower("alle3is")) {
2390 // SYS #6, C8, C3, #0
2391 SYS_ALIAS(6, 8, 3, 0);
2392 } else if (!Op.compare_lower("vae1is")) {
2393 // SYS #0, C8, C3, #1
2394 SYS_ALIAS(0, 8, 3, 1);
2395 } else if (!Op.compare_lower("vae2is")) {
2396 // SYS #4, C8, C3, #1
2397 SYS_ALIAS(4, 8, 3, 1);
2398 } else if (!Op.compare_lower("vae3is")) {
2399 // SYS #6, C8, C3, #1
2400 SYS_ALIAS(6, 8, 3, 1);
2401 } else if (!Op.compare_lower("aside1is")) {
2402 // SYS #0, C8, C3, #2
2403 SYS_ALIAS(0, 8, 3, 2);
2404 } else if (!Op.compare_lower("vaae1is")) {
2405 // SYS #0, C8, C3, #3
2406 SYS_ALIAS(0, 8, 3, 3);
2407 } else if (!Op.compare_lower("alle1is")) {
2408 // SYS #4, C8, C3, #4
2409 SYS_ALIAS(4, 8, 3, 4);
2410 } else if (!Op.compare_lower("vale1is")) {
2411 // SYS #0, C8, C3, #5
2412 SYS_ALIAS(0, 8, 3, 5);
2413 } else if (!Op.compare_lower("vaale1is")) {
2414 // SYS #0, C8, C3, #7
2415 SYS_ALIAS(0, 8, 3, 7);
2416 } else if (!Op.compare_lower("vmalle1")) {
2417 // SYS #0, C8, C7, #0
2418 SYS_ALIAS(0, 8, 7, 0);
2419 } else if (!Op.compare_lower("alle2")) {
2420 // SYS #4, C8, C7, #0
2421 SYS_ALIAS(4, 8, 7, 0);
2422 } else if (!Op.compare_lower("vale2is")) {
2423 // SYS #4, C8, C3, #5
2424 SYS_ALIAS(4, 8, 3, 5);
2425 } else if (!Op.compare_lower("vale3is")) {
2426 // SYS #6, C8, C3, #5
2427 SYS_ALIAS(6, 8, 3, 5);
2428 } else if (!Op.compare_lower("alle3")) {
2429 // SYS #6, C8, C7, #0
2430 SYS_ALIAS(6, 8, 7, 0);
2431 } else if (!Op.compare_lower("vae1")) {
2432 // SYS #0, C8, C7, #1
2433 SYS_ALIAS(0, 8, 7, 1);
2434 } else if (!Op.compare_lower("vae2")) {
2435 // SYS #4, C8, C7, #1
2436 SYS_ALIAS(4, 8, 7, 1);
2437 } else if (!Op.compare_lower("vae3")) {
2438 // SYS #6, C8, C7, #1
2439 SYS_ALIAS(6, 8, 7, 1);
2440 } else if (!Op.compare_lower("aside1")) {
2441 // SYS #0, C8, C7, #2
2442 SYS_ALIAS(0, 8, 7, 2);
2443 } else if (!Op.compare_lower("vaae1")) {
2444 // SYS #0, C8, C7, #3
2445 SYS_ALIAS(0, 8, 7, 3);
2446 } else if (!Op.compare_lower("alle1")) {
2447 // SYS #4, C8, C7, #4
2448 SYS_ALIAS(4, 8, 7, 4);
2449 } else if (!Op.compare_lower("vale1")) {
2450 // SYS #0, C8, C7, #5
2451 SYS_ALIAS(0, 8, 7, 5);
2452 } else if (!Op.compare_lower("vale2")) {
2453 // SYS #4, C8, C7, #5
2454 SYS_ALIAS(4, 8, 7, 5);
2455 } else if (!Op.compare_lower("vale3")) {
2456 // SYS #6, C8, C7, #5
2457 SYS_ALIAS(6, 8, 7, 5);
2458 } else if (!Op.compare_lower("vaale1")) {
2459 // SYS #0, C8, C7, #7
2460 SYS_ALIAS(0, 8, 7, 7);
2461 } else if (!Op.compare_lower("ipas2e1")) {
2462 // SYS #4, C8, C4, #1
2463 SYS_ALIAS(4, 8, 4, 1);
2464 } else if (!Op.compare_lower("ipas2le1")) {
2465 // SYS #4, C8, C4, #5
2466 SYS_ALIAS(4, 8, 4, 5);
2467 } else if (!Op.compare_lower("ipas2e1is")) {
2468 // SYS #4, C8, C4, #1
2469 SYS_ALIAS(4, 8, 0, 1);
2470 } else if (!Op.compare_lower("ipas2le1is")) {
2471 // SYS #4, C8, C4, #5
2472 SYS_ALIAS(4, 8, 0, 5);
2473 } else if (!Op.compare_lower("vmalls12e1")) {
2474 // SYS #4, C8, C7, #6
2475 SYS_ALIAS(4, 8, 7, 6);
2476 } else if (!Op.compare_lower("vmalls12e1is")) {
2477 // SYS #4, C8, C3, #6
2478 SYS_ALIAS(4, 8, 3, 6);
2480 return TokError("invalid operand for TLBI instruction");
2486 Parser.Lex(); // Eat operand.
2488 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2489 bool HasRegister = false;
2491 // Check for the optional register operand.
2492 if (getLexer().is(AsmToken::Comma)) {
2493 Parser.Lex(); // Eat comma.
2495 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2496 return TokError("expected register operand");
2501 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2502 Parser.eatToEndOfStatement();
2503 return TokError("unexpected token in argument list");
2506 if (ExpectRegister && !HasRegister) {
2507 return TokError("specified " + Mnemonic + " op requires a register");
2509 else if (!ExpectRegister && HasRegister) {
2510 return TokError("specified " + Mnemonic + " op does not use a register");
2513 Parser.Lex(); // Consume the EndOfStatement
2517 AArch64AsmParser::OperandMatchResultTy
2518 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2519 const AsmToken &Tok = Parser.getTok();
2521 // Can be either a #imm style literal or an option name
2522 bool Hash = Tok.is(AsmToken::Hash);
2523 if (Hash || Tok.is(AsmToken::Integer)) {
2524 // Immediate operand.
2526 Parser.Lex(); // Eat the '#'
2527 const MCExpr *ImmVal;
2528 SMLoc ExprLoc = getLoc();
2529 if (getParser().parseExpression(ImmVal))
2530 return MatchOperand_ParseFail;
2531 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2533 Error(ExprLoc, "immediate value expected for barrier operand");
2534 return MatchOperand_ParseFail;
2536 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2537 Error(ExprLoc, "barrier operand out of range");
2538 return MatchOperand_ParseFail;
2541 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2542 return MatchOperand_Success;
2545 if (Tok.isNot(AsmToken::Identifier)) {
2546 TokError("invalid operand for instruction");
2547 return MatchOperand_ParseFail;
2551 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2553 TokError("invalid barrier option name");
2554 return MatchOperand_ParseFail;
2557 // The only valid named option for ISB is 'sy'
2558 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2559 TokError("'sy' or #imm operand expected");
2560 return MatchOperand_ParseFail;
2564 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2565 Parser.Lex(); // Consume the option
2567 return MatchOperand_Success;
2570 AArch64AsmParser::OperandMatchResultTy
2571 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2572 const AsmToken &Tok = Parser.getTok();
2574 if (Tok.isNot(AsmToken::Identifier))
2575 return MatchOperand_NoMatch;
2577 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2578 STI.getFeatureBits(), getContext()));
2579 Parser.Lex(); // Eat identifier
2581 return MatchOperand_Success;
2584 /// tryParseVectorRegister - Parse a vector register operand.
2585 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2586 if (Parser.getTok().isNot(AsmToken::Identifier))
2590 // Check for a vector register specifier first.
2592 int64_t Reg = tryMatchVectorRegister(Kind, false);
2596 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2597 // If there was an explicit qualifier, that goes on as a literal text
2601 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2603 // If there is an index specifier following the register, parse that too.
2604 if (Parser.getTok().is(AsmToken::LBrac)) {
2605 SMLoc SIdx = getLoc();
2606 Parser.Lex(); // Eat left bracket token.
2608 const MCExpr *ImmVal;
2609 if (getParser().parseExpression(ImmVal))
2611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2613 TokError("immediate value expected for vector index");
2618 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2619 Error(E, "']' expected");
2623 Parser.Lex(); // Eat right bracket token.
2625 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2632 /// parseRegister - Parse a non-vector register operand.
2633 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2635 // Try for a vector register.
2636 if (!tryParseVectorRegister(Operands))
2639 // Try for a scalar register.
2640 int64_t Reg = tryParseRegister();
2644 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2646 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2647 // as a string token in the instruction itself.
2648 if (getLexer().getKind() == AsmToken::LBrac) {
2649 SMLoc LBracS = getLoc();
2651 const AsmToken &Tok = Parser.getTok();
2652 if (Tok.is(AsmToken::Integer)) {
2653 SMLoc IntS = getLoc();
2654 int64_t Val = Tok.getIntVal();
2657 if (getLexer().getKind() == AsmToken::RBrac) {
2658 SMLoc RBracS = getLoc();
2661 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2663 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2665 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2675 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2676 bool HasELFModifier = false;
2677 AArch64MCExpr::VariantKind RefKind;
2679 if (Parser.getTok().is(AsmToken::Colon)) {
2680 Parser.Lex(); // Eat ':"
2681 HasELFModifier = true;
2683 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2684 Error(Parser.getTok().getLoc(),
2685 "expect relocation specifier in operand after ':'");
2689 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2690 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2691 .Case("lo12", AArch64MCExpr::VK_LO12)
2692 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2693 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2694 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2695 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2696 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2697 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2698 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2699 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2700 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2701 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2702 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2703 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2704 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2705 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2706 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2707 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2708 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2709 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2710 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2711 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2712 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2713 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2714 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2715 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2716 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2717 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2718 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2719 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2720 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2721 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2722 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2723 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2724 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2725 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2726 .Default(AArch64MCExpr::VK_INVALID);
2728 if (RefKind == AArch64MCExpr::VK_INVALID) {
2729 Error(Parser.getTok().getLoc(),
2730 "expect relocation specifier in operand after ':'");
2734 Parser.Lex(); // Eat identifier
2736 if (Parser.getTok().isNot(AsmToken::Colon)) {
2737 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2740 Parser.Lex(); // Eat ':'
2743 if (getParser().parseExpression(ImmVal))
2747 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2752 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2753 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2754 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2756 Parser.Lex(); // Eat left bracket token.
2758 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2761 int64_t PrevReg = FirstReg;
2764 if (Parser.getTok().is(AsmToken::Minus)) {
2765 Parser.Lex(); // Eat the minus.
2767 SMLoc Loc = getLoc();
2769 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2772 // Any Kind suffices must match on all regs in the list.
2773 if (Kind != NextKind)
2774 return Error(Loc, "mismatched register size suffix");
2776 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2778 if (Space == 0 || Space > 3) {
2779 return Error(Loc, "invalid number of vectors");
2785 while (Parser.getTok().is(AsmToken::Comma)) {
2786 Parser.Lex(); // Eat the comma token.
2788 SMLoc Loc = getLoc();
2790 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2793 // Any Kind suffices must match on all regs in the list.
2794 if (Kind != NextKind)
2795 return Error(Loc, "mismatched register size suffix");
2797 // Registers must be incremental (with wraparound at 31)
2798 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2799 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2800 return Error(Loc, "registers must be sequential");
2807 if (Parser.getTok().isNot(AsmToken::RCurly))
2808 return Error(getLoc(), "'}' expected");
2809 Parser.Lex(); // Eat the '}' token.
2812 return Error(S, "invalid number of vectors");
2814 unsigned NumElements = 0;
2815 char ElementKind = 0;
2817 parseValidVectorKind(Kind, NumElements, ElementKind);
2819 Operands.push_back(AArch64Operand::CreateVectorList(
2820 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2822 // If there is an index specifier following the list, parse that too.
2823 if (Parser.getTok().is(AsmToken::LBrac)) {
2824 SMLoc SIdx = getLoc();
2825 Parser.Lex(); // Eat left bracket token.
2827 const MCExpr *ImmVal;
2828 if (getParser().parseExpression(ImmVal))
2830 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2832 TokError("immediate value expected for vector index");
2837 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2838 Error(E, "']' expected");
2842 Parser.Lex(); // Eat right bracket token.
2844 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2850 AArch64AsmParser::OperandMatchResultTy
2851 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2852 const AsmToken &Tok = Parser.getTok();
2853 if (!Tok.is(AsmToken::Identifier))
2854 return MatchOperand_NoMatch;
2856 unsigned RegNum = MatchRegisterName(Tok.getString().lower());
2858 MCContext &Ctx = getContext();
2859 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2860 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2861 return MatchOperand_NoMatch;
2864 Parser.Lex(); // Eat register
2866 if (Parser.getTok().isNot(AsmToken::Comma)) {
2868 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2869 return MatchOperand_Success;
2871 Parser.Lex(); // Eat comma.
2873 if (Parser.getTok().is(AsmToken::Hash))
2874 Parser.Lex(); // Eat hash
2876 if (Parser.getTok().isNot(AsmToken::Integer)) {
2877 Error(getLoc(), "index must be absent or #0");
2878 return MatchOperand_ParseFail;
2881 const MCExpr *ImmVal;
2882 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2883 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2884 Error(getLoc(), "index must be absent or #0");
2885 return MatchOperand_ParseFail;
2889 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2890 return MatchOperand_Success;
2893 /// parseOperand - Parse a arm instruction operand. For now this parses the
2894 /// operand regardless of the mnemonic.
2895 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2896 bool invertCondCode) {
2897 // Check if the current operand has a custom associated parser, if so, try to
2898 // custom parse the operand, or fallback to the general approach.
2899 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2900 if (ResTy == MatchOperand_Success)
2902 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2903 // there was a match, but an error occurred, in which case, just return that
2904 // the operand parsing failed.
2905 if (ResTy == MatchOperand_ParseFail)
2908 // Nothing custom, so do general case parsing.
2910 switch (getLexer().getKind()) {
2914 if (parseSymbolicImmVal(Expr))
2915 return Error(S, "invalid operand");
2917 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2918 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2921 case AsmToken::LBrac: {
2922 SMLoc Loc = Parser.getTok().getLoc();
2923 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2925 Parser.Lex(); // Eat '['
2927 // There's no comma after a '[', so we can parse the next operand
2929 return parseOperand(Operands, false, false);
2931 case AsmToken::LCurly:
2932 return parseVectorList(Operands);
2933 case AsmToken::Identifier: {
2934 // If we're expecting a Condition Code operand, then just parse that.
2936 return parseCondCode(Operands, invertCondCode);
2938 // If it's a register name, parse it.
2939 if (!parseRegister(Operands))
2942 // This could be an optional "shift" or "extend" operand.
2943 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2944 // We can only continue if no tokens were eaten.
2945 if (GotShift != MatchOperand_NoMatch)
2948 // This was not a register so parse other operands that start with an
2949 // identifier (like labels) as expressions and create them as immediates.
2950 const MCExpr *IdVal;
2952 if (getParser().parseExpression(IdVal))
2955 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2956 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
2959 case AsmToken::Integer:
2960 case AsmToken::Real:
2961 case AsmToken::Hash: {
2962 // #42 -> immediate.
2964 if (getLexer().is(AsmToken::Hash))
2967 // Parse a negative sign
2968 bool isNegative = false;
2969 if (Parser.getTok().is(AsmToken::Minus)) {
2971 // We need to consume this token only when we have a Real, otherwise
2972 // we let parseSymbolicImmVal take care of it
2973 if (Parser.getLexer().peekTok().is(AsmToken::Real))
2977 // The only Real that should come through here is a literal #0.0 for
2978 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
2979 // so convert the value.
2980 const AsmToken &Tok = Parser.getTok();
2981 if (Tok.is(AsmToken::Real)) {
2982 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2983 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2984 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
2985 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
2986 Mnemonic != "fcmlt")
2987 return TokError("unexpected floating point literal");
2988 else if (IntVal != 0 || isNegative)
2989 return TokError("expected floating-point constant #0.0");
2990 Parser.Lex(); // Eat the token.
2993 AArch64Operand::CreateToken("#0", false, S, getContext()));
2995 AArch64Operand::CreateToken(".0", false, S, getContext()));
2999 const MCExpr *ImmVal;
3000 if (parseSymbolicImmVal(ImmVal))
3003 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3004 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3010 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3012 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3013 StringRef Name, SMLoc NameLoc,
3014 OperandVector &Operands) {
3015 Name = StringSwitch<StringRef>(Name.lower())
3016 .Case("beq", "b.eq")
3017 .Case("bne", "b.ne")
3018 .Case("bhs", "b.hs")
3019 .Case("bcs", "b.cs")
3020 .Case("blo", "b.lo")
3021 .Case("bcc", "b.cc")
3022 .Case("bmi", "b.mi")
3023 .Case("bpl", "b.pl")
3024 .Case("bvs", "b.vs")
3025 .Case("bvc", "b.vc")
3026 .Case("bhi", "b.hi")
3027 .Case("bls", "b.ls")
3028 .Case("bge", "b.ge")
3029 .Case("blt", "b.lt")
3030 .Case("bgt", "b.gt")
3031 .Case("ble", "b.le")
3032 .Case("bal", "b.al")
3033 .Case("bnv", "b.nv")
3036 // Create the leading tokens for the mnemonic, split by '.' characters.
3037 size_t Start = 0, Next = Name.find('.');
3038 StringRef Head = Name.slice(Start, Next);
3040 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3041 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3042 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3043 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3044 Parser.eatToEndOfStatement();
3049 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3052 // Handle condition codes for a branch mnemonic
3053 if (Head == "b" && Next != StringRef::npos) {
3055 Next = Name.find('.', Start + 1);
3056 Head = Name.slice(Start + 1, Next);
3058 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3059 (Head.data() - Name.data()));
3060 AArch64CC::CondCode CC = parseCondCodeString(Head);
3061 if (CC == AArch64CC::Invalid)
3062 return Error(SuffixLoc, "invalid condition code");
3064 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3066 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3069 // Add the remaining tokens in the mnemonic.
3070 while (Next != StringRef::npos) {
3072 Next = Name.find('.', Start + 1);
3073 Head = Name.slice(Start, Next);
3074 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3075 (Head.data() - Name.data()) + 1);
3077 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3080 // Conditional compare instructions have a Condition Code operand, which needs
3081 // to be parsed and an immediate operand created.
3082 bool condCodeFourthOperand =
3083 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3084 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3085 Head == "csinc" || Head == "csinv" || Head == "csneg");
3087 // These instructions are aliases to some of the conditional select
3088 // instructions. However, the condition code is inverted in the aliased
3091 // FIXME: Is this the correct way to handle these? Or should the parser
3092 // generate the aliased instructions directly?
3093 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3094 bool condCodeThirdOperand =
3095 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3097 // Read the remaining operands.
3098 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3099 // Read the first operand.
3100 if (parseOperand(Operands, false, false)) {
3101 Parser.eatToEndOfStatement();
3106 while (getLexer().is(AsmToken::Comma)) {
3107 Parser.Lex(); // Eat the comma.
3109 // Parse and remember the operand.
3110 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3111 (N == 3 && condCodeThirdOperand) ||
3112 (N == 2 && condCodeSecondOperand),
3113 condCodeSecondOperand || condCodeThirdOperand)) {
3114 Parser.eatToEndOfStatement();
3118 // After successfully parsing some operands there are two special cases to
3119 // consider (i.e. notional operands not separated by commas). Both are due
3120 // to memory specifiers:
3121 // + An RBrac will end an address for load/store/prefetch
3122 // + An '!' will indicate a pre-indexed operation.
3124 // It's someone else's responsibility to make sure these tokens are sane
3125 // in the given context!
3126 if (Parser.getTok().is(AsmToken::RBrac)) {
3127 SMLoc Loc = Parser.getTok().getLoc();
3128 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3133 if (Parser.getTok().is(AsmToken::Exclaim)) {
3134 SMLoc Loc = Parser.getTok().getLoc();
3135 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3144 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3145 SMLoc Loc = Parser.getTok().getLoc();
3146 Parser.eatToEndOfStatement();
3147 return Error(Loc, "unexpected token in argument list");
3150 Parser.Lex(); // Consume the EndOfStatement
3154 // FIXME: This entire function is a giant hack to provide us with decent
3155 // operand range validation/diagnostics until TableGen/MC can be extended
3156 // to support autogeneration of this kind of validation.
3157 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3158 SmallVectorImpl<SMLoc> &Loc) {
3159 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3160 // Check for indexed addressing modes w/ the base register being the
3161 // same as a destination/source register or pair load where
3162 // the Rt == Rt2. All of those are undefined behaviour.
3163 switch (Inst.getOpcode()) {
3164 case AArch64::LDPSWpre:
3165 case AArch64::LDPWpost:
3166 case AArch64::LDPWpre:
3167 case AArch64::LDPXpost:
3168 case AArch64::LDPXpre: {
3169 unsigned Rt = Inst.getOperand(1).getReg();
3170 unsigned Rt2 = Inst.getOperand(2).getReg();
3171 unsigned Rn = Inst.getOperand(3).getReg();
3172 if (RI->isSubRegisterEq(Rn, Rt))
3173 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3174 "is also a destination");
3175 if (RI->isSubRegisterEq(Rn, Rt2))
3176 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3177 "is also a destination");
3180 case AArch64::LDPDi:
3181 case AArch64::LDPQi:
3182 case AArch64::LDPSi:
3183 case AArch64::LDPSWi:
3184 case AArch64::LDPWi:
3185 case AArch64::LDPXi: {
3186 unsigned Rt = Inst.getOperand(0).getReg();
3187 unsigned Rt2 = Inst.getOperand(1).getReg();
3189 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3192 case AArch64::LDPDpost:
3193 case AArch64::LDPDpre:
3194 case AArch64::LDPQpost:
3195 case AArch64::LDPQpre:
3196 case AArch64::LDPSpost:
3197 case AArch64::LDPSpre:
3198 case AArch64::LDPSWpost: {
3199 unsigned Rt = Inst.getOperand(1).getReg();
3200 unsigned Rt2 = Inst.getOperand(2).getReg();
3202 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3205 case AArch64::STPDpost:
3206 case AArch64::STPDpre:
3207 case AArch64::STPQpost:
3208 case AArch64::STPQpre:
3209 case AArch64::STPSpost:
3210 case AArch64::STPSpre:
3211 case AArch64::STPWpost:
3212 case AArch64::STPWpre:
3213 case AArch64::STPXpost:
3214 case AArch64::STPXpre: {
3215 unsigned Rt = Inst.getOperand(1).getReg();
3216 unsigned Rt2 = Inst.getOperand(2).getReg();
3217 unsigned Rn = Inst.getOperand(3).getReg();
3218 if (RI->isSubRegisterEq(Rn, Rt))
3219 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3220 "is also a source");
3221 if (RI->isSubRegisterEq(Rn, Rt2))
3222 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3223 "is also a source");
3226 case AArch64::LDRBBpre:
3227 case AArch64::LDRBpre:
3228 case AArch64::LDRHHpre:
3229 case AArch64::LDRHpre:
3230 case AArch64::LDRSBWpre:
3231 case AArch64::LDRSBXpre:
3232 case AArch64::LDRSHWpre:
3233 case AArch64::LDRSHXpre:
3234 case AArch64::LDRSWpre:
3235 case AArch64::LDRWpre:
3236 case AArch64::LDRXpre:
3237 case AArch64::LDRBBpost:
3238 case AArch64::LDRBpost:
3239 case AArch64::LDRHHpost:
3240 case AArch64::LDRHpost:
3241 case AArch64::LDRSBWpost:
3242 case AArch64::LDRSBXpost:
3243 case AArch64::LDRSHWpost:
3244 case AArch64::LDRSHXpost:
3245 case AArch64::LDRSWpost:
3246 case AArch64::LDRWpost:
3247 case AArch64::LDRXpost: {
3248 unsigned Rt = Inst.getOperand(1).getReg();
3249 unsigned Rn = Inst.getOperand(2).getReg();
3250 if (RI->isSubRegisterEq(Rn, Rt))
3251 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3252 "is also a source");
3255 case AArch64::STRBBpost:
3256 case AArch64::STRBpost:
3257 case AArch64::STRHHpost:
3258 case AArch64::STRHpost:
3259 case AArch64::STRWpost:
3260 case AArch64::STRXpost:
3261 case AArch64::STRBBpre:
3262 case AArch64::STRBpre:
3263 case AArch64::STRHHpre:
3264 case AArch64::STRHpre:
3265 case AArch64::STRWpre:
3266 case AArch64::STRXpre: {
3267 unsigned Rt = Inst.getOperand(1).getReg();
3268 unsigned Rn = Inst.getOperand(2).getReg();
3269 if (RI->isSubRegisterEq(Rn, Rt))
3270 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3271 "is also a source");
3276 // Now check immediate ranges. Separate from the above as there is overlap
3277 // in the instructions being checked and this keeps the nested conditionals
3279 switch (Inst.getOpcode()) {
3280 case AArch64::ADDSWri:
3281 case AArch64::ADDSXri:
3282 case AArch64::ADDWri:
3283 case AArch64::ADDXri:
3284 case AArch64::SUBSWri:
3285 case AArch64::SUBSXri:
3286 case AArch64::SUBWri:
3287 case AArch64::SUBXri: {
3288 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3289 // some slight duplication here.
3290 if (Inst.getOperand(2).isExpr()) {
3291 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3292 AArch64MCExpr::VariantKind ELFRefKind;
3293 MCSymbolRefExpr::VariantKind DarwinRefKind;
3295 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3296 return Error(Loc[2], "invalid immediate expression");
3299 // Only allow these with ADDXri.
3300 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3301 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3302 Inst.getOpcode() == AArch64::ADDXri)
3305 // Only allow these with ADDXri/ADDWri
3306 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3307 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3308 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3309 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3310 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3311 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3312 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3313 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3314 (Inst.getOpcode() == AArch64::ADDXri ||
3315 Inst.getOpcode() == AArch64::ADDWri))
3318 // Don't allow expressions in the immediate field otherwise
3319 return Error(Loc[2], "invalid immediate expression");
3328 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3330 case Match_MissingFeature:
3332 "instruction requires a CPU feature not currently enabled");
3333 case Match_InvalidOperand:
3334 return Error(Loc, "invalid operand for instruction");
3335 case Match_InvalidSuffix:
3336 return Error(Loc, "invalid type suffix for instruction");
3337 case Match_InvalidCondCode:
3338 return Error(Loc, "expected AArch64 condition code");
3339 case Match_AddSubRegExtendSmall:
3341 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3342 case Match_AddSubRegExtendLarge:
3344 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3345 case Match_AddSubSecondSource:
3347 "expected compatible register, symbol or integer in range [0, 4095]");
3348 case Match_LogicalSecondSource:
3349 return Error(Loc, "expected compatible register or logical immediate");
3350 case Match_InvalidMovImm32Shift:
3351 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3352 case Match_InvalidMovImm64Shift:
3353 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3354 case Match_AddSubRegShift32:
3356 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3357 case Match_AddSubRegShift64:
3359 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3360 case Match_InvalidFPImm:
3362 "expected compatible register or floating-point constant");
3363 case Match_InvalidMemoryIndexedSImm9:
3364 return Error(Loc, "index must be an integer in range [-256, 255].");
3365 case Match_InvalidMemoryIndexed4SImm7:
3366 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3367 case Match_InvalidMemoryIndexed8SImm7:
3368 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3369 case Match_InvalidMemoryIndexed16SImm7:
3370 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3371 case Match_InvalidMemoryWExtend8:
3373 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3374 case Match_InvalidMemoryWExtend16:
3376 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3377 case Match_InvalidMemoryWExtend32:
3379 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3380 case Match_InvalidMemoryWExtend64:
3382 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3383 case Match_InvalidMemoryWExtend128:
3385 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3386 case Match_InvalidMemoryXExtend8:
3388 "expected 'lsl' or 'sxtx' with optional shift of #0");
3389 case Match_InvalidMemoryXExtend16:
3391 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3392 case Match_InvalidMemoryXExtend32:
3394 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3395 case Match_InvalidMemoryXExtend64:
3397 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3398 case Match_InvalidMemoryXExtend128:
3400 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3401 case Match_InvalidMemoryIndexed1:
3402 return Error(Loc, "index must be an integer in range [0, 4095].");
3403 case Match_InvalidMemoryIndexed2:
3404 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3405 case Match_InvalidMemoryIndexed4:
3406 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3407 case Match_InvalidMemoryIndexed8:
3408 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3409 case Match_InvalidMemoryIndexed16:
3410 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3411 case Match_InvalidImm0_7:
3412 return Error(Loc, "immediate must be an integer in range [0, 7].");
3413 case Match_InvalidImm0_15:
3414 return Error(Loc, "immediate must be an integer in range [0, 15].");
3415 case Match_InvalidImm0_31:
3416 return Error(Loc, "immediate must be an integer in range [0, 31].");
3417 case Match_InvalidImm0_63:
3418 return Error(Loc, "immediate must be an integer in range [0, 63].");
3419 case Match_InvalidImm0_127:
3420 return Error(Loc, "immediate must be an integer in range [0, 127].");
3421 case Match_InvalidImm0_65535:
3422 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3423 case Match_InvalidImm1_8:
3424 return Error(Loc, "immediate must be an integer in range [1, 8].");
3425 case Match_InvalidImm1_16:
3426 return Error(Loc, "immediate must be an integer in range [1, 16].");
3427 case Match_InvalidImm1_32:
3428 return Error(Loc, "immediate must be an integer in range [1, 32].");
3429 case Match_InvalidImm1_64:
3430 return Error(Loc, "immediate must be an integer in range [1, 64].");
3431 case Match_InvalidIndex1:
3432 return Error(Loc, "expected lane specifier '[1]'");
3433 case Match_InvalidIndexB:
3434 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3435 case Match_InvalidIndexH:
3436 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3437 case Match_InvalidIndexS:
3438 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3439 case Match_InvalidIndexD:
3440 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3441 case Match_InvalidLabel:
3442 return Error(Loc, "expected label or encodable integer pc offset");
3444 return Error(Loc, "expected readable system register");
3446 return Error(Loc, "expected writable system register or pstate");
3447 case Match_MnemonicFail:
3448 return Error(Loc, "unrecognized instruction mnemonic");
3450 llvm_unreachable("unexpected error code!");
3454 static const char *getSubtargetFeatureName(unsigned Val);
3456 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3457 OperandVector &Operands,
3459 unsigned &ErrorInfo,
3460 bool MatchingInlineAsm) {
3461 assert(!Operands.empty() && "Unexpect empty operand list!");
3462 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3463 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3465 StringRef Tok = Op.getToken();
3466 unsigned NumOperands = Operands.size();
3468 if (NumOperands == 4 && Tok == "lsl") {
3469 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3470 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3471 if (Op2.isReg() && Op3.isImm()) {
3472 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3474 uint64_t Op3Val = Op3CE->getValue();
3475 uint64_t NewOp3Val = 0;
3476 uint64_t NewOp4Val = 0;
3477 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3479 NewOp3Val = (32 - Op3Val) & 0x1f;
3480 NewOp4Val = 31 - Op3Val;
3482 NewOp3Val = (64 - Op3Val) & 0x3f;
3483 NewOp4Val = 63 - Op3Val;
3486 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3487 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3489 Operands[0] = AArch64Operand::CreateToken(
3490 "ubfm", false, Op.getStartLoc(), getContext());
3491 Operands.push_back(AArch64Operand::CreateImm(
3492 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3493 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3494 Op3.getEndLoc(), getContext());
3497 } else if (NumOperands == 5) {
3498 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3499 // UBFIZ -> UBFM aliases.
3500 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3501 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3502 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3503 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3505 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3506 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3507 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3509 if (Op3CE && Op4CE) {
3510 uint64_t Op3Val = Op3CE->getValue();
3511 uint64_t Op4Val = Op4CE->getValue();
3513 uint64_t RegWidth = 0;
3514 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3520 if (Op3Val >= RegWidth)
3521 return Error(Op3.getStartLoc(),
3522 "expected integer in range [0, 31]");
3523 if (Op4Val < 1 || Op4Val > RegWidth)
3524 return Error(Op4.getStartLoc(),
3525 "expected integer in range [1, 32]");
3527 uint64_t NewOp3Val = 0;
3528 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3530 NewOp3Val = (32 - Op3Val) & 0x1f;
3532 NewOp3Val = (64 - Op3Val) & 0x3f;
3534 uint64_t NewOp4Val = Op4Val - 1;
3536 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3537 return Error(Op4.getStartLoc(),
3538 "requested insert overflows register");
3540 const MCExpr *NewOp3 =
3541 MCConstantExpr::Create(NewOp3Val, getContext());
3542 const MCExpr *NewOp4 =
3543 MCConstantExpr::Create(NewOp4Val, getContext());
3544 Operands[3] = AArch64Operand::CreateImm(
3545 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3546 Operands[4] = AArch64Operand::CreateImm(
3547 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3549 Operands[0] = AArch64Operand::CreateToken(
3550 "bfm", false, Op.getStartLoc(), getContext());
3551 else if (Tok == "sbfiz")
3552 Operands[0] = AArch64Operand::CreateToken(
3553 "sbfm", false, Op.getStartLoc(), getContext());
3554 else if (Tok == "ubfiz")
3555 Operands[0] = AArch64Operand::CreateToken(
3556 "ubfm", false, Op.getStartLoc(), getContext());
3558 llvm_unreachable("No valid mnemonic for alias?");
3562 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3563 // UBFX -> UBFM aliases.
3564 } else if (NumOperands == 5 &&
3565 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3566 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3567 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3568 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3570 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3571 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3572 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3574 if (Op3CE && Op4CE) {
3575 uint64_t Op3Val = Op3CE->getValue();
3576 uint64_t Op4Val = Op4CE->getValue();
3578 uint64_t RegWidth = 0;
3579 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3585 if (Op3Val >= RegWidth)
3586 return Error(Op3.getStartLoc(),
3587 "expected integer in range [0, 31]");
3588 if (Op4Val < 1 || Op4Val > RegWidth)
3589 return Error(Op4.getStartLoc(),
3590 "expected integer in range [1, 32]");
3592 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3594 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3595 return Error(Op4.getStartLoc(),
3596 "requested extract overflows register");
3598 const MCExpr *NewOp4 =
3599 MCConstantExpr::Create(NewOp4Val, getContext());
3600 Operands[4] = AArch64Operand::CreateImm(
3601 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3603 Operands[0] = AArch64Operand::CreateToken(
3604 "bfm", false, Op.getStartLoc(), getContext());
3605 else if (Tok == "sbfx")
3606 Operands[0] = AArch64Operand::CreateToken(
3607 "sbfm", false, Op.getStartLoc(), getContext());
3608 else if (Tok == "ubfx")
3609 Operands[0] = AArch64Operand::CreateToken(
3610 "ubfm", false, Op.getStartLoc(), getContext());
3612 llvm_unreachable("No valid mnemonic for alias?");
3617 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3618 // InstAlias can't quite handle this since the reg classes aren't
3620 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3621 // The source register can be Wn here, but the matcher expects a
3622 // GPR64. Twiddle it here if necessary.
3623 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3625 unsigned Reg = getXRegFromWReg(Op.getReg());
3626 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3627 Op.getEndLoc(), getContext());
3630 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3631 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3632 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3634 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3636 // The source register can be Wn here, but the matcher expects a
3637 // GPR64. Twiddle it here if necessary.
3638 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3640 unsigned Reg = getXRegFromWReg(Op.getReg());
3641 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3642 Op.getEndLoc(), getContext());
3646 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3647 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3648 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3650 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3652 // The source register can be Wn here, but the matcher expects a
3653 // GPR32. Twiddle it here if necessary.
3654 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3656 unsigned Reg = getWRegFromXReg(Op.getReg());
3657 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3658 Op.getEndLoc(), getContext());
3663 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3664 if (NumOperands == 3 && Tok == "fmov") {
3665 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3666 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3667 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3669 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3673 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3674 Op.getEndLoc(), getContext());
3679 // First try to match against the secondary set of tables containing the
3680 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3681 unsigned MatchResult =
3682 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3684 // If that fails, try against the alternate table containing long-form NEON:
3685 // "fadd v0.2s, v1.2s, v2.2s"
3686 if (MatchResult != Match_Success)
3688 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3690 switch (MatchResult) {
3691 case Match_Success: {
3692 // Perform range checking and other semantic validations
3693 SmallVector<SMLoc, 8> OperandLocs;
3694 NumOperands = Operands.size();
3695 for (unsigned i = 1; i < NumOperands; ++i)
3696 OperandLocs.push_back(Operands[i]->getStartLoc());
3697 if (validateInstruction(Inst, OperandLocs))
3701 Out.EmitInstruction(Inst, STI);
3704 case Match_MissingFeature: {
3705 assert(ErrorInfo && "Unknown missing feature!");
3706 // Special case the error message for the very common case where only
3707 // a single subtarget feature is missing (neon, e.g.).
3708 std::string Msg = "instruction requires:";
3710 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3711 if (ErrorInfo & Mask) {
3713 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3717 return Error(IDLoc, Msg);
3719 case Match_MnemonicFail:
3720 return showMatchError(IDLoc, MatchResult);
3721 case Match_InvalidOperand: {
3722 SMLoc ErrorLoc = IDLoc;
3723 if (ErrorInfo != ~0U) {
3724 if (ErrorInfo >= Operands.size())
3725 return Error(IDLoc, "too few operands for instruction");
3727 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3728 if (ErrorLoc == SMLoc())
3731 // If the match failed on a suffix token operand, tweak the diagnostic
3733 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3734 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3735 MatchResult = Match_InvalidSuffix;
3737 return showMatchError(ErrorLoc, MatchResult);
3739 case Match_InvalidMemoryIndexed1:
3740 case Match_InvalidMemoryIndexed2:
3741 case Match_InvalidMemoryIndexed4:
3742 case Match_InvalidMemoryIndexed8:
3743 case Match_InvalidMemoryIndexed16:
3744 case Match_InvalidCondCode:
3745 case Match_AddSubRegExtendSmall:
3746 case Match_AddSubRegExtendLarge:
3747 case Match_AddSubSecondSource:
3748 case Match_LogicalSecondSource:
3749 case Match_AddSubRegShift32:
3750 case Match_AddSubRegShift64:
3751 case Match_InvalidMovImm32Shift:
3752 case Match_InvalidMovImm64Shift:
3753 case Match_InvalidFPImm:
3754 case Match_InvalidMemoryWExtend8:
3755 case Match_InvalidMemoryWExtend16:
3756 case Match_InvalidMemoryWExtend32:
3757 case Match_InvalidMemoryWExtend64:
3758 case Match_InvalidMemoryWExtend128:
3759 case Match_InvalidMemoryXExtend8:
3760 case Match_InvalidMemoryXExtend16:
3761 case Match_InvalidMemoryXExtend32:
3762 case Match_InvalidMemoryXExtend64:
3763 case Match_InvalidMemoryXExtend128:
3764 case Match_InvalidMemoryIndexed4SImm7:
3765 case Match_InvalidMemoryIndexed8SImm7:
3766 case Match_InvalidMemoryIndexed16SImm7:
3767 case Match_InvalidMemoryIndexedSImm9:
3768 case Match_InvalidImm0_7:
3769 case Match_InvalidImm0_15:
3770 case Match_InvalidImm0_31:
3771 case Match_InvalidImm0_63:
3772 case Match_InvalidImm0_127:
3773 case Match_InvalidImm0_65535:
3774 case Match_InvalidImm1_8:
3775 case Match_InvalidImm1_16:
3776 case Match_InvalidImm1_32:
3777 case Match_InvalidImm1_64:
3778 case Match_InvalidIndex1:
3779 case Match_InvalidIndexB:
3780 case Match_InvalidIndexH:
3781 case Match_InvalidIndexS:
3782 case Match_InvalidIndexD:
3783 case Match_InvalidLabel:
3786 if (ErrorInfo >= Operands.size())
3787 return Error(IDLoc, "too few operands for instruction");
3788 // Any time we get here, there's nothing fancy to do. Just get the
3789 // operand SMLoc and display the diagnostic.
3790 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3791 if (ErrorLoc == SMLoc())
3793 return showMatchError(ErrorLoc, MatchResult);
3797 llvm_unreachable("Implement any new match types added!");
3801 /// ParseDirective parses the arm specific directives
3802 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3803 StringRef IDVal = DirectiveID.getIdentifier();
3804 SMLoc Loc = DirectiveID.getLoc();
3805 if (IDVal == ".hword")
3806 return parseDirectiveWord(2, Loc);
3807 if (IDVal == ".word")
3808 return parseDirectiveWord(4, Loc);
3809 if (IDVal == ".xword")
3810 return parseDirectiveWord(8, Loc);
3811 if (IDVal == ".tlsdesccall")
3812 return parseDirectiveTLSDescCall(Loc);
3814 return parseDirectiveLOH(IDVal, Loc);
3817 /// parseDirectiveWord
3818 /// ::= .word [ expression (, expression)* ]
3819 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3820 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3822 const MCExpr *Value;
3823 if (getParser().parseExpression(Value))
3826 getParser().getStreamer().EmitValue(Value, Size);
3828 if (getLexer().is(AsmToken::EndOfStatement))
3831 // FIXME: Improve diagnostic.
3832 if (getLexer().isNot(AsmToken::Comma))
3833 return Error(L, "unexpected token in directive");
3842 // parseDirectiveTLSDescCall:
3843 // ::= .tlsdesccall symbol
3844 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3846 if (getParser().parseIdentifier(Name))
3847 return Error(L, "expected symbol after directive");
3849 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3850 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3851 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3854 Inst.setOpcode(AArch64::TLSDESCCALL);
3855 Inst.addOperand(MCOperand::CreateExpr(Expr));
3857 getParser().getStreamer().EmitInstruction(Inst, STI);
3861 /// ::= .loh <lohName | lohId> label1, ..., labelN
3862 /// The number of arguments depends on the loh identifier.
3863 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3864 if (IDVal != MCLOHDirectiveName())
3867 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3868 if (getParser().getTok().isNot(AsmToken::Integer))
3869 return TokError("expected an identifier or a number in directive");
3870 // We successfully get a numeric value for the identifier.
3871 // Check if it is valid.
3872 int64_t Id = getParser().getTok().getIntVal();
3873 Kind = (MCLOHType)Id;
3874 // Check that Id does not overflow MCLOHType.
3875 if (!isValidMCLOHType(Kind) || Id != Kind)
3876 return TokError("invalid numeric identifier in directive");
3878 StringRef Name = getTok().getIdentifier();
3879 // We successfully parse an identifier.
3880 // Check if it is a recognized one.
3881 int Id = MCLOHNameToId(Name);
3884 return TokError("invalid identifier in directive");
3885 Kind = (MCLOHType)Id;
3887 // Consume the identifier.
3889 // Get the number of arguments of this LOH.
3890 int NbArgs = MCLOHIdToNbArgs(Kind);
3892 assert(NbArgs != -1 && "Invalid number of arguments");
3894 SmallVector<MCSymbol *, 3> Args;
3895 for (int Idx = 0; Idx < NbArgs; ++Idx) {
3897 if (getParser().parseIdentifier(Name))
3898 return TokError("expected identifier in directive");
3899 Args.push_back(getContext().GetOrCreateSymbol(Name));
3901 if (Idx + 1 == NbArgs)
3903 if (getLexer().isNot(AsmToken::Comma))
3904 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3907 if (getLexer().isNot(AsmToken::EndOfStatement))
3908 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3910 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
3915 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
3916 AArch64MCExpr::VariantKind &ELFRefKind,
3917 MCSymbolRefExpr::VariantKind &DarwinRefKind,
3919 ELFRefKind = AArch64MCExpr::VK_INVALID;
3920 DarwinRefKind = MCSymbolRefExpr::VK_None;
3923 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
3924 ELFRefKind = AE->getKind();
3925 Expr = AE->getSubExpr();
3928 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
3930 // It's a simple symbol reference with no addend.
3931 DarwinRefKind = SE->getKind();
3935 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
3939 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
3942 DarwinRefKind = SE->getKind();
3944 if (BE->getOpcode() != MCBinaryExpr::Add &&
3945 BE->getOpcode() != MCBinaryExpr::Sub)
3948 // See if the addend is is a constant, otherwise there's more going
3949 // on here than we can deal with.
3950 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
3954 Addend = AddendExpr->getValue();
3955 if (BE->getOpcode() == MCBinaryExpr::Sub)
3958 // It's some symbol reference + a constant addend, but really
3959 // shouldn't use both Darwin and ELF syntax.
3960 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
3961 DarwinRefKind == MCSymbolRefExpr::VK_None;
3964 /// Force static initialization.
3965 extern "C" void LLVMInitializeAArch64AsmParser() {
3966 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
3967 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
3969 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
3970 RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
3973 #define GET_REGISTER_MATCHER
3974 #define GET_SUBTARGET_FEATURE_NAME
3975 #define GET_MATCHER_IMPLEMENTATION
3976 #include "AArch64GenAsmMatcher.inc"
3978 // Define this matcher function after the auto-generated include so we
3979 // have the match class enum definitions.
3980 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
3982 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
3983 // If the kind is a token for a literal immediate, check if our asm
3984 // operand matches. This is for InstAliases which have a fixed-value
3985 // immediate in the syntax.
3986 int64_t ExpectedVal;
3989 return Match_InvalidOperand;
4031 return Match_InvalidOperand;
4032 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4034 return Match_InvalidOperand;
4035 if (CE->getValue() == ExpectedVal)
4036 return Match_Success;
4037 return Match_InvalidOperand;