1 //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64MCExpr.h"
12 #include "Utils/ARM64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class ARM64AsmParser : public MCTargetAsmParser {
42 typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45 StringRef Mnemonic; ///< Instruction mnemonic.
// Convenience accessors for the wrapped generic assembly parser and its lexer.
49 MCAsmParser &getParser() const { return Parser; }
50 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
// Location of the token currently under the lexer cursor, for diagnostics.
52 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
54 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55 unsigned parseCondCodeString(StringRef Cond);
56 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57 int tryParseRegister();
58 int tryMatchVectorRegister(StringRef &Kind, bool expected);
59 bool parseOptionalShift(OperandVector &Operands);
60 bool parseOptionalExtend(OperandVector &Operands);
61 bool parseRegister(OperandVector &Operands);
62 bool parseMemory(OperandVector &Operands);
63 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
64 bool parseVectorList(OperandVector &Operands);
65 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Forward diagnostics to the underlying MCAsmParser. Error propagates the
// parser's boolean result so callers can write `return Error(Loc, ...);`.
68 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
69 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
70 bool showMatchError(SMLoc Loc, unsigned ErrCode);
72 bool parseDirectiveWord(unsigned Size, SMLoc L);
73 bool parseDirectiveTLSDescCall(SMLoc L);
75 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
77 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79 OperandVector &Operands, MCStreamer &Out,
80 unsigned &ErrorInfo, bool MatchingInlineAsm);
81 /// @name Auto-generated Match Functions
84 #define GET_ASSEMBLER_HEADER
85 #include "ARM64GenAsmMatcher.inc"
89 OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
90 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
91 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
92 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
93 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
94 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
95 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
96 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
97 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
98 bool tryParseVectorRegister(OperandVector &Operands);
101 enum ARM64MatchResultTy {
102 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
103 #define GET_OPERAND_DIAGNOSTIC_TYPES
104 #include "ARM64GenAsmMatcher.inc"
106 ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
107 const MCInstrInfo &MII)
108 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
109 MCAsmParserExtension::Initialize(_Parser);
112 virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
113 SMLoc NameLoc, OperandVector &Operands);
114 virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
115 virtual bool ParseDirective(AsmToken DirectiveID);
116 unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
118 static bool classifySymbolRef(const MCExpr *Expr,
119 ARM64MCExpr::VariantKind &ELFRefKind,
120 MCSymbolRefExpr::VariantKind &DarwinRefKind,
121 const MCConstantExpr *&Addend);
123 } // end anonymous namespace
127 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
129 class ARM64Operand : public MCParsedAsmOperand {
132 ImmediateOffset, // pre-indexed, no writeback
133 RegisterOffset // register offset, with optional extend
153 SMLoc StartLoc, EndLoc, OffsetLoc;
158 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
166 struct VectorListOp {
169 unsigned NumElements;
170 unsigned ElementKind;
173 struct VectorIndexOp {
182 unsigned Val; // Encoded 8-bit representation.
186 unsigned Val; // Not the enum since not all values have names.
210 // This is for all forms of ARM64 address expressions
212 unsigned BaseRegNum, OffsetRegNum;
213 ARM64_AM::ExtendType ExtType;
216 const MCExpr *OffsetImm;
223 struct VectorListOp VectorList;
224 struct VectorIndexOp VectorIndex;
226 struct FPImmOp FPImm;
227 struct BarrierOp Barrier;
228 struct SysRegOp SysReg;
229 struct SysCRImmOp SysCRImm;
230 struct PrefetchOp Prefetch;
231 struct ShifterOp Shifter;
232 struct ExtendOp Extend;
236 // Keep the MCContext around as the MCExprs may need manipulated during
237 // the add<>Operands() calls.
240 ARM64Operand(KindTy K, MCContext &_Ctx)
241 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
244 ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
246 StartLoc = o.StartLoc;
265 VectorList = o.VectorList;
268 VectorIndex = o.VectorIndex;
274 SysCRImm = o.SysCRImm;
277 Prefetch = o.Prefetch;
291 /// getStartLoc - Get the location of the first token of this operand.
292 SMLoc getStartLoc() const { return StartLoc; }
293 /// getEndLoc - Get the location of the last token of this operand.
294 SMLoc getEndLoc() const { return EndLoc; }
295 /// getOffsetLoc - Get the location of the offset of this memory operand.
/// NOTE(review): presumably only meaningful when Kind == k_Memory — confirm.
296 SMLoc getOffsetLoc() const { return OffsetLoc; }
298 StringRef getToken() const {
299 assert(Kind == k_Token && "Invalid access!");
300 return StringRef(Tok.Data, Tok.Length);
303 bool isTokenSuffix() const {
304 assert(Kind == k_Token && "Invalid access!");
308 const MCExpr *getImm() const {
309 assert(Kind == k_Immediate && "Invalid access!");
313 unsigned getFPImm() const {
314 assert(Kind == k_FPImm && "Invalid access!");
318 unsigned getBarrier() const {
319 assert(Kind == k_Barrier && "Invalid access!");
323 unsigned getReg() const {
324 assert(Kind == k_Register && "Invalid access!");
328 unsigned getVectorListStart() const {
329 assert(Kind == k_VectorList && "Invalid access!");
330 return VectorList.RegNum;
333 unsigned getVectorListCount() const {
334 assert(Kind == k_VectorList && "Invalid access!");
335 return VectorList.Count;
338 unsigned getVectorIndex() const {
339 assert(Kind == k_VectorIndex && "Invalid access!");
340 return VectorIndex.Val;
343 StringRef getSysReg() const {
344 assert(Kind == k_SysReg && "Invalid access!");
345 return StringRef(SysReg.Data, SysReg.Length);
348 unsigned getSysCR() const {
349 assert(Kind == k_SysCR && "Invalid access!");
353 unsigned getPrefetch() const {
354 assert(Kind == k_Prefetch && "Invalid access!");
358 unsigned getShifter() const {
359 assert(Kind == k_Shifter && "Invalid access!");
363 unsigned getExtend() const {
364 assert(Kind == k_Extend && "Invalid access!");
368 bool isImm() const { return Kind == k_Immediate; }
369 bool isSImm9() const {
372 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
375 int64_t Val = MCE->getValue();
376 return (Val >= -256 && Val < 256);
378 bool isSImm7s4() const {
381 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
384 int64_t Val = MCE->getValue();
385 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
387 bool isSImm7s8() const {
390 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
393 int64_t Val = MCE->getValue();
394 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
396 bool isSImm7s16() const {
399 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
402 int64_t Val = MCE->getValue();
403 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
405 bool isImm0_7() const {
408 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
411 int64_t Val = MCE->getValue();
412 return (Val >= 0 && Val < 8);
414 bool isImm1_8() const {
417 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
420 int64_t Val = MCE->getValue();
421 return (Val > 0 && Val < 9);
423 bool isImm0_15() const {
426 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
429 int64_t Val = MCE->getValue();
430 return (Val >= 0 && Val < 16);
432 bool isImm1_16() const {
435 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
438 int64_t Val = MCE->getValue();
439 return (Val > 0 && Val < 17);
441 bool isImm0_31() const {
444 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
447 int64_t Val = MCE->getValue();
448 return (Val >= 0 && Val < 32);
450 bool isImm1_31() const {
453 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
456 int64_t Val = MCE->getValue();
457 return (Val >= 1 && Val < 32);
459 bool isImm1_32() const {
462 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
465 int64_t Val = MCE->getValue();
466 return (Val >= 1 && Val < 33);
468 bool isImm0_63() const {
471 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
474 int64_t Val = MCE->getValue();
475 return (Val >= 0 && Val < 64);
477 bool isImm1_63() const {
480 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
483 int64_t Val = MCE->getValue();
484 return (Val >= 1 && Val < 64);
486 bool isImm1_64() const {
489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 int64_t Val = MCE->getValue();
493 return (Val >= 1 && Val < 65);
495 bool isImm0_127() const {
498 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
501 int64_t Val = MCE->getValue();
502 return (Val >= 0 && Val < 128);
504 bool isImm0_255() const {
507 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
510 int64_t Val = MCE->getValue();
511 return (Val >= 0 && Val < 256);
513 bool isImm0_65535() const {
516 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
519 int64_t Val = MCE->getValue();
520 return (Val >= 0 && Val < 65536);
522 bool isLogicalImm32() const {
525 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
528 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
530 bool isLogicalImm64() const {
533 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
536 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
538 bool isSIMDImmType10() const {
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
546 bool isBranchTarget26() const {
549 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
552 int64_t Val = MCE->getValue();
555 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
557 bool isBranchTarget19() const {
560 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
563 int64_t Val = MCE->getValue();
566 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
568 bool isBranchTarget14() const {
571 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574 int64_t Val = MCE->getValue();
577 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
580 bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
584 ARM64MCExpr::VariantKind ELFRefKind;
585 MCSymbolRefExpr::VariantKind DarwinRefKind;
586 const MCConstantExpr *Addend;
587 if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
591 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
594 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
595 if (ELFRefKind == AllowedModifiers[i])
602 bool isMovZSymbolG3() const {
603 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
604 return isMovWSymbol(Variants);
607 bool isMovZSymbolG2() const {
608 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
609 ARM64MCExpr::VK_TPREL_G2,
610 ARM64MCExpr::VK_DTPREL_G2 };
611 return isMovWSymbol(Variants);
614 bool isMovZSymbolG1() const {
615 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
616 ARM64MCExpr::VK_GOTTPREL_G1,
617 ARM64MCExpr::VK_TPREL_G1,
618 ARM64MCExpr::VK_DTPREL_G1, };
619 return isMovWSymbol(Variants);
622 bool isMovZSymbolG0() const {
623 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
624 ARM64MCExpr::VK_TPREL_G0,
625 ARM64MCExpr::VK_DTPREL_G0 };
626 return isMovWSymbol(Variants);
629 bool isMovKSymbolG2() const {
630 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
631 return isMovWSymbol(Variants);
634 bool isMovKSymbolG1() const {
635 static ARM64MCExpr::VariantKind Variants[] = {
636 ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
637 ARM64MCExpr::VK_DTPREL_G1_NC
639 return isMovWSymbol(Variants);
642 bool isMovKSymbolG0() const {
643 static ARM64MCExpr::VariantKind Variants[] = {
644 ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
645 ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
647 return isMovWSymbol(Variants);
// Operand-kind predicates: FP immediate, barrier option, system register.
650 bool isFPImm() const { return Kind == k_FPImm; }
651 bool isBarrier() const { return Kind == k_Barrier; }
652 bool isSysReg() const { return Kind == k_SysReg; }
653 bool isMRSSystemRegister() const {
654 if (!isSysReg()) return false;
656 bool IsKnownRegister;
657 ARM64SysReg::MRSMapper().fromString(getSysReg(), IsKnownRegister);
659 return IsKnownRegister;
661 bool isMSRSystemRegister() const {
662 if (!isSysReg()) return false;
664 bool IsKnownRegister;
665 ARM64SysReg::MSRMapper().fromString(getSysReg(), IsKnownRegister);
667 return IsKnownRegister;
669 bool isSystemCPSRField() const {
670 if (!isSysReg()) return false;
672 bool IsKnownRegister;
673 ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
675 return IsKnownRegister;
// Both scalar and vector registers use Kind == k_Register; the two cases are
// distinguished by the Reg.isVector flag.
677 bool isReg() const { return Kind == k_Register && !Reg.isVector; }
678 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
680 /// Is this a vector list with the type implicit (presumably attached to the
681 /// instruction itself)?
682 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
683 return Kind == k_VectorList && VectorList.Count == NumRegs &&
684 !VectorList.ElementKind;
687 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
688 bool isTypedVectorList() const {
689 if (Kind != k_VectorList)
691 if (VectorList.Count != NumRegs)
693 if (VectorList.ElementKind != ElementKind)
695 return VectorList.NumElements == NumElements;
698 bool isVectorIndexB() const {
699 return Kind == k_VectorIndex && VectorIndex.Val < 16;
701 bool isVectorIndexH() const {
702 return Kind == k_VectorIndex && VectorIndex.Val < 8;
704 bool isVectorIndexS() const {
705 return Kind == k_VectorIndex && VectorIndex.Val < 4;
707 bool isVectorIndexD() const {
708 return Kind == k_VectorIndex && VectorIndex.Val < 2;
710 bool isToken() const { return Kind == k_Token; }
711 bool isTokenEqual(StringRef Str) const {
712 return Kind == k_Token && getToken() == Str;
// Operand-kind predicates: memory, system CR immediate, prefetch op, shifter.
714 bool isMem() const { return Kind == k_Memory; }
715 bool isSysCR() const { return Kind == k_SysCR; }
716 bool isPrefetch() const { return Kind == k_Prefetch; }
717 bool isShifter() const { return Kind == k_Shifter; }
718 bool isExtend() const {
719 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
721 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
722 return ST == ARM64_AM::LSL;
724 return Kind == k_Extend;
726 bool isExtend64() const {
727 if (Kind != k_Extend)
729 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
730 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
731 return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
733 bool isExtendLSL64() const {
734 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
736 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
737 return ST == ARM64_AM::LSL;
739 if (Kind != k_Extend)
741 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
742 return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
745 bool isArithmeticShifter() const {
749 // An arithmetic shifter is LSL, LSR, or ASR.
750 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
751 return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
754 bool isMovImm32Shifter() const {
758 // A MOVi shifter is LSL of 0, 16, 32, or 48.
759 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
760 if (ST != ARM64_AM::LSL)
762 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
763 return (Val == 0 || Val == 16);
766 bool isMovImm64Shifter() const {
770 // A MOVi shifter is LSL of 0 or 16.
771 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
772 if (ST != ARM64_AM::LSL)
774 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
775 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
778 bool isAddSubShifter() const {
782 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
783 unsigned Val = Shifter.Val;
784 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
785 (ARM64_AM::getShiftValue(Val) == 0 ||
786 ARM64_AM::getShiftValue(Val) == 12);
789 bool isLogicalVecShifter() const {
793 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
794 unsigned Val = Shifter.Val;
795 unsigned Shift = ARM64_AM::getShiftValue(Val);
796 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
797 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
800 bool isLogicalVecHalfWordShifter() const {
801 if (!isLogicalVecShifter())
804 // A logical vector shifter is a left shift by 0 or 8.
805 unsigned Val = Shifter.Val;
806 unsigned Shift = ARM64_AM::getShiftValue(Val);
807 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
808 (Shift == 0 || Shift == 8);
811 bool isMoveVecShifter() const {
815 // A logical vector shifter is a left shift by 8 or 16.
816 unsigned Val = Shifter.Val;
817 unsigned Shift = ARM64_AM::getShiftValue(Val);
818 return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
819 (Shift == 8 || Shift == 16);
822 bool isMemoryRegisterOffset8() const {
823 return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
826 bool isMemoryRegisterOffset16() const {
827 return isMem() && Mem.Mode == RegisterOffset &&
828 (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
831 bool isMemoryRegisterOffset32() const {
832 return isMem() && Mem.Mode == RegisterOffset &&
833 (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
836 bool isMemoryRegisterOffset64() const {
837 return isMem() && Mem.Mode == RegisterOffset &&
838 (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
841 bool isMemoryRegisterOffset128() const {
842 return isMem() && Mem.Mode == RegisterOffset &&
843 (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
846 bool isMemoryUnscaled() const {
849 if (Mem.Mode != ImmediateOffset)
853 // Make sure the immediate value is valid.
854 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
857 // The offset must fit in a signed 9-bit unscaled immediate.
858 int64_t Value = CE->getValue();
859 return (Value >= -256 && Value < 256);
861 // Fallback unscaled operands are for aliases of LDR/STR that fall back
862 // to LDUR/STUR when the offset is not legal for the former but is for
863 // the latter. As such, in addition to checking for being a legal unscaled
864 // address, also check that it is not a legal scaled address. This avoids
865 // ambiguity in the matcher.
866 bool isMemoryUnscaledFB8() const {
867 return isMemoryUnscaled() && !isMemoryIndexed8();
869 bool isMemoryUnscaledFB16() const {
870 return isMemoryUnscaled() && !isMemoryIndexed16();
872 bool isMemoryUnscaledFB32() const {
873 return isMemoryUnscaled() && !isMemoryIndexed32();
875 bool isMemoryUnscaledFB64() const {
876 return isMemoryUnscaled() && !isMemoryIndexed64();
878 bool isMemoryUnscaledFB128() const {
879 return isMemoryUnscaled() && !isMemoryIndexed128();
881 bool isMemoryIndexed(unsigned Scale) const {
884 if (Mem.Mode != ImmediateOffset)
888 // Make sure the immediate value is valid.
889 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
892 // The offset must be a positive multiple of the scale and in range of
893 // encoding with a 12-bit immediate.
894 int64_t Value = CE->getValue();
895 return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
898 // If it's not a constant, check for some expressions we know.
899 const MCExpr *Expr = Mem.OffsetImm;
900 ARM64MCExpr::VariantKind ELFRefKind;
901 MCSymbolRefExpr::VariantKind DarwinRefKind;
902 const MCConstantExpr *Addend;
903 if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
905 // If we don't understand the expression, assume the best and
906 // let the fixup and relocation code deal with it.
910 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
911 ELFRefKind == ARM64MCExpr::VK_LO12 ||
912 ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
913 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
914 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
915 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
916 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
917 ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
918 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
919 // Note that we don't range-check the addend. It's adjusted modulo page
920 // size when converted, so there is no "out of range" condition when using
922 int64_t Value = Addend ? Addend->getValue() : 0;
923 return Value >= 0 && (Value % Scale) == 0;
924 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
925 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
926 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Width-specific wrappers: the argument is the access size in bytes, used by
// isMemoryIndexed as the scale for its unsigned 12-bit immediate range check.
932 bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
933 bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
934 bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
935 bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
936 bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
937 bool isMemoryNoIndex() const {
940 if (Mem.Mode != ImmediateOffset)
945 // Make sure the immediate value is valid. Only zero is allowed.
946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
947 if (!CE || CE->getValue() != 0)
951 bool isMemorySIMDNoIndex() const {
954 if (Mem.Mode != ImmediateOffset)
956 return Mem.OffsetImm == 0;
958 bool isMemoryIndexedSImm9() const {
959 if (!isMem() || Mem.Mode != ImmediateOffset)
963 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
964 assert(CE && "Non-constant pre-indexed offset!");
965 int64_t Value = CE->getValue();
966 return Value >= -256 && Value <= 255;
968 bool isMemoryIndexed32SImm7() const {
969 if (!isMem() || Mem.Mode != ImmediateOffset)
973 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
974 assert(CE && "Non-constant pre-indexed offset!");
975 int64_t Value = CE->getValue();
976 return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
978 bool isMemoryIndexed64SImm7() const {
979 if (!isMem() || Mem.Mode != ImmediateOffset)
983 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
984 assert(CE && "Non-constant pre-indexed offset!");
985 int64_t Value = CE->getValue();
986 return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
988 bool isMemoryIndexed128SImm7() const {
989 if (!isMem() || Mem.Mode != ImmediateOffset)
993 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
994 assert(CE && "Non-constant pre-indexed offset!");
995 int64_t Value = CE->getValue();
996 return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
999 bool isAdrpLabel() const {
1000 // Validation was handled during parsing, so we just sanity check that
1001 // something didn't go haywire.
1005 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1006 int64_t Val = CE->getValue();
1007 int64_t Min = - (4096 * (1LL << (21 - 1)));
1008 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1009 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1015 bool isAdrLabel() const {
1016 // Validation was handled during parsing, so we just sanity check that
1017 // something didn't go haywire.
1021 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1022 int64_t Val = CE->getValue();
1023 int64_t Min = - (1LL << (21 - 1));
1024 int64_t Max = ((1LL << (21 - 1)) - 1);
1025 return Val >= Min && Val <= Max;
1031 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1032 // Add as immediates when possible. Null MCExpr = 0.
1034 Inst.addOperand(MCOperand::CreateImm(0));
1035 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1036 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1038 Inst.addOperand(MCOperand::CreateExpr(Expr));
1041 void addRegOperands(MCInst &Inst, unsigned N) const {
1042 assert(N == 1 && "Invalid number of operands!");
1043 Inst.addOperand(MCOperand::CreateReg(getReg()));
1046 void addVectorRegOperands(MCInst &Inst, unsigned N) const {
1047 assert(N == 1 && "Invalid number of operands!");
1048 Inst.addOperand(MCOperand::CreateReg(getReg()));
1051 template <unsigned NumRegs>
1052 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1053 assert(N == 1 && "Invalid number of operands!");
1054 static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1055 ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1056 unsigned FirstReg = FirstRegs[NumRegs - 1];
1059 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1062 template <unsigned NumRegs>
1063 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1064 assert(N == 1 && "Invalid number of operands!");
1065 static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1066 ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1067 unsigned FirstReg = FirstRegs[NumRegs - 1];
1070 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1073 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1074 assert(N == 1 && "Invalid number of operands!");
1075 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1078 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1079 assert(N == 1 && "Invalid number of operands!");
1080 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1083 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1084 assert(N == 1 && "Invalid number of operands!");
1085 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1088 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1089 assert(N == 1 && "Invalid number of operands!");
1090 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1093 void addImmOperands(MCInst &Inst, unsigned N) const {
1094 assert(N == 1 && "Invalid number of operands!");
1095 // If this is a pageoff symrefexpr with an addend, adjust the addend
1096 // to be only the page-offset portion. Otherwise, just add the expr
1098 addExpr(Inst, getImm());
1101 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1102 assert(N == 1 && "Invalid number of operands!");
1103 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1105 addExpr(Inst, getImm());
1107 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1110 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1111 addImmOperands(Inst, N);
1114 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1115 assert(N == 1 && "Invalid number of operands!");
1116 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1117 assert(MCE && "Invalid constant immediate operand!");
1118 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1121 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1122 assert(N == 1 && "Invalid number of operands!");
1123 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1124 assert(MCE && "Invalid constant immediate operand!");
1125 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1128 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1129 assert(N == 1 && "Invalid number of operands!");
1130 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1131 assert(MCE && "Invalid constant immediate operand!");
1132 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1135 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1136 assert(N == 1 && "Invalid number of operands!");
1137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1138 assert(MCE && "Invalid constant immediate operand!");
1139 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1142 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1143 assert(N == 1 && "Invalid number of operands!");
1144 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1145 assert(MCE && "Invalid constant immediate operand!");
1146 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1149 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1150 assert(N == 1 && "Invalid number of operands!");
1151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1152 assert(MCE && "Invalid constant immediate operand!");
1153 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1156 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1158 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1159 assert(MCE && "Invalid constant immediate operand!");
1160 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1163 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1164 assert(N == 1 && "Invalid number of operands!");
1165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1166 assert(MCE && "Invalid constant immediate operand!");
1167 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1170 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1171 assert(N == 1 && "Invalid number of operands!");
1172 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1173 assert(MCE && "Invalid constant immediate operand!");
1174 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1177 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1178 assert(N == 1 && "Invalid number of operands!");
1179 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1180 assert(MCE && "Invalid constant immediate operand!");
1181 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1184 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1185 assert(N == 1 && "Invalid number of operands!");
1186 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1187 assert(MCE && "Invalid constant immediate operand!");
1188 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1191 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1192 assert(N == 1 && "Invalid number of operands!");
1193 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1194 assert(MCE && "Invalid constant immediate operand!");
1195 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1198 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1199 assert(N == 1 && "Invalid number of operands!");
1200 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1201 assert(MCE && "Invalid constant immediate operand!");
1202 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1205 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1206 assert(N == 1 && "Invalid number of operands!");
1207 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1208 assert(MCE && "Invalid constant immediate operand!");
1209 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1212 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1213 assert(N == 1 && "Invalid number of operands!");
1214 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1215 assert(MCE && "Invalid constant immediate operand!");
1216 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1219 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1220 assert(N == 1 && "Invalid number of operands!");
1221 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1222 assert(MCE && "Invalid constant immediate operand!");
1223 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1226 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1227 assert(N == 1 && "Invalid number of operands!");
1228 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1229 assert(MCE && "Invalid constant immediate operand!");
1230 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1233 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1234 assert(N == 1 && "Invalid number of operands!");
1235 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1236 assert(MCE && "Invalid logical immediate operand!");
1237 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1238 Inst.addOperand(MCOperand::CreateImm(encoding));
1241 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1242 assert(N == 1 && "Invalid number of operands!");
1243 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1244 assert(MCE && "Invalid logical immediate operand!");
1245 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1246 Inst.addOperand(MCOperand::CreateImm(encoding));
1249 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1250 assert(N == 1 && "Invalid number of operands!");
1251 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1252 assert(MCE && "Invalid immediate operand!");
1253 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1254 Inst.addOperand(MCOperand::CreateImm(encoding));
/// Add a 26-bit branch target. Constant targets drop the low two bits (the
/// encoding is word-scaled); symbolic targets go through as expressions.
1257 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1258 // Branch operands don't encode the low bits, so shift them off
1259 // here. If it's a label, however, just put it on directly as there's
1260 // not enough information now to do anything.
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// NOTE(review): the conditional selecting the label path below vs. the
// constant path at the end is elided from this view — confirm against the
// full file before editing.
1264 addExpr(Inst, getImm());
1267 assert(MCE && "Invalid constant immediate operand!");
1268 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
/// Add a 19-bit branch target (conditional branches / CBZ family). Constant
/// targets drop the low two bits; symbolic targets go through as expressions.
1271 void addBranchTarget19Operands(MCInst &Inst, unsigned N) const {
1272 // Branch operands don't encode the low bits, so shift them off
1273 // here. If it's a label, however, just put it on directly as there's
1274 // not enough information now to do anything.
1275 assert(N == 1 && "Invalid number of operands!");
1276 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// NOTE(review): the label-vs-constant conditional is elided from this view.
1278 addExpr(Inst, getImm());
1281 assert(MCE && "Invalid constant immediate operand!");
1282 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
/// Add a 14-bit branch target (TBZ/TBNZ). Constant targets drop the low two
/// bits; symbolic targets go through as expressions.
1285 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1286 // Branch operands don't encode the low bits, so shift them off
1287 // here. If it's a label, however, just put it on directly as there's
1288 // not enough information now to do anything.
1289 assert(N == 1 && "Invalid number of operands!");
1290 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// NOTE(review): the label-vs-constant conditional is elided from this view.
1292 addExpr(Inst, getImm());
1295 assert(MCE && "Invalid constant immediate operand!");
1296 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1299 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1300 assert(N == 1 && "Invalid number of operands!");
1301 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1304 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1305 assert(N == 1 && "Invalid number of operands!");
1306 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
/// Map the named system register to its MRS encoding and add it.
1309 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
// NOTE(review): the declaration of 'Valid' (and any check of it) is elided
// from this view — confirm against the full file.
1313 uint32_t Bits = ARM64SysReg::MRSMapper().fromString(getSysReg(), Valid);
1315 Inst.addOperand(MCOperand::CreateImm(Bits));
/// Map the named system register to its MSR encoding and add it.
1318 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1319 assert(N == 1 && "Invalid number of operands!");
// NOTE(review): the declaration of 'Valid' (and any check of it) is elided
// from this view — confirm against the full file.
1322 uint32_t Bits = ARM64SysReg::MSRMapper().fromString(getSysReg(), Valid);
1324 Inst.addOperand(MCOperand::CreateImm(Bits));
/// Map the named PSTATE field (MSR immediate form) to its encoding and add it.
1327 void addSystemCPSRFieldOperands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
// NOTE(review): the declaration of 'Valid' (and any check of it) is elided
// from this view — confirm against the full file.
1331 uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);
1333 Inst.addOperand(MCOperand::CreateImm(Bits));
1336 void addSysCROperands(MCInst &Inst, unsigned N) const {
1337 assert(N == 1 && "Invalid number of operands!");
1338 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1341 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1342 assert(N == 1 && "Invalid number of operands!");
1343 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1346 void addShifterOperands(MCInst &Inst, unsigned N) const {
1347 assert(N == 1 && "Invalid number of operands!");
1348 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1351 void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
1352 assert(N == 1 && "Invalid number of operands!");
1353 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1356 void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1361 void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
1362 assert(N == 1 && "Invalid number of operands!");
1363 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1366 void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
1367 assert(N == 1 && "Invalid number of operands!");
1368 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1371 void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
1372 assert(N == 1 && "Invalid number of operands!");
1373 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1376 void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
1377 assert(N == 1 && "Invalid number of operands!");
1378 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1381 void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
1382 assert(N == 1 && "Invalid number of operands!");
1383 Inst.addOperand(MCOperand::CreateImm(getShifter()));
/// Add an arithmetic-extend operand; an "lsl #n" shifter is accepted here as
/// an alias for UXTW #n.
1386 void addExtendOperands(MCInst &Inst, unsigned N) const {
1387 assert(N == 1 && "Invalid number of operands!");
1388 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
// NOTE(review): the conditional selecting the shifter-alias path below vs.
// the plain extend path at the end is elided from this view.
1390 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1391 unsigned imm = getArithExtendImm(ARM64_AM::UXTW,
1392 ARM64_AM::getShiftValue(getShifter()));
1393 Inst.addOperand(MCOperand::CreateImm(imm));
1395 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1398 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1399 assert(N == 1 && "Invalid number of operands!");
1400 Inst.addOperand(MCOperand::CreateImm(getExtend()));
/// Add an arithmetic-extend operand; an "lsl #n" shifter is accepted here as
/// an alias for UXTX #n.
1403 void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
1404 assert(N == 1 && "Invalid number of operands!");
1405 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
// NOTE(review): the conditional selecting the shifter-alias path below vs.
// the plain extend path at the end is elided from this view.
1407 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1408 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1409 ARM64_AM::getShiftValue(getShifter()));
1410 Inst.addOperand(MCOperand::CreateImm(imm));
1412 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1415 void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
1416 assert(N == 3 && "Invalid number of operands!");
1418 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1419 Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
1420 unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
1421 Inst.addOperand(MCOperand::CreateImm(ExtendImm));
// 8-bit access: a shift only applies if it was written explicitly (lsl #0).
1424 void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
1425 addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
// 16-bit access: shift amount 1 selects a scaled index.
1428 void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
1429 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
// 32-bit access: shift amount 2 selects a scaled index.
1432 void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
1433 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
// 64-bit access: shift amount 3 selects a scaled index.
1436 void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
1437 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
// 128-bit access: shift amount 4 selects a scaled index.
1440 void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
1441 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
/// Emit base register + scaled offset for an immediate-offset memory operand.
/// Constant offsets are divided by Scale (the encoding drops the low bits);
/// symbolic offsets are either left for the linker (@pageoff with addend) or
/// wrapped in a divide expression for MC fixup evaluation.
1444 void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
1445 unsigned Scale) const {
1446 // Add the base register operand.
1447 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1449 if (!Mem.OffsetImm) {
1450 // There isn't an offset.
1451 Inst.addOperand(MCOperand::CreateImm(0));
// NOTE(review): the early return closing the no-offset case is elided from
// this view, as are several other control-flow lines below.
1455 // Add the offset operand.
1456 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
1457 assert(CE->getValue() % Scale == 0 &&
1458 "Offset operand must be multiple of the scale!");
1460 // The MCInst offset operand doesn't include the low bits (like the
1461 // instruction encoding).
1462 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
1465 // If this is a pageoff symrefexpr with an addend, the linker will
1466 // do the scaling of the addend.
1468 // Otherwise we don't know what this is, so just add the scaling divide to
1469 // the expression and let the MC fixup evaluation code deal with it.
1470 const MCExpr *Expr = Mem.OffsetImm;
1471 ARM64MCExpr::VariantKind ELFRefKind;
1472 MCSymbolRefExpr::VariantKind DarwinRefKind;
1473 const MCConstantExpr *Addend;
1475 (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
1477 (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
1478 Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
1482 Inst.addOperand(MCOperand::CreateExpr(Expr));
/// Emit base register + raw (unscaled) immediate offset, e.g. for LDUR/STUR.
1485 void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
1486 assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
1487 // Add the base register operand.
1488 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1490 // Add the offset operand.
// NOTE(review): the conditional separating the missing-offset case (imm 0)
// from the constant-offset case below is elided from this view.
1492 Inst.addOperand(MCOperand::CreateImm(0));
1494 // Only constant offsets supported.
1495 const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
1496 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
// 16-byte-scaled base + immediate addressing.
1500 void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
1501 assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
1502 addMemoryIndexedOperands(Inst, N, 16);
// 8-byte-scaled base + immediate addressing.
1505 void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
1506 assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
1507 addMemoryIndexedOperands(Inst, N, 8);
// 4-byte-scaled base + immediate addressing.
1510 void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
1511 assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
1512 addMemoryIndexedOperands(Inst, N, 4);
// 2-byte-scaled base + immediate addressing.
1515 void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
1516 assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
1517 addMemoryIndexedOperands(Inst, N, 2);
// Byte-scaled (scale 1) base + immediate addressing.
1520 void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
1521 assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
1522 addMemoryIndexedOperands(Inst, N, 1);
1525 void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
1526 assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
1527 // Add the base register operand (the offset is always zero, so ignore it).
1528 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1531 void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
1532 assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
1533 // Add the base register operand (the offset is always zero, so ignore it).
1534 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
/// Emit base register + immediate offset for writeback (pre/post-indexed)
/// addressing; the offset must be a multiple of Scale.
1537 void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
1538 unsigned Scale) const {
1539 assert(N == 2 && "Invalid number of operands!");
1541 // Add the base register operand.
1542 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1544 // Add the offset operand.
// NOTE(review): the declaration/initialization of 'Offset' (and possibly its
// scaling before emission) is elided from this view — confirm against the
// full file before editing.
1546 if (Mem.OffsetImm) {
1547 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1548 assert(CE && "Non-constant indexed offset operand!");
1549 Offset = CE->getValue();
1553 assert(Offset % Scale == 0 &&
1554 "Offset operand must be a multiple of the scale!");
1558 Inst.addOperand(MCOperand::CreateImm(Offset));
// Unscaled (scale 1) signed 9-bit writeback offset.
1561 void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
1562 addMemoryWritebackIndexedOperands(Inst, N, 1);
// 4-byte-scaled signed 7-bit writeback offset (32-bit pair accesses).
1565 void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
1566 addMemoryWritebackIndexedOperands(Inst, N, 4);
// 8-byte-scaled signed 7-bit writeback offset (64-bit pair accesses).
1569 void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
1570 addMemoryWritebackIndexedOperands(Inst, N, 8);
// 16-byte-scaled signed 7-bit writeback offset (128-bit pair accesses).
1573 void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
1574 addMemoryWritebackIndexedOperands(Inst, N, 16);
// Debug/diagnostic dump of this operand; defined out-of-line later in the file.
1577 virtual void print(raw_ostream &OS) const;
/// Build a token operand over Str; IsSuffix marks mnemonic-suffix tokens.
1579 static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1581 ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
1582 Op->Tok.Data = Str.data();
1583 Op->Tok.Length = Str.size();
1584 Op->Tok.IsSuffix = IsSuffix;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a register operand; isVector distinguishes vector registers.
1590 static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1591 SMLoc E, MCContext &Ctx) {
1592 ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
1593 Op->Reg.RegNum = RegNum;
1594 Op->Reg.isVector = isVector;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a vector-list operand (Count consecutive registers starting at
/// RegNum, each with NumElements lanes of kind ElementKind).
1600 static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1601 unsigned NumElements, char ElementKind,
1602 SMLoc S, SMLoc E, MCContext &Ctx) {
1603 ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
1604 Op->VectorList.RegNum = RegNum;
1605 Op->VectorList.Count = Count;
1606 Op->VectorList.NumElements = NumElements;
1607 Op->VectorList.ElementKind = ElementKind;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a vector lane-index operand ([Idx]).
1613 static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1615 ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
1616 Op->VectorIndex.Val = Idx;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build an immediate operand from an arbitrary MCExpr.
1622 static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1624 ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
// NOTE(review): value/location assignment and the return are elided from
// this view.
/// Build a floating-point-immediate operand from its 8-bit encoding.
1631 static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1632 ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
1633 Op->FPImm.Val = Val;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a barrier-option operand.
1639 static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1640 ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
1641 Op->Barrier.Val = Val;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a system-register operand; stores the raw name for later mapping.
1647 static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S, MCContext &Ctx) {
1648 ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
1649 Op->SysReg.Data = Str.data();
1650 Op->SysReg.Length = Str.size();
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build an immediate-offset memory operand ([base, #off]); extend/shift
/// fields are initialized to their neutral values.
1656 static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
1657 SMLoc S, SMLoc E, SMLoc OffsetLoc,
1659 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1660 Op->Mem.BaseRegNum = BaseRegNum;
1661 Op->Mem.OffsetRegNum = 0;
1662 Op->Mem.OffsetImm = Off;
1663 Op->Mem.ExtType = ARM64_AM::UXTX;
1664 Op->Mem.ShiftVal = 0;
1665 Op->Mem.ExplicitShift = false;
1666 Op->Mem.Mode = ImmediateOffset;
1667 Op->OffsetLoc = OffsetLoc;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a register-offset memory operand ([base, offreg, ext #shift]).
1673 static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
1674 ARM64_AM::ExtendType ExtType,
1675 unsigned ShiftVal, bool ExplicitShift,
1676 SMLoc S, SMLoc E, MCContext &Ctx) {
1677 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1678 Op->Mem.BaseRegNum = BaseReg;
1679 Op->Mem.OffsetRegNum = OffsetReg;
1680 Op->Mem.OffsetImm = 0;
1681 Op->Mem.ExtType = ExtType;
1682 Op->Mem.ShiftVal = ShiftVal;
1683 Op->Mem.ExplicitShift = ExplicitShift;
1684 Op->Mem.Mode = RegisterOffset;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a system-instruction CR-field operand (c0..c15).
1690 static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1692 ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
1693 Op->SysCRImm.Val = Val;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a prefetch-operation operand.
1699 static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1700 ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
1701 Op->Prefetch.Val = Val;
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build a shifter operand, packing type + amount via getShifterImm.
1707 static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
1708 SMLoc S, SMLoc E, MCContext &Ctx) {
1709 ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
1710 Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
// NOTE(review): location bookkeeping and the return are elided from this view.
/// Build an extend operand, packing type + amount via getArithExtendImm.
1716 static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
1717 SMLoc S, SMLoc E, MCContext &Ctx) {
1718 ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
1719 Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
// NOTE(review): location bookkeeping and the return are elided from this view.
1726 } // end anonymous namespace.
1728 /// isFPR32Register - Check if a register is in the FPR32 register class.
1729 /// (The parser does not have the target register info to check the register
1730 /// class directly.)
1731 static bool isFPR32Register(unsigned Reg) {
1732 using namespace ARM64;
// NOTE(review): the switch scaffolding (default/returns) is elided from this
// view; S0..S31 are the members of FPR32.
1736 case S0: case S1: case S2: case S3: case S4: case S5: case S6:
1737 case S7: case S8: case S9: case S10: case S11: case S12: case S13:
1738 case S14: case S15: case S16: case S17: case S18: case S19: case S20:
1739 case S21: case S22: case S23: case S24: case S25: case S26: case S27:
1740 case S28: case S29: case S30: case S31:
1746 /// isGPR32Register - Check if a register is in the GPR32sp register class.
1747 /// (The parser does not have the target register info to check the register
1748 /// class directly.)
1749 static bool isGPR32Register(unsigned Reg) {
1750 using namespace ARM64;
// NOTE(review): the switch scaffolding (default/returns) is elided from this
// view; note both WSP and WZR are accepted.
1754 case W0: case W1: case W2: case W3: case W4: case W5: case W6:
1755 case W7: case W8: case W9: case W10: case W11: case W12: case W13:
1756 case W14: case W15: case W16: case W17: case W18: case W19: case W20:
1757 case W21: case W22: case W23: case W24: case W25: case W26: case W27:
1758 case W28: case W29: case W30: case WSP: case WZR:
// Check if a register is in the GPR64 class; FP/LR/SP/XZR are included.
1764 static bool isGPR64Register(unsigned Reg) {
1765 using namespace ARM64;
// NOTE(review): the switch scaffolding (default/returns) is elided from this
// view.
1767 case X0: case X1: case X2: case X3: case X4: case X5: case X6:
1768 case X7: case X8: case X9: case X10: case X11: case X12: case X13:
1769 case X14: case X15: case X16: case X17: case X18: case X19: case X20:
1770 case X21: case X22: case X23: case X24: case X25: case X26: case X27:
1771 case X28: case FP: case LR: case SP: case XZR:
/// Dump a human-readable description of this operand for debugging.
/// NOTE(review): this is a switch over the operand Kind; most case labels,
/// breaks and braces are elided from this view — each group of statements
/// below corresponds to one operand kind.
1778 void ARM64Operand::print(raw_ostream &OS) const {
1781 OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
1786 StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid);
1788 OS << "<barrier " << Name << ">";
1790 OS << "<barrier invalid #" << getBarrier() << ">";
1794 getImm()->print(OS);
1800 OS << "<register " << getReg() << ">";
1802 case k_VectorList: {
1803 OS << "<vectorlist ";
1804 unsigned Reg = getVectorListStart();
1805 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1806 OS << Reg + i << " ";
1811 OS << "<vectorindex " << getVectorIndex() << ">";
1814 OS << "<sysreg: " << getSysReg() << '>';
1817 OS << "'" << getToken() << "'";
1820 OS << "c" << getSysCR();
1824 StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1826 OS << "<prfop " << Name << ">";
1828 OS << "<prfop invalid #" << getPrefetch() << ">";
1832 unsigned Val = getShifter();
1833 OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
1834 << ARM64_AM::getShiftValue(Val) << ">";
1838 unsigned Val = getExtend();
1839 OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
1840 << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
1846 /// @name Auto-generated Match Functions
1849 static unsigned MatchRegisterName(StringRef Name);
/// Map a vector register name ("v0".."v31") to the corresponding Q register.
/// NOTE(review): the StringSwitch default (presumably 0 = no match) and the
/// closing of the function are elided from this view.
1853 static unsigned matchVectorRegName(StringRef Name) {
1854 return StringSwitch<unsigned>(Name)
1855 .Case("v0", ARM64::Q0)
1856 .Case("v1", ARM64::Q1)
1857 .Case("v2", ARM64::Q2)
1858 .Case("v3", ARM64::Q3)
1859 .Case("v4", ARM64::Q4)
1860 .Case("v5", ARM64::Q5)
1861 .Case("v6", ARM64::Q6)
1862 .Case("v7", ARM64::Q7)
1863 .Case("v8", ARM64::Q8)
1864 .Case("v9", ARM64::Q9)
1865 .Case("v10", ARM64::Q10)
1866 .Case("v11", ARM64::Q11)
1867 .Case("v12", ARM64::Q12)
1868 .Case("v13", ARM64::Q13)
1869 .Case("v14", ARM64::Q14)
1870 .Case("v15", ARM64::Q15)
1871 .Case("v16", ARM64::Q16)
1872 .Case("v17", ARM64::Q17)
1873 .Case("v18", ARM64::Q18)
1874 .Case("v19", ARM64::Q19)
1875 .Case("v20", ARM64::Q20)
1876 .Case("v21", ARM64::Q21)
1877 .Case("v22", ARM64::Q22)
1878 .Case("v23", ARM64::Q23)
1879 .Case("v24", ARM64::Q24)
1880 .Case("v25", ARM64::Q25)
1881 .Case("v26", ARM64::Q26)
1882 .Case("v27", ARM64::Q27)
1883 .Case("v28", ARM64::Q28)
1884 .Case("v29", ARM64::Q29)
1885 .Case("v30", ARM64::Q30)
1886 .Case("v31", ARM64::Q31)
/// Return true if Name is a recognized vector arrangement suffix (".8b",
/// ".4s", etc.); comparison is case-insensitive.
1890 static bool isValidVectorKind(StringRef Name) {
1891 return StringSwitch<bool>(Name.lower())
// NOTE(review): the accepted .Case entries are elided from this view.
1901 // Accept the width neutral ones, too, for verbose syntax. If those
1902 // aren't used in the right places, the token operand won't match so
1903 // all will work out.
/// Decompose an already-validated vector kind (e.g. ".4s") into its lane
/// count (NumElements) and element-kind character (ElementKind).
1911 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1912 char &ElementKind) {
1913 assert(isValidVectorKind(Name))
1915 ElementKind = Name.lower()[Name.size() - 1];
// Width-neutral kinds like ".b" have no lane count to parse.
1918 if (Name.size() == 2)
1921 // Parse the lane count
1922 Name = Name.drop_front();
1923 while (isdigit(Name.front())) {
1924 NumElements = 10 * NumElements + (Name.front() - '0');
1925 Name = Name.drop_front();
// MCTargetAsmParser hook: returns true on failure (no register matched).
1929 bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1931 StartLoc = getLoc();
1932 RegNo = tryParseRegister();
1933 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1934 return (RegNo == (unsigned)-1);
1937 /// tryParseRegister - Try to parse a register name. The token must be an
1938 /// Identifier when called, and if it is a register name the token is eaten and
1939 /// the matched register number is returned (the operand list is not modified).
// Returns the matched register number, or -1 (with the token left in place)
// when the identifier is not a register name.
1940 int ARM64AsmParser::tryParseRegister() {
1941 const AsmToken &Tok = Parser.getTok();
1942 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1944 std::string lowerCase = Tok.getString().lower();
1945 unsigned RegNum = MatchRegisterName(lowerCase);
1946 // Also handle a few aliases of registers.
// NOTE(review): the guard around this alias fallback (taken only when the
// tablegen matcher failed) and the StringSwitch default are elided here.
1948 RegNum = StringSwitch<unsigned>(lowerCase)
1949 .Case("fp", ARM64::FP)
1950 .Case("lr", ARM64::LR)
1951 .Case("x31", ARM64::XZR)
1952 .Case("w31", ARM64::WZR)
1958 Parser.Lex(); // Eat identifier token.
1962 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1963 /// kind specifier. If it is a register specifier, eat the token and return it.
// On success eats the register token and returns the register number; on
// failure emits a diagnostic. Kind receives the ".<kind>" suffix if present.
1964 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1965 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1966 TokError("vector register expected");
// NOTE(review): the error-return paths of this function are elided from
// this view.
1970 StringRef Name = Parser.getTok().getString();
1971 // If there is a kind specifier, it's separated from the register name by
1973 size_t Start = 0, Next = Name.find('.');
1974 StringRef Head = Name.slice(Start, Next);
1975 unsigned RegNum = matchVectorRegName(Head);
1977 if (Next != StringRef::npos) {
1978 Kind = Name.slice(Next, StringRef::npos);
1979 if (!isValidVectorKind(Kind)) {
1980 TokError("invalid vector kind qualifier");
1984 Parser.Lex(); // Eat the register token.
1989 TokError("vector register expected");
// Map a "cN"/"CN" name (N = 0..15) to its numeric value.
1993 static int MatchSysCRName(StringRef Name) {
1994 // Use the same layout as the tablegen'erated register name matcher. Ugly,
// NOTE(review): nearly all of the per-length matching cases are elided from
// this view — only fragments of the size-2 and size-3 handling remain.
1996 switch (Name.size()) {
2000 if (Name[0] != 'c' && Name[0] != 'C')
2028 if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
2049 llvm_unreachable("Unhandled SysCR operand string!");
2053 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2054 ARM64AsmParser::OperandMatchResultTy
2055 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
// NOTE(review): the declaration of 'S' (start location) and the check of the
// MatchSysCRName result are elided from this view.
2057 const AsmToken &Tok = Parser.getTok();
2058 if (Tok.isNot(AsmToken::Identifier))
2059 return MatchOperand_NoMatch;
2061 int Num = MatchSysCRName(Tok.getString());
2063 return MatchOperand_NoMatch;
2065 Parser.Lex(); // Eat identifier token.
2066 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
2067 return MatchOperand_Success;
2070 /// tryParsePrefetch - Try to parse a prefetch operand.
2071 ARM64AsmParser::OperandMatchResultTy
2072 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
// NOTE(review): the declaration of 'S', the range guard around 'prfop', and
// the 'Valid' declaration are elided from this view.
2074 const AsmToken &Tok = Parser.getTok();
2075 // Either an identifier for named values or a 5-bit immediate.
2076 bool Hash = Tok.is(AsmToken::Hash);
2077 if (Hash || Tok.is(AsmToken::Integer)) {
2079 Parser.Lex(); // Eat hash token.
2080 const MCExpr *ImmVal;
2081 if (getParser().parseExpression(ImmVal))
2082 return MatchOperand_ParseFail;
2084 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2086 TokError("immediate value expected for prefetch operand");
2087 return MatchOperand_ParseFail;
2089 unsigned prfop = MCE->getValue();
2091 TokError("prefetch operand out of range, [0,31] expected");
2092 return MatchOperand_ParseFail;
2095 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2096 return MatchOperand_Success;
// Fall through: named prefetch hint (e.g. pldl1keep).
2099 if (Tok.isNot(AsmToken::Identifier)) {
2100 TokError("pre-fetch hint expected");
2101 return MatchOperand_ParseFail;
2105 unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
2107 TokError("pre-fetch hint expected");
2108 return MatchOperand_ParseFail;
2111 Parser.Lex(); // Eat identifier token.
2112 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2113 return MatchOperand_Success;
2116 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// NOTE(review): the declarations of 'S' and 'Expr' are elided from this view.
2118 ARM64AsmParser::OperandMatchResultTy
2119 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2123 if (Parser.getTok().is(AsmToken::Hash)) {
2124 Parser.Lex(); // Eat hash token.
2127 if (parseSymbolicImmVal(Expr))
2128 return MatchOperand_ParseFail;
2130 ARM64MCExpr::VariantKind ELFRefKind;
2131 MCSymbolRefExpr::VariantKind DarwinRefKind;
2132 const MCConstantExpr *Addend;
2133 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2134 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2135 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2136 // No modifier was specified at all; this is the syntax for an ELF basic
2137 // ADRP relocation (unfortunately).
2138 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
2139 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2140 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// NOTE(review): the addend-nonzero condition of this branch is elided here.
2142 Error(S, "gotpage label reference not allowed an addend");
2143 return MatchOperand_ParseFail;
2144 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2145 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2146 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2147 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2148 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2149 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
2150 // The operand must be an @page or @gotpage qualified symbolref.
2151 Error(S, "page or gotpage label reference expected");
2152 return MatchOperand_ParseFail;
2156 // We have either a label reference possibly with addend or an immediate. The
2157 // addend is a raw value here. The linker will adjust it to only reference the
2159 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2160 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2162 return MatchOperand_Success;
2165 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// NOTE(review): the declarations of 'S' and 'Expr' are elided from this view.
2167 ARM64AsmParser::OperandMatchResultTy
2168 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2172 if (Parser.getTok().is(AsmToken::Hash)) {
2173 Parser.Lex(); // Eat hash token.
2176 if (getParser().parseExpression(Expr))
2177 return MatchOperand_ParseFail;
2179 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2180 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2182 return MatchOperand_Success;
2185 /// tryParseFPImm - A floating point immediate expression operand.
2186 ARM64AsmParser::OperandMatchResultTy
2187 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
// NOTE(review): the declaration of 'S' and several control-flow lines are
// elided from this view.
2191 if (Parser.getTok().is(AsmToken::Hash)) {
2192 Parser.Lex(); // Eat '#'
2196 // Handle negation, as that still comes through as a separate token.
2197 bool isNegative = false;
2198 if (Parser.getTok().is(AsmToken::Minus)) {
2202 const AsmToken &Tok = Parser.getTok();
2203 if (Tok.is(AsmToken::Real)) {
2204 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2205 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2206 // If we had a '-' in front, toggle the sign bit.
2207 IntVal ^= (uint64_t)isNegative << 63;
2208 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2209 Parser.Lex(); // Eat the token.
2210 // Check for out of range values. As an exception, we let Zero through,
2211 // as we handle that special case in post-processing before matching in
2212 // order to use the zero register for it.
2213 if (Val == -1 && !RealVal.isZero()) {
2214 TokError("floating point value out of range");
2215 return MatchOperand_ParseFail;
2217 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2218 return MatchOperand_Success;
2220 if (Tok.is(AsmToken::Integer)) {
// Hex literals are treated as the raw 8-bit encoded value; everything else
// is re-parsed as a double and encoded.
2222 if (!isNegative && Tok.getString().startswith("0x")) {
2223 Val = Tok.getIntVal();
2224 if (Val > 255 || Val < 0) {
2225 TokError("encoded floating point value out of range");
2226 return MatchOperand_ParseFail;
2229 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2230 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2231 // If we had a '-' in front, toggle the sign bit.
2232 IntVal ^= (uint64_t)isNegative << 63;
2233 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2235 Parser.Lex(); // Eat the token.
2236 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2237 return MatchOperand_Success;
2241 return MatchOperand_NoMatch;
2243 TokError("invalid floating point immediate");
2244 return MatchOperand_ParseFail;
2247 /// parseCondCodeString - Parse a Condition Code string.
// Returns ARM64CC::Invalid when the string is not a recognized condition.
2248 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2249 unsigned CC = StringSwitch<unsigned>(Cond.lower())
2250 .Case("eq", ARM64CC::EQ)
2251 .Case("ne", ARM64CC::NE)
2252 .Case("cs", ARM64CC::CS)
2253 .Case("hs", ARM64CC::CS)
2254 .Case("cc", ARM64CC::CC)
2255 .Case("lo", ARM64CC::CC)
2256 .Case("mi", ARM64CC::MI)
2257 .Case("pl", ARM64CC::PL)
2258 .Case("vs", ARM64CC::VS)
2259 .Case("vc", ARM64CC::VC)
2260 .Case("hi", ARM64CC::HI)
2261 .Case("ls", ARM64CC::LS)
2262 .Case("ge", ARM64CC::GE)
2263 .Case("lt", ARM64CC::LT)
2264 .Case("gt", ARM64CC::GT)
2265 .Case("le", ARM64CC::LE)
2266 .Case("al", ARM64CC::AL)
2267 .Case("nv", ARM64CC::NV)
2268 .Default(ARM64CC::Invalid);
// NOTE(review): the 'return CC;' line is elided from this view.
2272 /// parseCondCode - Parse a Condition Code operand.
// NOTE(review): the declaration of 'S' and the guard around the inversion
// (invertCondCode) are elided from this view.
2273 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2274 bool invertCondCode) {
2276 const AsmToken &Tok = Parser.getTok();
2277 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2279 StringRef Cond = Tok.getString();
2280 unsigned CC = parseCondCodeString(Cond);
2281 if (CC == ARM64CC::Invalid)
2282 return TokError("invalid condition code");
2283 Parser.Lex(); // Eat identifier token.
2286 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
// The condition code is carried as a plain constant-expression immediate.
2288 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2290 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2294 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2295 /// them if present.
// Returns true on error (via TokError/Error); on success pushes a k_Shifter
// operand. NOTE(review): several control-flow/return lines are elided from
// this view.
2296 bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
2297 const AsmToken &Tok = Parser.getTok();
2298 ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
2299 .Case("lsl", ARM64_AM::LSL)
2300 .Case("lsr", ARM64_AM::LSR)
2301 .Case("asr", ARM64_AM::ASR)
2302 .Case("ror", ARM64_AM::ROR)
2303 .Case("msl", ARM64_AM::MSL)
2304 .Case("LSL", ARM64_AM::LSL)
2305 .Case("LSR", ARM64_AM::LSR)
2306 .Case("ASR", ARM64_AM::ASR)
2307 .Case("ROR", ARM64_AM::ROR)
2308 .Case("MSL", ARM64_AM::MSL)
2309 .Default(ARM64_AM::InvalidShift)
2310 if (ShOp == ARM64_AM::InvalidShift)
2313 SMLoc S = Tok.getLoc();
2316 // We expect a number here.
2317 bool Hash = getLexer().is(AsmToken::Hash);
2318 if (!Hash && getLexer().isNot(AsmToken::Integer))
2319 return TokError("immediate value expected for shifter operand");
2322 Parser.Lex(); // Eat the '#'.
2324 SMLoc ExprLoc = getLoc();
2325 const MCExpr *ImmVal;
2326 if (getParser().parseExpression(ImmVal))
2329 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2331 return TokError("immediate value expected for shifter operand");
// Shift amounts are at most 63 (6 bits).
2333 if ((MCE->getValue() & 0x3f) != MCE->getValue())
2334 return Error(ExprLoc, "immediate value too large for shifter operand");
2336 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2338 ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
/// parseOptionalExtend - Some operands take an optional extend argument. Parse
/// them if present.
/// Recognizes the uxt*/sxt* extend mnemonics (either case, with "lsl" as an
/// alias for UXTX) and an optional immediate amount; an omitted amount is
/// recorded as 0. Returns true on error.
bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();
  ARM64_AM::ExtendType ExtOp =
      StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
          .Case("uxtb", ARM64_AM::UXTB)
          .Case("uxth", ARM64_AM::UXTH)
          .Case("uxtw", ARM64_AM::UXTW)
          .Case("uxtx", ARM64_AM::UXTX)
          .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
          .Case("sxtb", ARM64_AM::SXTB)
          .Case("sxth", ARM64_AM::SXTH)
          .Case("sxtw", ARM64_AM::SXTW)
          .Case("sxtx", ARM64_AM::SXTX)
          .Case("UXTB", ARM64_AM::UXTB)
          .Case("UXTH", ARM64_AM::UXTH)
          .Case("UXTW", ARM64_AM::UXTW)
          .Case("UXTX", ARM64_AM::UXTX)
          .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
          .Case("SXTB", ARM64_AM::SXTB)
          .Case("SXTH", ARM64_AM::SXTH)
          .Case("SXTW", ARM64_AM::SXTW)
          .Case("SXTX", ARM64_AM::SXTX)
          .Default(ARM64_AM::InvalidExtend);
  if (ExtOp == ARM64_AM::InvalidExtend)

  SMLoc S = Tok.getLoc();

  // An extend with no immediate amount is legal; record it with amount 0.
  if (getLexer().is(AsmToken::EndOfStatement) ||
      getLexer().is(AsmToken::Comma)) {
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
        ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));

  // No '#' and no integer: treat as an extend with an omitted amount too.
  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
        ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));

    Parser.Lex(); // Eat the '#'.

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))

  // The extend amount must fold to a compile-time constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    return TokError("immediate value expected for extend operand");

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Emits a "sys" token plus the op1/Cn/Cm/op2 operands for the named alias,
/// then parses the (sometimes optional) register operand. Returns true
/// (with a diagnostic) on error.
bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  // These mnemonics never take a '.'-suffix.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

      ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));

  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = 0;

// Append the op1 immediate, Cn and Cm system control registers, and the op2
// immediate for one SYS encoding.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
    Expr = MCConstantExpr::Create(op1, getContext());                          \
    Operands.push_back(                                                        \
        ARM64Operand::CreateImm(Expr, S, getLoc(), getContext()));             \
    Operands.push_back(                                                        \
        ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));             \
    Operands.push_back(                                                        \
        ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));             \
    Expr = MCConstantExpr::Create(op2, getContext());                          \
    Operands.push_back(                                                        \
        ARM64Operand::CreateImm(Expr, S, getLoc(), getContext()));             \

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
      return TokError("invalid operand for IC instruction");
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
      return TokError("invalid operand for DC instruction");
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
      return TokError("invalid operand for AT instruction");
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
      return TokError("invalid operand for TLBI instruction");

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" operate on the whole cache/TLB and take no
  // register; everything else requires one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");

  // Diagnose a missing or superfluous register for this particular op.
  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");

  Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier.
/// Accepts either a '#'-prefixed (or bare) integer in [0, 15] or a named
/// barrier option; for ISB the only valid named option is "sy".
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
      Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    // The barrier operand must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    // The CRm field encoding the barrier kind is only 4 bits wide.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
        ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
    return MatchOperand_Success;

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;

  unsigned Opt = ARM64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != ARM64DB::SY) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;

  Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand (MRS/MSR).
/// Any identifier is accepted here; validation of the name is deferred to
/// the SysReg operand itself.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
/// tryParseVectorRegister - Parse a vector register operand.
/// Handles an optional layout qualifier (emitted as a literal token) and an
/// optional "[imm]" element index. Returns true if no vector register is
/// present or on error.
bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))

  // Check for a vector register specifier first.
  int64_t Reg = tryMatchVectorRegister(Kind, false);
      ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
    Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
    // The element index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
      TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
/// parseRegister - Parse a non-vector register operand.
/// Tries a vector register first (so layout qualifiers are handled), then a
/// scalar register. Returns true when no register could be parsed.
bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
      ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself. Emit "[", "1", "]" as
  // separate literal tokens so the matcher can see them.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
        if (getLexer().getKind() == AsmToken::RBrac) {
          SMLoc RBracS = getLoc();
              ARM64Operand::CreateToken("[", false, LBracS, getContext()));
              ARM64Operand::CreateToken("1", false, IntS, getContext()));
              ARM64Operand::CreateToken("]", false, RBracS, getContext()));
/// tryParseNoIndexMemory - Custom parser method for memory operands that
///                         do not allow base register writeback modes,
///                         or those that handle writeback separately from
///                         the memory operand (like the AdvSIMD ldX/stX
///                         instructions). Accepts only the "[Rn]" form.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  if (BaseRegTok.isNot(AsmToken::Identifier)) {
    Error(BaseRegTok.getLoc(), "register expected");
    return MatchOperand_ParseFail;

  int64_t Reg = tryParseRegister();
    Error(BaseRegTok.getLoc(), "register expected");
    return MatchOperand_ParseFail;

  // No offset of any kind is allowed here; the closing bracket must follow
  // the base register immediately.
  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(E, "']' expected");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext()));
  return MatchOperand_Success;
/// parseMemory - Parse a memory operand for a basic load/store instruction.
/// Handles "[Rn]", "[Rn, Rm {, extend {#amt}}]", "[Rn, #imm]" and
/// "[Rn, symbol@modifier]" forms, plus a trailing '!' for pre-indexed
/// writeback. Returns true (with a diagnostic) on error.
bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
  assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");

  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  if (BaseRegTok.isNot(AsmToken::Identifier))
    return Error(BaseRegTok.getLoc(), "register expected");

  int64_t Reg = tryParseRegister();
    return Error(BaseRegTok.getLoc(), "register expected");

  // If there is an offset expression, parse it.
  const MCExpr *OffsetExpr = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
    OffsetLoc = getLoc();

    // Register-offset form: a register followed by an optional extend.
    const AsmToken &OffsetRegTok = Parser.getTok();
    int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;

      // Default shift is LSL, with an omitted shift. We use the third bit of
      // the extend value to indicate presence/omission of the immediate offset.
      ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
      int64_t ShiftVal = 0;
      bool ExplicitShift = false;

      if (Parser.getTok().is(AsmToken::Comma)) {
        // Embedded extend operand.
        Parser.Lex(); // Eat the comma

        SMLoc ExtLoc = getLoc();
        const AsmToken &Tok = Parser.getTok();
        // Only the word-/doubleword-sized extends are legal as a memory
        // offset modifier; both spellings are accepted.
        ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
                    .Case("uxtw", ARM64_AM::UXTW)
                    .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
                    .Case("sxtw", ARM64_AM::SXTW)
                    .Case("sxtx", ARM64_AM::SXTX)
                    .Case("UXTW", ARM64_AM::UXTW)
                    .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
                    .Case("SXTW", ARM64_AM::SXTW)
                    .Case("SXTX", ARM64_AM::SXTX)
                    .Default(ARM64_AM::InvalidExtend);
        if (ExtOp == ARM64_AM::InvalidExtend)
          return Error(ExtLoc, "expected valid extend operation");

        Parser.Lex(); // Eat the extend op.

        bool Hash = getLexer().is(AsmToken::Hash);
        if (getLexer().is(AsmToken::RBrac)) {
          // No immediate operand.
          if (ExtOp == ARM64_AM::UXTX)
            return Error(ExtLoc, "LSL extend requires immediate operand");
        } else if (Hash || getLexer().is(AsmToken::Integer)) {
          // Immediate operand.
            Parser.Lex(); // Eat the '#'
          const MCExpr *ImmVal;
          SMLoc ExprLoc = getLoc();
          if (getParser().parseExpression(ImmVal))
          // The extend amount must fold to a compile-time constant.
          const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
            return TokError("immediate value expected for extend operand");

          ExplicitShift = true;
          ShiftVal = MCE->getValue();
          // Legal scale amounts for a register-offset load/store are 0..4.
          if (ShiftVal < 0 || ShiftVal > 4)
            return Error(ExprLoc, "immediate operand out of range");
          return Error(getLoc(), "expected immediate operand");

      if (Parser.getTok().isNot(AsmToken::RBrac))
        return Error(getLoc(), "']' expected");

      Parser.Lex(); // Eat right bracket token.

      Operands.push_back(ARM64Operand::CreateRegOffsetMem(
          Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));

    // Immediate expressions.
    } else if (Parser.getTok().is(AsmToken::Hash) ||
               Parser.getTok().is(AsmToken::Integer)) {
      if (Parser.getTok().is(AsmToken::Hash))
        Parser.Lex(); // Eat hash token.

      if (parseSymbolicImmVal(OffsetExpr))

      // FIXME: We really should make sure that we're dealing with a LDR/STR
      // instruction that can legally have a symbolic expression here.
      // Symbol reference.
      if (Parser.getTok().isNot(AsmToken::Identifier) &&
          Parser.getTok().isNot(AsmToken::String))
        return Error(getLoc(), "identifier or immediate expression expected");
      if (getParser().parseExpression(OffsetExpr))
      // If this is a plain ref, Make sure a legal variant kind was specified.
      // Otherwise, it's a more complicated expression and we have to just
      // assume it's OK and let the relocation stuff puke if it's not.
      ARM64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      const MCConstantExpr *Addend;
      if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
        assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
               "ELF symbol modifiers not supported here yet");

        switch (DarwinRefKind) {
          return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
        case MCSymbolRefExpr::VK_GOTPAGEOFF:
        case MCSymbolRefExpr::VK_PAGEOFF:
        case MCSymbolRefExpr::VK_TLVPPAGEOFF:
          // These are what we're expecting.

  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");

  Parser.Lex(); // Eat right bracket token.

  // Create the memory operand.
      ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));

  // Check for a '!', indicating pre-indexed addressing with writeback.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    // There needs to have been an immediate or wback doesn't make sense.
      return Error(E, "missing offset for pre-indexed addressing");
    // Pre-indexed with writeback must have a constant expression for the
    // offset. FIXME: Theoretically, we'd like to allow fixups so long
    // as they don't require a relocation.
    if (!isa<MCConstantExpr>(OffsetExpr))
      return Error(OffsetLoc, "constant immediate expression expected");

    // Create the Token operand for the '!'.
    Operands.push_back(ARM64Operand::CreateToken(
        "!", false, Parser.getTok().getLoc(), getContext()));
    Parser.Lex(); // Eat the '!' token.
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":specifier:" ELF relocation modifier (e.g. ":lo12:sym").
/// When a modifier is present the resulting expression is wrapped in an
/// ARM64MCExpr carrying the variant kind. Returns true on error.
bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  ARM64MCExpr::VariantKind RefKind;

  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat ':"
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier)) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", ARM64MCExpr::VK_LO12)
                  .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", ARM64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
                  .Default(ARM64MCExpr::VK_INVALID);

    if (RefKind == ARM64MCExpr::VK_INVALID) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    // The specifier must be closed by a second ':'.
    if (Parser.getTok().isNot(AsmToken::Colon)) {
      Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
    Parser.Lex(); // Eat ':'

  if (getParser().parseExpression(ImmVal))

    // Attach the variant kind so the relocation is emitted correctly.
    ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
3075 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
3076 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
3077 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3079 Parser.Lex(); // Eat left bracket token.
3081 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3084 int64_t PrevReg = FirstReg;
3087 if (Parser.getTok().is(AsmToken::Minus)) {
3088 Parser.Lex(); // Eat the minus.
3090 SMLoc Loc = getLoc();
3092 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3095 // Any Kind suffices must match on all regs in the list.
3096 if (Kind != NextKind)
3097 return Error(Loc, "mismatched register size suffix");
3099 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3101 if (Space == 0 || Space > 3) {
3102 return Error(Loc, "invalid number of vectors");
3108 while (Parser.getTok().is(AsmToken::Comma)) {
3109 Parser.Lex(); // Eat the comma token.
3111 SMLoc Loc = getLoc();
3113 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3116 // Any Kind suffices must match on all regs in the list.
3117 if (Kind != NextKind)
3118 return Error(Loc, "mismatched register size suffix");
3120 // Registers must be incremental (with wraparound at 31)
3121 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3122 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3123 return Error(Loc, "registers must be sequential");
3130 if (Parser.getTok().is(AsmToken::EndOfStatement))
3131 Error(getLoc(), "'}' expected");
3132 Parser.Lex(); // Eat the '}' token.
3134 unsigned NumElements = 0;
3135 char ElementKind = 0;
3137 parseValidVectorKind(Kind, NumElements, ElementKind);
3139 Operands.push_back(ARM64Operand::CreateVectorList(
3140 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3142 // If there is an index specifier following the list, parse that too.
3143 if (Parser.getTok().is(AsmToken::LBrac)) {
3144 SMLoc SIdx = getLoc();
3145 Parser.Lex(); // Eat left bracket token.
3147 const MCExpr *ImmVal;
3148 if (getParser().parseExpression(ImmVal))
3150 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3152 TokError("immediate value expected for vector index");
3157 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3158 Error(E, "']' expected");
3162 Parser.Lex(); // Eat right bracket token.
3164 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
/// Dispatches to the custom operand parsers first, then handles the general
/// cases by leading token kind: '['-memory, '{'-vector list, identifiers
/// (condition codes, registers, shifts/extends, labels), and immediates
/// (including the literal "#0.0" accepted by fcmp/fcmpe). Returns true on
/// error.
bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)

  // Nothing custom, so do general case parsing.
  switch (getLexer().getKind()) {
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))

    // This could be an optional "shift" operand.
    if (!parseOptionalShift(Operands))

    // Or maybe it could be an optional "extend" operand.
    if (!parseOptionalExtend(Operands))

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    if (getParser().parseExpression(IdVal))

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    if (getLexer().is(AsmToken::Hash))

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (IntVal != 0 || (Mnemonic != "fcmp" && Mnemonic != "fcmpe"))
        return TokError("unexpected floating point literal");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two literal tokens, which is what the
      // instruction definitions expect.
          ARM64Operand::CreateToken("#0", false, S, getContext()));
          ARM64Operand::CreateToken(".0", false, S, getContext()));

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
/// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
/// operands.
/// Splits the mnemonic on '.' (emitting suffix tokens), special-cases the
/// SYS aliases (ic/dc/at/tlbi) and "b.<cc>" branches, then parses the
/// comma-separated operand list, telling parseOperand which positions hold
/// a condition code for the conditional-compare/select families.
bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                      StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
    return parseSysAlias(Head, NameLoc, Operands);

      ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point diagnostics at the suffix itself, not the whole mnemonic.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    unsigned CC = parseCondCodeString(Head);
    if (CC == ARM64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
        ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
        ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand. N counts the operand position so
      // the condition-code slot of the cond-compare/select families is
      // parsed as a condition rather than a register/immediate.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");

  Parser.Lex(); // Consume the EndOfStatement
3363 // FIXME: This entire function is a giant hack to provide us with decent
3364 // operand range validation/diagnostics until TableGen/MC can be extended
3365 // to support autogeneration of this kind of validation.
/// Post-match semantic validation that the TableGen'erated matcher cannot
/// express: "unpredictable" writeback-base register hazards for load/store
/// (pair) instructions, and immediate/shift-operand range checks.
/// NOTE(review): this excerpt is sampled from a larger file — several interior
/// lines (closing braces, 'break' statements, some case labels/conditions and
/// the final return) are elided here, so the comments below describe only the
/// code that is visible.
/// \param Inst the already-matched instruction to validate.
/// \param Loc  start locations of the parsed operands (parallel to the
///             operand list), used to point diagnostics at the right operand.
/// \returns true on failure, with the diagnostic already emitted via Error().
3366 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3367 SmallVectorImpl<SMLoc> &Loc) {
3368 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3369 // Check for indexed addressing modes w/ the base register being the
3370 // same as a destination/source register or pair load where
3371 // the Rt == Rt2. All of those are undefined behaviour.
// First switch: writeback-base aliasing and Rt==Rt2 hazard checks.
3372 switch (Inst.getOpcode()) {
// Pre/post-indexed LDP: base register Rn must not overlap either
// destination (isSubRegisterEq also catches W/X sub-register overlap).
3373 case ARM64::LDPSWpre:
3374 case ARM64::LDPWpost:
3375 case ARM64::LDPWpre:
3376 case ARM64::LDPXpost:
3377 case ARM64::LDPXpre: {
3378 unsigned Rt = Inst.getOperand(0).getReg();
3379 unsigned Rt2 = Inst.getOperand(1).getReg();
3380 unsigned Rn = Inst.getOperand(2).getReg();
3381 if (RI->isSubRegisterEq(Rn, Rt))
3382 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3383 "is also a destination");
3384 if (RI->isSubRegisterEq(Rn, Rt2))
3385 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3386 "is also a destination");
// LDP forms below only need the Rt2 == Rt check; the comparison line
// itself is elided in this excerpt (original line 3404).
3389 case ARM64::LDPDpost:
3390 case ARM64::LDPDpre:
3391 case ARM64::LDPQpost:
3392 case ARM64::LDPQpre:
3393 case ARM64::LDPSpost:
3394 case ARM64::LDPSpre:
3395 case ARM64::LDPSWpost:
3401 case ARM64::LDPXi: {
3402 unsigned Rt = Inst.getOperand(0).getReg();
3403 unsigned Rt2 = Inst.getOperand(1).getReg();
3405 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
3408 case ARM64::STPDpost:
3409 case ARM64::STPDpre:
3410 case ARM64::STPQpost:
3411 case ARM64::STPQpre:
3412 case ARM64::STPSpost:
3413 case ARM64::STPSpre:
3414 case ARM64::STPWpost:
3415 case ARM64::STPWpre:
3416 case ARM64::STPXpost:
3417 case ARM64::STPXpre: {
// Pre/post-indexed STP: writeback base must not overlap either source.
3418 unsigned Rt = Inst.getOperand(0).getReg();
3419 unsigned Rt2 = Inst.getOperand(1).getReg();
3420 unsigned Rn = Inst.getOperand(2).getReg();
3421 if (RI->isSubRegisterEq(Rn, Rt))
3422 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3423 "is also a source");
3424 if (RI->isSubRegisterEq(Rn, Rt2))
3425 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3426 "is also a source");
// Pre/post-indexed single-register LDR: base must not overlap Rt.
3429 case ARM64::LDRBBpre:
3430 case ARM64::LDRBpre:
3431 case ARM64::LDRHHpre:
3432 case ARM64::LDRHpre:
3433 case ARM64::LDRSBWpre:
3434 case ARM64::LDRSBXpre:
3435 case ARM64::LDRSHWpre:
3436 case ARM64::LDRSHXpre:
3437 case ARM64::LDRSWpre:
3438 case ARM64::LDRWpre:
3439 case ARM64::LDRXpre:
3440 case ARM64::LDRBBpost:
3441 case ARM64::LDRBpost:
3442 case ARM64::LDRHHpost:
3443 case ARM64::LDRHpost:
3444 case ARM64::LDRSBWpost:
3445 case ARM64::LDRSBXpost:
3446 case ARM64::LDRSHWpost:
3447 case ARM64::LDRSHXpost:
3448 case ARM64::LDRSWpost:
3449 case ARM64::LDRWpost:
3450 case ARM64::LDRXpost: {
3451 unsigned Rt = Inst.getOperand(0).getReg();
3452 unsigned Rn = Inst.getOperand(1).getReg();
3453 if (RI->isSubRegisterEq(Rn, Rt))
3454 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3455 "is also a source");
// Pre/post-indexed single-register STR: same hazard for the store source.
3458 case ARM64::STRBBpost:
3459 case ARM64::STRBpost:
3460 case ARM64::STRHHpost:
3461 case ARM64::STRHpost:
3462 case ARM64::STRWpost:
3463 case ARM64::STRXpost:
3464 case ARM64::STRBBpre:
3465 case ARM64::STRBpre:
3466 case ARM64::STRHHpre:
3467 case ARM64::STRHpre:
3468 case ARM64::STRWpre:
3469 case ARM64::STRXpre: {
3470 unsigned Rt = Inst.getOperand(0).getReg();
3471 unsigned Rn = Inst.getOperand(1).getReg();
3472 if (RI->isSubRegisterEq(Rn, Rt))
3473 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3474 "is also a source");
3479 // Now check immediate ranges. Separate from the above as there is overlap
3480 // in the instructions being checked and this keeps the nested conditionals
// Second switch: per-opcode immediate / shift / extend range validation.
3482 switch (Inst.getOpcode()) {
// Shifted-register logical ops on 32-bit regs: LSL amount is limited to 31.
3484 case ARM64::ANDSWrs:
3486 case ARM64::ORRWrs: {
3487 if (!Inst.getOperand(3).isImm())
3488 return Error(Loc[3], "immediate value expected")
3489 int64_t shifter = Inst.getOperand(3).getImm();
3490 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
3491 if (ST == ARM64_AM::LSL && shifter > 31)
3492 return Error(Loc[3], "shift value out of range")
// ADD/SUB immediate forms: the optional shift must be LSL #0 or LSL #12,
// and the imm12 payload must be in [0,4095] unless it is one of the
// recognized relocation expressions handled below.
3495 case ARM64::ADDSWri:
3496 case ARM64::ADDSXri:
3499 case ARM64::SUBSWri:
3500 case ARM64::SUBSXri:
3502 case ARM64::SUBXri: {
3503 if (!Inst.getOperand(3).isImm())
3504 return Error(Loc[3], "immediate value expected")
3505 int64_t shifter = Inst.getOperand(3).getImm();
3506 if (shifter != 0 && shifter != 12)
3507 return Error(Loc[3], "shift value out of range")
3508 // The imm12 operand can be an expression. Validate that it's legit.
3509 // FIXME: We really, really want to allow arbitrary expressions here
3510 // and resolve the value and validate the result at fixup time, but
3511 // that's hard as we have long since lost any source information we
3512 // need to generate good diagnostics by that point.
3513 if (Inst.getOpcode() == ARM64::ADDXri && Inst.getOperand(2).isExpr()) {
3514 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3515 ARM64MCExpr::VariantKind ELFRefKind;
3516 MCSymbolRefExpr::VariantKind DarwinRefKind;
3517 const MCConstantExpr *Addend;
3518 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3519 return Error(Loc[2], "invalid immediate expression")
// Accept the low-12-bits relocation variants (Darwin @pageoff family and
// the ELF :lo12:-style modifiers).
3522 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3523 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
3524 ELFRefKind == ARM64MCExpr::VK_LO12 ||
3525 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3526 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3527 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3528 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3529 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
3530 // Note that we don't range-check the addend. It's adjusted
3531 // modulo page size when converted, so there is no "out of range"
3532 // condition when using @pageoff. Any validity checking for the value
3533 // was done in the is*() predicate function.
3535 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
3536 // @gotpageoff can only be used directly, not with an addend.
3540 // Otherwise, we're not sure, so don't allow it for now.
3541 return Error(Loc[2], "invalid immediate expression")
3544 // If it's anything but an immediate, it's not legit.
3545 if (!Inst.getOperand(2).isImm())
3546 return Error(Loc[2], "invalid immediate expression")
3547 int64_t imm = Inst.getOperand(2).getImm();
3548 if (imm > 4095 || imm < 0)
3549 return Error(Loc[2], "immediate value out of range")
// Pre/post-indexed loads/stores plus the unprivileged (LDTR*) and unscaled
// (LDUR*/STUR*) forms all share the signed 9-bit offset check below
// ([-256,255]); intervening case labels are elided in this excerpt.
3552 case ARM64::LDRBpre:
3553 case ARM64::LDRHpre:
3554 case ARM64::LDRSBWpre:
3555 case ARM64::LDRSBXpre:
3556 case ARM64::LDRSHWpre:
3557 case ARM64::LDRSHXpre:
3558 case ARM64::LDRWpre:
3559 case ARM64::LDRXpre:
3560 case ARM64::LDRSpre:
3561 case ARM64::LDRDpre:
3562 case ARM64::LDRQpre:
3563 case ARM64::STRBpre:
3564 case ARM64::STRHpre:
3565 case ARM64::STRWpre:
3566 case ARM64::STRXpre:
3567 case ARM64::STRSpre:
3568 case ARM64::STRDpre:
3569 case ARM64::STRQpre:
3570 case ARM64::LDRBpost:
3571 case ARM64::LDRHpost:
3572 case ARM64::LDRSBWpost:
3573 case ARM64::LDRSBXpost:
3574 case ARM64::LDRSHWpost:
3575 case ARM64::LDRSHXpost:
3576 case ARM64::LDRWpost:
3577 case ARM64::LDRXpost:
3578 case ARM64::LDRSpost:
3579 case ARM64::LDRDpost:
3580 case ARM64::LDRQpost:
3581 case ARM64::STRBpost:
3582 case ARM64::STRHpost:
3583 case ARM64::STRWpost:
3584 case ARM64::STRXpost:
3585 case ARM64::STRSpost:
3586 case ARM64::STRDpost:
3587 case ARM64::STRQpost:
3592 case ARM64::LDTRSHWi:
3593 case ARM64::LDTRSHXi:
3594 case ARM64::LDTRSBWi:
3595 case ARM64::LDTRSBXi:
3596 case ARM64::LDTRSWi:
3608 case ARM64::LDURSHWi:
3609 case ARM64::LDURSHXi:
3610 case ARM64::LDURSBWi:
3611 case ARM64::LDURSBXi:
3612 case ARM64::LDURSWi:
3620 case ARM64::STURBi: {
3621 // FIXME: Should accept expressions and error in fixup evaluation
3623 if (!Inst.getOperand(2).isImm())
3624 return Error(Loc[1], "immediate value expected")
3625 int64_t offset = Inst.getOperand(2).getImm();
3626 if (offset > 255 || offset < -256)
3627 return Error(Loc[1], "offset value out of range")
// Register-offset addressing forms: operand 3 encodes the extend/shift;
// only UXTW/UXTX/SXTW/SXTX are legal extend types.
3632 case ARM64::LDRSWro:
3634 case ARM64::STRSro: {
3635 // FIXME: Should accept expressions and error in fixup evaluation
3637 if (!Inst.getOperand(3).isImm())
3638 return Error(Loc[1], "immediate value expected")
3639 int64_t shift = Inst.getOperand(3).getImm();
3640 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3641 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3642 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3643 return Error(Loc[1], "shift type invalid")
3652 case ARM64::STRQro: {
3653 // FIXME: Should accept expressions and error in fixup evaluation
3655 if (!Inst.getOperand(3).isImm())
3656 return Error(Loc[1], "immediate value expected")
3657 int64_t shift = Inst.getOperand(3).getImm();
3658 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3659 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3660 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3661 return Error(Loc[1], "shift type invalid")
3665 case ARM64::LDRHHro:
3666 case ARM64::LDRSHWro:
3667 case ARM64::LDRSHXro:
3669 case ARM64::STRHHro: {
3670 // FIXME: Should accept expressions and error in fixup evaluation
3672 if (!Inst.getOperand(3).isImm())
3673 return Error(Loc[1], "immediate value expected")
3674 int64_t shift = Inst.getOperand(3).getImm();
3675 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3676 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3677 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3678 return Error(Loc[1], "shift type invalid")
3682 case ARM64::LDRBBro:
3683 case ARM64::LDRSBWro:
3684 case ARM64::LDRSBXro:
3686 case ARM64::STRBBro: {
3687 // FIXME: Should accept expressions and error in fixup evaluation
3689 if (!Inst.getOperand(3).isImm())
3690 return Error(Loc[1], "immediate value expected")
3691 int64_t shift = Inst.getOperand(3).getImm();
3692 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3693 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3694 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3695 return Error(Loc[1], "shift type invalid")
// Load/store pair (and STNP) forms: scaled signed 7-bit immediate,
// checked here against the unscaled range [-64,63].
3709 case ARM64::LDPWpre:
3710 case ARM64::LDPXpre:
3711 case ARM64::LDPSpre:
3712 case ARM64::LDPDpre:
3713 case ARM64::LDPQpre:
3714 case ARM64::LDPSWpre:
3715 case ARM64::STPWpre:
3716 case ARM64::STPXpre:
3717 case ARM64::STPSpre:
3718 case ARM64::STPDpre:
3719 case ARM64::STPQpre:
3720 case ARM64::LDPWpost:
3721 case ARM64::LDPXpost:
3722 case ARM64::LDPSpost:
3723 case ARM64::LDPDpost:
3724 case ARM64::LDPQpost:
3725 case ARM64::LDPSWpost:
3726 case ARM64::STPWpost:
3727 case ARM64::STPXpost:
3728 case ARM64::STPSpost:
3729 case ARM64::STPDpost:
3730 case ARM64::STPQpost:
3740 case ARM64::STNPQi: {
3741 // FIXME: Should accept expressions and error in fixup evaluation
3743 if (!Inst.getOperand(3).isImm())
3744 return Error(Loc[2], "immediate value expected")
3745 int64_t offset = Inst.getOperand(3).getImm();
3746 if (offset > 63 || offset < -64)
3747 return Error(Loc[2], "offset value out of range")
/// Rewrite a "mov Rd, #imm" pseudo into the real MOVZ/MOVN form: replace the
/// mnemonic token with \p mnemonic, replace the immediate operand with
/// (imm >> shift), and append an "LSL #shift" shifter operand.
/// NOTE(review): lines 3760/3762/3766 of the original are elided in this
/// excerpt — the CreateToken result is presumably assigned to Operands[0];
/// confirm against the full file.
/// \param Operands parsed operand list; [0] is the mnemonic, [2] the imm.
/// \param mnemonic replacement mnemonic ("movz" or "movn" at the call sites).
/// \param imm      full immediate value (already aligned to \p shift).
/// \param shift    LSL amount (0/16/32/48 at the call sites).
3755 static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
3756 StringRef mnemonic, uint64_t imm, unsigned shift,
3757 MCContext &Context) {
3758 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3759 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3761 ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
3763 const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
3764 Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
3765 Op2->getEndLoc(), Context);
3767 Operands.push_back(ARM64Operand::CreateShifter(
3768 ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
/// Rewrite a register "mov" involving SP/WSP into the canonical
/// "add Rd, Rn, #0, lsl #0" form (ORR cannot encode the stack pointer):
/// the mnemonic token becomes "add" and a zero immediate plus an LSL #0
/// shifter are appended.
/// NOTE(review): lines 3777/3779 of the original are elided in this excerpt —
/// the CreateToken result is presumably assigned to Operands[0]; confirm
/// against the full file.
/// \param Operands parsed operand list; [0] is the mnemonic token and [2]
///                 supplies the source locations for the appended operands.
3773 static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
3774 MCContext &Context) {
3775 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3776 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3778 ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
3780 const MCExpr *Imm = MCConstantExpr::Create(0, Context);
3781 Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
3782 Op2->getEndLoc(), Context));
3783 Operands.push_back(ARM64Operand::CreateShifter(
3784 ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
/// Rewrite a plain register-to-register "mov Rd, Rm" into its canonical
/// "orr Rd, ZR, Rm" alias: the mnemonic token becomes "orr", the source
/// register is shifted into operand slot 3, and slot 2 is refilled with the
/// zero register (WZR for a 32-bit source, XZR otherwise).
/// NOTE(review): lines 3793/3795 and 3802-3803 of the original are elided in
/// this excerpt — the CreateToken/CreateReg results are presumably assigned
/// to Operands[0] and Operands[2] respectively; confirm against the full file.
/// \param Operands parsed operand list; [0] mnemonic, [2] source register.
3789 static void rewriteMOVR(ARM64AsmParser::OperandVector &Operands,
3790 MCContext &Context) {
3791 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3792 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3794 ARM64Operand::CreateToken("orr", false, Op->getStartLoc(), Context);
3796 // Operands[2] becomes Operands[3].
3797 Operands.push_back(Operands[2]);
3798 // And Operands[2] becomes ZR.
3799 unsigned ZeroReg = ARM64::XZR;
3800 if (isGPR32Register(Operands[2]->getReg()))
3801 ZeroReg = ARM64::WZR;
3804 ARM64Operand::CreateReg(ZeroReg, false, Op2->getStartLoc(),
3805 Op2->getEndLoc(), Context);
/// Map a matcher failure code to a human-readable diagnostic emitted at
/// \p Loc. Always returns true (the Error() convention), so callers can
/// simply "return showMatchError(...)".
/// NOTE(review): the switch opener (original line 3811) and the
/// Match_MissingFeature return statement (3813) are elided in this excerpt.
/// \param Loc     location the diagnostic should point at.
/// \param ErrCode one of the Match_* codes produced by the generated matcher.
3810 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3812 case Match_MissingFeature:
3814 "instruction requires a CPU feature not currently enabled");
3815 case Match_InvalidOperand:
3816 return Error(Loc, "invalid operand for instruction");
// A "suffix" token operand (e.g. the .4s in a NEON mnemonic) failed to match.
3817 case Match_InvalidSuffix:
3818 return Error(Loc, "invalid type suffix for instruction");
// Memory-index diagnostics: SImm9 is the unscaled/writeback offset form; the
// *SImm7 variants are the scaled load/store-pair offsets; the plain
// Indexed<N> variants are the unsigned scaled offsets (scale = N/8 bytes).
3819 case Match_InvalidMemoryIndexedSImm9:
3820 return Error(Loc, "index must be an integer in range [-256,255].");
3821 case Match_InvalidMemoryIndexed32SImm7:
3822 return Error(Loc, "index must be a multiple of 4 in range [-256,252].");
3823 case Match_InvalidMemoryIndexed64SImm7:
3824 return Error(Loc, "index must be a multiple of 8 in range [-512,504].");
3825 case Match_InvalidMemoryIndexed128SImm7:
3826 return Error(Loc, "index must be a multiple of 16 in range [-1024,1008].");
3827 case Match_InvalidMemoryIndexed8:
3828 return Error(Loc, "index must be an integer in range [0,4095].");
3829 case Match_InvalidMemoryIndexed16:
3830 return Error(Loc, "index must be a multiple of 2 in range [0,8190].");
3831 case Match_InvalidMemoryIndexed32:
3832 return Error(Loc, "index must be a multiple of 4 in range [0,16380].");
3833 case Match_InvalidMemoryIndexed64:
3834 return Error(Loc, "index must be a multiple of 8 in range [0,32760].");
3835 case Match_InvalidMemoryIndexed128:
3836 return Error(Loc, "index must be a multiple of 16 in range [0,65520].");
// Shift/bit-position immediates with a 1-based upper bound per element size.
3837 case Match_InvalidImm1_8:
3838 return Error(Loc, "immediate must be an integer in range [1,8].");
3839 case Match_InvalidImm1_16:
3840 return Error(Loc, "immediate must be an integer in range [1,16].");
3841 case Match_InvalidImm1_32:
3842 return Error(Loc, "immediate must be an integer in range [1,32].");
3843 case Match_InvalidImm1_64:
3844 return Error(Loc, "immediate must be an integer in range [1,64].");
3845 case Match_InvalidLabel:
3846 return Error(Loc, "expected label or encodable integer pc offset");
3847 case Match_MnemonicFail:
3848 return Error(Loc, "unrecognized instruction mnemonic");
// Fallback for codes this routine does not know about; asserts in +Asserts
// builds and still emits a generic diagnostic otherwise.
3850 assert(0 && "unexpected error code!");
3851 return Error(Loc, "invalid instruction format");
3855 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3856 OperandVector &Operands,
3858 unsigned &ErrorInfo,
3859 bool MatchingInlineAsm) {
3860 assert(!Operands.empty() && "Unexpect empty operand list!");
3861 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3862 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3864 StringRef Tok = Op->getToken();
3865 // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
3866 // This needs to be done before the special handling of ADD/SUB immediates.
3867 if (Tok == "cmp" || Tok == "cmn") {
3868 // Replace the opcode with either ADDS or SUBS.
3869 const char *Repl = StringSwitch<const char *>(Tok)
3870 .Case("cmp", "subs")
3871 .Case("cmn", "adds")
3873 assert(Repl && "Unknown compare instruction");
3875 Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
3877 // Insert WZR or XZR as destination operand.
3878 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
3880 if (RegOp->isReg() && isGPR32Register(RegOp->getReg()))
3881 ZeroReg = ARM64::WZR;
3883 ZeroReg = ARM64::XZR;
3885 Operands.begin() + 1,
3886 ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
3887 // Update since we modified it above.
3888 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3889 Tok = Op->getToken();
3892 unsigned NumOperands = Operands.size();
3894 if (Tok == "mov" && NumOperands == 3) {
3895 // The MOV mnemomic is aliased to movn/movz, depending on the value of
3896 // the immediate being instantiated.
3897 // FIXME: Catching this here is a total hack, and we should use tblgen
3898 // support to implement this instead as soon as it is available.
3900 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3901 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3903 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
3904 uint64_t Val = CE->getValue();
3905 uint64_t NVal = ~Val;
3907 // If this is a 32-bit register and the value has none of the upper
3908 // set, clear the complemented upper 32-bits so the logic below works
3909 // for 32-bit registers too.
3910 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3911 if (Op1->isReg() && isGPR32Register(Op1->getReg()) &&
3912 (Val & 0xFFFFFFFFULL) == Val)
3913 NVal &= 0x00000000FFFFFFFFULL;
3915 // MOVK Rd, imm << 0
3916 if ((Val & 0xFFFF) == Val)
3917 rewriteMOVI(Operands, "movz", Val, 0, getContext());
3919 // MOVK Rd, imm << 16
3920 else if ((Val & 0xFFFF0000ULL) == Val)
3921 rewriteMOVI(Operands, "movz", Val, 16, getContext());
3923 // MOVK Rd, imm << 32
3924 else if ((Val & 0xFFFF00000000ULL) == Val)
3925 rewriteMOVI(Operands, "movz", Val, 32, getContext());
3927 // MOVK Rd, imm << 48
3928 else if ((Val & 0xFFFF000000000000ULL) == Val)
3929 rewriteMOVI(Operands, "movz", Val, 48, getContext());
3931 // MOVN Rd, (~imm << 0)
3932 else if ((NVal & 0xFFFFULL) == NVal)
3933 rewriteMOVI(Operands, "movn", NVal, 0, getContext());
3935 // MOVN Rd, ~(imm << 16)
3936 else if ((NVal & 0xFFFF0000ULL) == NVal)
3937 rewriteMOVI(Operands, "movn", NVal, 16, getContext());
3939 // MOVN Rd, ~(imm << 32)
3940 else if ((NVal & 0xFFFF00000000ULL) == NVal)
3941 rewriteMOVI(Operands, "movn", NVal, 32, getContext());
3943 // MOVN Rd, ~(imm << 48)
3944 else if ((NVal & 0xFFFF000000000000ULL) == NVal)
3945 rewriteMOVI(Operands, "movn", NVal, 48, getContext());
3947 } else if (Op1->isReg() && Op2->isReg()) {
3949 unsigned Reg1 = Op1->getReg();
3950 unsigned Reg2 = Op2->getReg();
3951 if ((Reg1 == ARM64::SP && isGPR64Register(Reg2)) ||
3952 (Reg2 == ARM64::SP && isGPR64Register(Reg1)) ||
3953 (Reg1 == ARM64::WSP && isGPR32Register(Reg2)) ||
3954 (Reg2 == ARM64::WSP && isGPR32Register(Reg1)))
3955 rewriteMOVRSP(Operands, getContext());
3957 rewriteMOVR(Operands, getContext());
3959 } else if (NumOperands == 4) {
3960 if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
3961 // Handle the uimm24 immediate form, where the shift is not specified.
3962 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3964 if (const MCConstantExpr *CE =
3965 dyn_cast<MCConstantExpr>(Op3->getImm())) {
3966 uint64_t Val = CE->getValue();
3967 if (Val >= (1 << 24)) {
3968 Error(IDLoc, "immediate value is too large");
3971 if (Val < (1 << 12)) {
3972 Operands.push_back(ARM64Operand::CreateShifter(
3973 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3974 } else if ((Val & 0xfff) == 0) {
3976 CE = MCConstantExpr::Create(Val >> 12, getContext());
3978 ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
3979 Operands.push_back(ARM64Operand::CreateShifter(
3980 ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
3982 Error(IDLoc, "immediate value is too large");
3986 Operands.push_back(ARM64Operand::CreateShifter(
3987 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3991 // FIXME: Horrible hack to handle the LSL -> UBFM alias.
3992 } else if (NumOperands == 4 && Tok == "lsl") {
3993 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3994 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3995 if (Op2->isReg() && Op3->isImm()) {
3996 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3998 uint64_t Op3Val = Op3CE->getValue();
3999 uint64_t NewOp3Val = 0;
4000 uint64_t NewOp4Val = 0;
4001 if (isGPR32Register(Op2->getReg())) {
4002 NewOp3Val = (32 - Op3Val) & 0x1f;
4003 NewOp4Val = 31 - Op3Val;
4005 NewOp3Val = (64 - Op3Val) & 0x3f;
4006 NewOp4Val = 63 - Op3Val;
4009 const MCExpr *NewOp3 =
4010 MCConstantExpr::Create(NewOp3Val, getContext());
4011 const MCExpr *NewOp4 =
4012 MCConstantExpr::Create(NewOp4Val, getContext());
4014 Operands[0] = ARM64Operand::CreateToken(
4015 "ubfm", false, Op->getStartLoc(), getContext());
4016 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4017 Op3->getEndLoc(), getContext());
4018 Operands.push_back(ARM64Operand::CreateImm(
4019 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
4025 // FIXME: Horrible hack to handle the optional LSL shift for vector
4027 } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
4028 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4029 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4030 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4031 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4032 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
4033 Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
4034 IDLoc, getContext()));
4035 } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
4036 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4037 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4038 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4039 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4040 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
4041 StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
4042 // Canonicalize on lower-case for ease of comparison.
4043 std::string CanonicalSuffix = Suffix.lower();
4044 if (Tok != "movi" ||
4045 (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
4046 CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
4047 Operands.push_back(ARM64Operand::CreateShifter(
4048 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4051 } else if (NumOperands == 5) {
4052 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4053 // UBFIZ -> UBFM aliases.
4054 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4055 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4056 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4057 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4059 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4060 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4061 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4063 if (Op3CE && Op4CE) {
4064 uint64_t Op3Val = Op3CE->getValue();
4065 uint64_t Op4Val = Op4CE->getValue();
4067 uint64_t NewOp3Val = 0;
4068 if (isGPR32Register(Op1->getReg()))
4069 NewOp3Val = (32 - Op3Val) & 0x1f;
4071 NewOp3Val = (64 - Op3Val) & 0x3f;
4073 uint64_t NewOp4Val = Op4Val - 1;
4075 const MCExpr *NewOp3 =
4076 MCConstantExpr::Create(NewOp3Val, getContext());
4077 const MCExpr *NewOp4 =
4078 MCConstantExpr::Create(NewOp4Val, getContext());
4079 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4080 Op3->getEndLoc(), getContext());
4081 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
4082 Op4->getEndLoc(), getContext());
4084 Operands[0] = ARM64Operand::CreateToken(
4085 "bfm", false, Op->getStartLoc(), getContext());
4086 else if (Tok == "sbfiz")
4087 Operands[0] = ARM64Operand::CreateToken(
4088 "sbfm", false, Op->getStartLoc(), getContext());
4089 else if (Tok == "ubfiz")
4090 Operands[0] = ARM64Operand::CreateToken(
4091 "ubfm", false, Op->getStartLoc(), getContext());
4093 llvm_unreachable("No valid mnemonic for alias?");
4101 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4102 // UBFX -> UBFM aliases.
4103 } else if (NumOperands == 5 &&
4104 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4105 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4106 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4107 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4109 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4110 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4111 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4113 if (Op3CE && Op4CE) {
4114 uint64_t Op3Val = Op3CE->getValue();
4115 uint64_t Op4Val = Op4CE->getValue();
4116 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4118 if (NewOp4Val >= Op3Val) {
4119 const MCExpr *NewOp4 =
4120 MCConstantExpr::Create(NewOp4Val, getContext());
4121 Operands[4] = ARM64Operand::CreateImm(
4122 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
4124 Operands[0] = ARM64Operand::CreateToken(
4125 "bfm", false, Op->getStartLoc(), getContext());
4126 else if (Tok == "sbfx")
4127 Operands[0] = ARM64Operand::CreateToken(
4128 "sbfm", false, Op->getStartLoc(), getContext());
4129 else if (Tok == "ubfx")
4130 Operands[0] = ARM64Operand::CreateToken(
4131 "ubfm", false, Op->getStartLoc(), getContext());
4133 llvm_unreachable("No valid mnemonic for alias?");
4142 // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
4143 // InstAlias can't quite handle this since the reg classes aren't
4145 if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
4146 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4148 if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
4149 if (OpCE->getValue() < 32) {
4150 // The source register can be Wn here, but the matcher expects a
4151 // GPR64. Twiddle it here if necessary.
4152 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4154 unsigned Reg = getXRegFromWReg(Op->getReg());
4155 Operands[1] = ARM64Operand::CreateReg(
4156 Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
4163 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4164 // InstAlias can't quite handle this since the reg classes aren't
4166 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4167 // The source register can be Wn here, but the matcher expects a
4168 // GPR64. Twiddle it here if necessary.
4169 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4171 unsigned Reg = getXRegFromWReg(Op->getReg());
4172 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4173 Op->getEndLoc(), getContext());
4177 // FIXME: Likewise for [su]xt[bh] with a Xd dst operand
4178 else if (NumOperands == 3 &&
4179 (Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
4180 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4181 if (Op->isReg() && isGPR64Register(Op->getReg())) {
4182 // The source register can be Wn here, but the matcher expects a
4183 // GPR64. Twiddle it here if necessary.
4184 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4186 unsigned Reg = getXRegFromWReg(Op->getReg());
4187 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4188 Op->getEndLoc(), getContext());
4194 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4195 if (NumOperands == 3 && Tok == "fmov") {
4196 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4197 ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
4198 if (RegOp->isReg() && ImmOp->isFPImm() &&
4199 ImmOp->getFPImm() == (unsigned)-1) {
4201 isFPR32Register(RegOp->getReg()) ? ARM64::WZR : ARM64::XZR;
4202 Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
4203 Op->getEndLoc(), getContext());
4208 // FIXME: Horrible hack to handle the literal .d[1] vector index on
4209 // FMOV instructions. The index isn't an actual instruction operand
4210 // but rather syntactic sugar. It really should be part of the mnemonic,
4211 // not the operand, but whatever.
4212 if ((NumOperands == 5) && Tok == "fmov") {
4213 // If the last operand is a vectorindex of '1', then replace it with
4214 // a '[' '1' ']' token sequence, which is what the matcher
4215 // (annoyingly) expects for a literal vector index operand.
4216 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
4217 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4218 SMLoc Loc = Op->getStartLoc();
4219 Operands.pop_back();
4222 ARM64Operand::CreateToken("[", false, Loc, getContext()));
4224 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4226 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4227 } else if (Op->isReg()) {
4228 // Similarly, check the destination operand for the GPR->High-lane
4230 unsigned OpNo = NumOperands - 2;
4231 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
4232 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4233 SMLoc Loc = Op->getStartLoc();
4235 ARM64Operand::CreateToken("[", false, Loc, getContext());
4237 Operands.begin() + OpNo + 1,
4238 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4240 Operands.begin() + OpNo + 2,
4241 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4248 // First try to match against the secondary set of tables containing the
4249 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4250 unsigned MatchResult =
4251 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4253 // If that fails, try against the alternate table containing long-form NEON:
4254 // "fadd v0.2s, v1.2s, v2.2s"
4255 if (MatchResult != Match_Success)
4257 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4259 switch (MatchResult) {
4260 case Match_Success: {
4261 // Perform range checking and other semantic validations
4262 SmallVector<SMLoc, 8> OperandLocs;
4263 NumOperands = Operands.size();
4264 for (unsigned i = 1; i < NumOperands; ++i)
4265 OperandLocs.push_back(Operands[i]->getStartLoc());
4266 if (validateInstruction(Inst, OperandLocs))
4270 Out.EmitInstruction(Inst, STI);
4273 case Match_MissingFeature:
4274 case Match_MnemonicFail:
4275 return showMatchError(IDLoc, MatchResult);
4276 case Match_InvalidOperand: {
4277 SMLoc ErrorLoc = IDLoc;
4278 if (ErrorInfo != ~0U) {
4279 if (ErrorInfo >= Operands.size())
4280 return Error(IDLoc, "too few operands for instruction");
4282 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4283 if (ErrorLoc == SMLoc())
4286 // If the match failed on a suffix token operand, tweak the diagnostic
4288 if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
4289 ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
4290 MatchResult = Match_InvalidSuffix;
4292 return showMatchError(ErrorLoc, MatchResult);
4294 case Match_InvalidMemoryIndexedSImm9: {
4295 // If there is not a '!' after the memory operand that failed, we really
4296 // want the diagnostic for the non-pre-indexed instruction variant instead.
4297 // Be careful to check for the post-indexed variant as well, which also
4298 // uses this match diagnostic. Also exclude the explicitly unscaled
4299 // mnemonics, as they want the unscaled diagnostic as well.
4300 if (Operands.size() == ErrorInfo + 1 &&
4301 !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
4302 !Tok.startswith("stur") && !Tok.startswith("ldur")) {
4303 // whether we want an Indexed64 or Indexed32 diagnostic depends on
4304 // the register class of the previous operand. Default to 64 in case
4305 // we see something unexpected.
4306 MatchResult = Match_InvalidMemoryIndexed64;
4308 ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
4309 if (PrevOp->isReg() && ARM64MCRegisterClasses[ARM64::GPR32RegClassID]
4310 .contains(PrevOp->getReg()))
4311 MatchResult = Match_InvalidMemoryIndexed32;
4314 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4315 if (ErrorLoc == SMLoc())
4317 return showMatchError(ErrorLoc, MatchResult);
4319 case Match_InvalidMemoryIndexed32:
4320 case Match_InvalidMemoryIndexed64:
4321 case Match_InvalidMemoryIndexed128:
4322 // If there is a '!' after the memory operand that failed, we really
4323 // want the diagnostic for the pre-indexed instruction variant instead.
4324 if (Operands.size() > ErrorInfo + 1 &&
4325 ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
4326 MatchResult = Match_InvalidMemoryIndexedSImm9;
4328 case Match_InvalidMemoryIndexed8:
4329 case Match_InvalidMemoryIndexed16:
4330 case Match_InvalidMemoryIndexed32SImm7:
4331 case Match_InvalidMemoryIndexed64SImm7:
4332 case Match_InvalidMemoryIndexed128SImm7:
4333 case Match_InvalidImm1_8:
4334 case Match_InvalidImm1_16:
4335 case Match_InvalidImm1_32:
4336 case Match_InvalidImm1_64:
4337 case Match_InvalidLabel: {
4338 // Any time we get here, there's nothing fancy to do. Just get the
4339 // operand SMLoc and display the diagnostic.
4340 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4341 // If it's a memory operand, the error is with the offset immediate,
4342 // so get that location instead.
4343 if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
4344 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
4345 if (ErrorLoc == SMLoc())
4347 return showMatchError(ErrorLoc, MatchResult);
4351 llvm_unreachable("Implement any new match types added!");
4355 /// ParseDirective parses the arm specific directives
4356 bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
4357 StringRef IDVal = DirectiveID.getIdentifier();
4358 SMLoc Loc = DirectiveID.getLoc();
4359 if (IDVal == ".hword")
4360 return parseDirectiveWord(2, Loc);
4361 if (IDVal == ".word")
4362 return parseDirectiveWord(4, Loc);
4363 if (IDVal == ".xword")
4364 return parseDirectiveWord(8, Loc);
4365 if (IDVal == ".tlsdesccall")
4366 return parseDirectiveTLSDescCall(Loc);
4368 return parseDirectiveLOH(IDVal, Loc);
4371 /// parseDirectiveWord
4372 /// ::= .word [ expression (, expression)* ]
4373 bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4374 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4376 const MCExpr *Value;
4377 if (getParser().parseExpression(Value))
4380 getParser().getStreamer().EmitValue(Value, Size);
4382 if (getLexer().is(AsmToken::EndOfStatement))
4385 // FIXME: Improve diagnostic.
4386 if (getLexer().isNot(AsmToken::Comma))
4387 return Error(L, "unexpected token in directive");
4396 // parseDirectiveTLSDescCall:
4397 // ::= .tlsdesccall symbol
4398 bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4400 if (getParser().parseIdentifier(Name))
4401 return Error(L, "expected symbol after directive");
4403 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4404 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4405 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
4408 Inst.setOpcode(ARM64::TLSDESCCALL);
4409 Inst.addOperand(MCOperand::CreateExpr(Expr));
4411 getParser().getStreamer().EmitInstruction(Inst, STI);
4415 /// ::= .loh <lohName | lohId> label1, ..., labelN
4416 /// The number of arguments depends on the loh identifier.
4417 bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4418 if (IDVal != MCLOHDirectiveName())
4421 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4422 if (getParser().getTok().isNot(AsmToken::Integer))
4423 return TokError("expected an identifier or a number in directive");
4424 // We successfully get a numeric value for the identifier.
4425 // Check if it is valid.
4426 int64_t Id = getParser().getTok().getIntVal();
4427 Kind = (MCLOHType)Id;
4428 // Check that Id does not overflow MCLOHType.
4429 if (!isValidMCLOHType(Kind) || Id != Kind)
4430 return TokError("invalid numeric identifier in directive");
4432 StringRef Name = getTok().getIdentifier();
4433 // We successfully parse an identifier.
4434 // Check if it is a recognized one.
4435 int Id = MCLOHNameToId(Name);
4438 return TokError("invalid identifier in directive");
4439 Kind = (MCLOHType)Id;
4441 // Consume the identifier.
4443 // Get the number of arguments of this LOH.
4444 int NbArgs = MCLOHIdToNbArgs(Kind);
4446 assert(NbArgs != -1 && "Invalid number of arguments");
4448 SmallVector<MCSymbol *, 3> Args;
4449 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4451 if (getParser().parseIdentifier(Name))
4452 return TokError("expected identifier in directive");
4453 Args.push_back(getContext().GetOrCreateSymbol(Name));
4455 if (Idx + 1 == NbArgs)
4457 if (getLexer().isNot(AsmToken::Comma))
4458 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4461 if (getLexer().isNot(AsmToken::EndOfStatement))
4462 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4464 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4469 ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
4470 ARM64MCExpr::VariantKind &ELFRefKind,
4471 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4472 const MCConstantExpr *&Addend) {
4473 ELFRefKind = ARM64MCExpr::VK_INVALID;
4474 DarwinRefKind = MCSymbolRefExpr::VK_None;
4476 if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
4477 ELFRefKind = AE->getKind();
4478 Expr = AE->getSubExpr();
4481 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4483 // It's a simple symbol reference with no addend.
4484 DarwinRefKind = SE->getKind();
4489 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4493 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4496 DarwinRefKind = SE->getKind();
4498 if (BE->getOpcode() != MCBinaryExpr::Add)
4501 // See if the addend is is a constant, otherwise there's more going
4502 // on here than we can deal with.
4503 Addend = dyn_cast<MCConstantExpr>(BE->getRHS());
4507 // It's some symbol reference + a constant addend, but really
4508 // shouldn't use both Darwin and ELF syntax.
4509 return ELFRefKind == ARM64MCExpr::VK_INVALID ||
4510 DarwinRefKind == MCSymbolRefExpr::VK_None;
4513 /// Force static initialization.
4514 extern "C" void LLVMInitializeARM64AsmParser() {
4515 RegisterMCAsmParser<ARM64AsmParser> X(TheARM64Target);
4518 #define GET_REGISTER_MATCHER
4519 #define GET_MATCHER_IMPLEMENTATION
4520 #include "ARM64GenAsmMatcher.inc"
4522 // Define this matcher function after the auto-generated include so we
4523 // have the match class enum definitions.
4524 unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
4526 ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
4527 // If the kind is a token for a literal immediate, check if our asm
4528 // operand matches. This is for InstAliases which have a fixed-value
4529 // immediate in the syntax.
4530 int64_t ExpectedVal;
4533 return Match_InvalidOperand;
4575 return Match_InvalidOperand;
4576 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4578 return Match_InvalidOperand;
4579 if (CE->getValue() == ExpectedVal)
4580 return Match_Success;
4581 return Match_InvalidOperand;