//===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "MCTargetDesc/ARM64MCExpr.h"
#include "Utils/ARM64BaseInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;
namespace {

class ARM64AsmParser : public MCTargetAsmParser {
public:
  typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;

private:
  StringRef Mnemonic; ///< Instruction mnemonic.
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  SMLoc getLoc() const { return Parser.getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  unsigned parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseMemory(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               unsigned &ErrorInfo,
                               bool MatchingInlineAsm) override;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARM64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

public:
  enum ARM64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARM64GenAsmMatcher.inc"
  };

  ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                 const MCInstrInfo &MII,
                 const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                ARM64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
} // end anonymous namespace

namespace {

/// ARM64Operand - Instances of this class represent a parsed ARM64 machine
/// instruction.
class ARM64Operand : public MCParsedAsmOperand {
public:
  enum MemIdxKindTy {
    ImmediateOffset, // pre-indexed, no writeback
    RegisterOffset   // register offset, with optional extend
  };

private:
  enum KindTy {
    k_Token,
    k_Immediate,
    k_ShiftedImm,
    k_FPImm,
    k_Barrier,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_Memory,
    k_ShiftExtend
  } Kind;

  SMLoc StartLoc, EndLoc, OffsetLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint64_t FeatureBits; // We need to pass through information about which
                          // core we are compiling for so that the SysReg
                          // Mappers can appropriately conditionalize.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
  };

  struct ShiftExtendOp {
    ARM64_AM::ShiftExtendType Type;
    unsigned Amount;
  };

  // This is for all forms of ARM64 address expressions
  struct MemOp {
    unsigned BaseRegNum, OffsetRegNum;
    ARM64_AM::ShiftExtendType ExtType;
    unsigned ShiftVal;
    bool ExplicitShift;
    const MCExpr *OffsetImm;
    MemIdxKindTy Mode;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct ShiftExtendOp ShiftExtend;
    struct MemOp Mem;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  ARM64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
  ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token: Tok = o.Tok; break;
    case k_Immediate: Imm = o.Imm; break;
    case k_ShiftedImm: ShiftedImm = o.ShiftedImm; break;
    case k_FPImm: FPImm = o.FPImm; break;
    case k_Barrier: Barrier = o.Barrier; break;
    case k_Register: Reg = o.Reg; break;
    case k_VectorList: VectorList = o.VectorList; break;
    case k_VectorIndex: VectorIndex = o.VectorIndex; break;
    case k_SysReg: SysReg = o.SysReg; break;
    case k_SysCR: SysCRImm = o.SysCRImm; break;
    case k_Prefetch: Prefetch = o.Prefetch; break;
    case k_Memory: Mem = o.Mem; break;
    case k_ShiftExtend: ShiftExtend = o.ShiftExtend; break;
    }
  }
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }
  /// getOffsetLoc - Get the location of the offset of this memory operand.
  SMLoc getOffsetLoc() const { return OffsetLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  uint64_t getSysRegFeatureBits() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return SysReg.FeatureBits;
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  ARM64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }
  bool isImm() const override { return Kind == k_Immediate; }
  bool isSImm9() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val < 256);
  }
  bool isSImm7s4() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
  }
  bool isSImm7s8() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
  }
  bool isSImm7s16() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
  }
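  // Note: the SImm7sN predicates correspond to the LDP/STP addressing mode,
  // where a signed 7-bit immediate is scaled by the access size (4, 8, or 16
  // bytes). For example "ldp x0, x1, [sp, #16]" is valid (16 is a multiple of
  // 8 and within +/-512), while an offset of #12 only fits the 32-bit form.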
  bool isImm0_7() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 8);
  }
  bool isImm1_8() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 9);
  }
  bool isImm0_15() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 16);
  }
  bool isImm1_16() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 17);
  }
  bool isImm0_31() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 32);
  }
  bool isImm1_31() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 32);
  }
  bool isImm1_32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 33);
  }
  bool isImm0_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }
  bool isImm1_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 64);
  }
  bool isImm1_64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 65);
  }
  bool isImm0_127() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 128);
  }
  bool isImm0_255() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 256);
  }
  bool isImm0_65535() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 65536);
  }
  bool isLogicalImm32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
  }
  bool isLogicalImm64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
  }
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    ARM64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == ARM64MCExpr::VK_LO12
          || ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == ARM64MCExpr::VK_TPREL_HI12
          || ELFRefKind == ARM64MCExpr::VK_TPREL_LO12
          || ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
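  // For example, "add x0, x1, #4095" uses the unshifted form and
  // "add x0, x1, #1, lsl #12" the shifted one; both are accepted here, as is
  // "add x0, x1, :lo12:sym" for the low 12 bits of a symbol's address.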
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
  bool isBranchTarget26() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
  }
  bool isPCRelLabel19() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
  }
  bool isBranchTarget14() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
  }
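  // All three branch-target forms require a word-aligned (4-byte) offset:
  // the 14-bit form used by TBZ/TBNZ spans +/-32KB, the 19-bit form used by
  // CBZ and B.cond spans +/-1MB, and the 26-bit form used by B/BL spans
  // +/-128MB.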
  bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    ARM64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
                                           Addend))
      return false;

    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return true;
    }

    return false;
  }

  bool isMovZSymbolG3() const {
    static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG2() const {
    static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
                                                   ARM64MCExpr::VK_ABS_G2_S,
                                                   ARM64MCExpr::VK_TPREL_G2,
                                                   ARM64MCExpr::VK_DTPREL_G2 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG1() const {
    static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
                                                   ARM64MCExpr::VK_ABS_G1_S,
                                                   ARM64MCExpr::VK_GOTTPREL_G1,
                                                   ARM64MCExpr::VK_TPREL_G1,
                                                   ARM64MCExpr::VK_DTPREL_G1 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG0() const {
    static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
                                                   ARM64MCExpr::VK_ABS_G0_S,
                                                   ARM64MCExpr::VK_TPREL_G0,
                                                   ARM64MCExpr::VK_DTPREL_G0 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG3() const {
    static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG2() const {
    static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG1() const {
    static ARM64MCExpr::VariantKind Variants[] = {
      ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
      ARM64MCExpr::VK_DTPREL_G1_NC
    };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG0() const {
    static ARM64MCExpr::VariantKind Variants[] = {
      ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
      ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
    };
    return isMovWSymbol(Variants);
  }
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)
      return false;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
        return false;

    Value = ~Value;
    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }
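  // These predicates let the matcher resolve the "mov" mnemonic to MOVZ or
  // MOVN. For example "mov x0, #65536" becomes "movz x0, #1, lsl #16", and
  // "mov x0, #-1" becomes "movn x0, #0" since no MOVZ form can encode it.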
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = ARM64SysReg::MRSMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = ARM64SysReg::MSRMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           ARM64MCRegisterClasses[ARM64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }

  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }
  bool isToken() const override { return Kind == k_Token; }
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isMem() const override { return Kind == k_Memory; }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    ARM64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR ||
            ST == ARM64_AM::ROR || ST == ARM64_AM::MSL);
  }
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    ARM64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == ARM64_AM::UXTB || ET == ARM64_AM::SXTB ||
            ET == ARM64_AM::UXTH || ET == ARM64_AM::SXTH ||
            ET == ARM64_AM::UXTW || ET == ARM64_AM::SXTW ||
            ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX ||
            ET == ARM64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    ARM64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
  }
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    ARM64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX ||
            ET == ARM64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
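  // For example, "add x0, x1, w2, sxtw #2" uses a 32-bit source register and
  // matches isExtend64, while "add x0, x1, x2, sxtx #1" needs the
  // ExtendLSL64 class because its extended source register is 64-bit.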
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    ARM64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR ||
            ST == ARM64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    ARM64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR ||
            ST == ARM64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    ARM64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != ARM64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    ARM64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != ARM64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == ARM64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == ARM64_AM::LSL && (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == ARM64_AM::MSL && (Shift == 8 || Shift == 16);
  }
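  // MSL ("masked shift left") only appears on the vector MOVI/MVNI forms,
  // e.g. "movi v0.4s, #0xab, msl #8"; unlike LSL it shifts in ones rather
  // than zeroes.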
  bool isMemoryRegisterOffset8() const {
    return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
  }

  bool isMemoryRegisterOffset16() const {
    return isMem() && Mem.Mode == RegisterOffset &&
           (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
  }

  bool isMemoryRegisterOffset32() const {
    return isMem() && Mem.Mode == RegisterOffset &&
           (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
  }

  bool isMemoryRegisterOffset64() const {
    return isMem() && Mem.Mode == RegisterOffset &&
           (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
  }

  bool isMemoryRegisterOffset128() const {
    return isMem() && Mem.Mode == RegisterOffset &&
           (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
  }

  bool isMemoryUnscaled() const {
    if (!isMem())
      return false;
    if (Mem.Mode != ImmediateOffset)
      return false;
    if (!Mem.OffsetImm)
      return true;
    // Make sure the immediate value is valid.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
    if (!CE)
      return false;
    // The offset must fit in a signed 9-bit unscaled immediate.
    int64_t Value = CE->getValue();
    return (Value >= -256 && Value < 256);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  bool isMemoryUnscaledFB8() const {
    return isMemoryUnscaled() && !isMemoryIndexed8();
  }
  bool isMemoryUnscaledFB16() const {
    return isMemoryUnscaled() && !isMemoryIndexed16();
  }
  bool isMemoryUnscaledFB32() const {
    return isMemoryUnscaled() && !isMemoryIndexed32();
  }
  bool isMemoryUnscaledFB64() const {
    return isMemoryUnscaled() && !isMemoryIndexed64();
  }
  bool isMemoryUnscaledFB128() const {
    return isMemoryUnscaled() && !isMemoryIndexed128();
  }
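  // For example, "ldr x0, [x1, #7]" cannot use the scaled LDR encoding
  // (7 is not a multiple of 8), so the FB64 operand steers it to LDUR;
  // "ldr x0, [x1, #8]" stays with the scaled form.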
  bool isMemoryIndexed(unsigned Scale) const {
    if (!isMem())
      return false;
    if (Mem.Mode != ImmediateOffset)
      return false;
    if (!Mem.OffsetImm)
      return true;
    // Make sure the immediate value is valid.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
    if (CE) {
      // The offset must be a positive multiple of the scale and in range of
      // encoding with a 12-bit immediate.
      int64_t Value = CE->getValue();
      return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
    }

    // If it's not a constant, check for some expressions we know.
    const MCExpr *Expr = Mem.OffsetImm;
    ARM64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == ARM64MCExpr::VK_LO12 ||
        ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
  bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
  bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
  bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
  bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
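  // The scaled forms give LDR/STR a 12-bit unsigned offset in units of the
  // access size: up to #32760 for 8-byte accesses ("ldr x0, [x1, #32760]")
  // but only up to #4095 for byte accesses.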
  bool isMemoryNoIndex() const {
    if (!isMem())
      return false;
    if (Mem.Mode != ImmediateOffset)
      return false;
    if (!Mem.OffsetImm)
      return true;

    // Make sure the immediate value is valid. Only zero is allowed.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
    if (!CE || CE->getValue() != 0)
      return false;
    return true;
  }
  bool isMemorySIMDNoIndex() const {
    if (!isMem())
      return false;
    if (Mem.Mode != ImmediateOffset)
      return false;
    return Mem.OffsetImm == nullptr;
  }
  bool isMemoryIndexedSImm9() const {
    if (!isMem() || Mem.Mode != ImmediateOffset)
      return false;
    if (!Mem.OffsetImm)
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
    assert(CE && "Non-constant pre-indexed offset!");
    int64_t Value = CE->getValue();
    return Value >= -256 && Value <= 255;
  }
  bool isMemoryIndexed32SImm7() const {
    if (!isMem() || Mem.Mode != ImmediateOffset)
      return false;
    if (!Mem.OffsetImm)
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
    assert(CE && "Non-constant pre-indexed offset!");
    int64_t Value = CE->getValue();
    return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
  }
  bool isMemoryIndexed64SImm7() const {
    if (!isMem() || Mem.Mode != ImmediateOffset)
      return false;
    if (!Mem.OffsetImm)
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
    assert(CE && "Non-constant pre-indexed offset!");
    int64_t Value = CE->getValue();
    return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
  }
  bool isMemoryIndexed128SImm7() const {
    if (!isMem() || Mem.Mode != ImmediateOffset)
      return false;
    if (!Mem.OffsetImm)
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
    assert(CE && "Non-constant pre-indexed offset!");
    int64_t Value = CE->getValue();
    return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
  }
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
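  // ADRP's range is +/-4GB in 4KB pages (a signed 21-bit page count), while
  // ADR addresses +/-1MB at byte granularity. The usual ELF idiom pairs them:
  // "adrp x0, sym" followed by "add x0, x0, :lo12:sym" materializes the full
  // address of sym.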
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addVectorRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  template <unsigned NumRegs>
  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
                                    ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
  }

  template <unsigned NumRegs>
  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
                                    ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid logical immediate operand!");
    uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid logical immediate operand!");
    uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
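  // Logical immediates are stored pre-encoded as the N:immr:imms bit pattern
  // describing a rotated run of set bits. For example, 0x00ff00ff00ff00ff is
  // encodable for "and x0, x1, #0x00ff00ff00ff00ff" (a repeating 16-bit
  // element with an 8-bit run of ones), while 0x1234 is not.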
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid immediate operand!");
    uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
  }
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getBarrier()));
  }

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = ARM64SysReg::MRSMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = ARM64SysReg::MSRMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
  }
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        ARM64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    ARM64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == ARM64_AM::LSL) ET = ARM64_AM::UXTW;
    unsigned Imm = ARM64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    ARM64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == ARM64_AM::LSL) ET = ARM64_AM::UXTX;
    unsigned Imm = ARM64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
  template<int Shift = 0>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
  }

  template<int Shift = 0>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
  }
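  // The MOVN path stores the complemented 16-bit chunk; e.g. for
  // "mov w0, #-2" the value 0xfffffffe is emitted as "movn w0, #1" because
  // (~Value >> 0) & 0xffff == 1.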
  void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
    assert(N == 3 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum)));
    unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
    Inst.addOperand(MCOperand::CreateImm(ExtendImm));
  }

  void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
    addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
  }

  void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
    addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
  }

  void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
    addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
  }

  void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
    addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
  }

  void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
    addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
  }
  void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
                                unsigned Scale) const {
    // Add the base register operand.
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));

    if (!Mem.OffsetImm) {
      // There isn't an offset.
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Add the offset operand.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
      assert(CE->getValue() % Scale == 0 &&
             "Offset operand must be multiple of the scale!");

      // The MCInst offset operand doesn't include the low bits (like the
      // instruction encoding).
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
      return;
    }

    // If this is a pageoff symrefexpr with an addend, the linker will
    // do the scaling of the addend.
    //
    // Otherwise we don't know what this is, so just add the scaling divide to
    // the expression and let the MC fixup evaluation code deal with it.
    const MCExpr *Expr = Mem.OffsetImm;
    ARM64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (Scale > 1 &&
        (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                            Addend) ||
         (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
      Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
                                     Ctx);
    }

    Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
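  // For example, on Mach-O "ldr x0, [x1, sym@PAGEOFF + 8]" is emitted
  // unmodified because the linker scales the @PAGEOFF addend itself; any
  // other non-constant offset expression is wrapped as (expr / Scale) so the
  // fixup sees the encoded (scaled-down) value.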
  void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
    // Add the base register operand.
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));

    // Add the offset operand.
    if (!Mem.OffsetImm)
      Inst.addOperand(MCOperand::CreateImm(0));
    else {
      // Only constant offsets supported.
      const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    }
  }

  void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
    addMemoryIndexedOperands(Inst, N, 16);
  }

  void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
    addMemoryIndexedOperands(Inst, N, 8);
  }

  void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
    addMemoryIndexedOperands(Inst, N, 4);
  }

  void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
    addMemoryIndexedOperands(Inst, N, 2);
  }

  void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
    addMemoryIndexedOperands(Inst, N, 1);
  }

  void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
    // Add the base register operand (the offset is always zero, so ignore it).
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
  }

  void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
    // Add the base register operand (the offset is always zero, so ignore it).
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
  }
  void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
                                         unsigned Scale) const {
    assert(N == 2 && "Invalid number of operands!");

    // Add the base register operand.
    Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));

    // Add the offset operand.
    int64_t Offset = 0;
    if (Mem.OffsetImm) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
      assert(CE && "Non-constant indexed offset operand!");
      Offset = CE->getValue();
    }

    if (Scale != 1) {
      assert(Offset % Scale == 0 &&
             "Offset operand must be a multiple of the scale!");
      Offset /= Scale;
    }

    Inst.addOperand(MCOperand::CreateImm(Offset));
  }

  void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
    addMemoryWritebackIndexedOperands(Inst, N, 1);
  }

  void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
    addMemoryWritebackIndexedOperands(Inst, N, 4);
  }

  void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
    addMemoryWritebackIndexedOperands(Inst, N, 8);
  }

  void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
    addMemoryWritebackIndexedOperands(Inst, N, 16);
  }
  void print(raw_ostream &OS) const override;

  static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
                                   MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
                                 SMLoc E, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
                                        unsigned NumElements, char ElementKind,
                                        SMLoc S, SMLoc E, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                         MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
                                 MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreateShiftedImm(const MCExpr *Val, unsigned ShiftAmount,
                                        SMLoc S, SMLoc E, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_ShiftedImm, Ctx);
    Op->ShiftedImm.Val = Val;
    Op->ShiftedImm.ShiftAmount = ShiftAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S,
                                    uint64_t FeatureBits, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.FeatureBits = FeatureBits;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
                                 SMLoc S, SMLoc E, SMLoc OffsetLoc,
                                 MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
    Op->Mem.BaseRegNum = BaseRegNum;
    Op->Mem.OffsetRegNum = 0;
    Op->Mem.OffsetImm = Off;
    Op->Mem.ExtType = ARM64_AM::UXTX;
    Op->Mem.ShiftVal = 0;
    Op->Mem.ExplicitShift = false;
    Op->Mem.Mode = ImmediateOffset;
    Op->OffsetLoc = OffsetLoc;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
                                          ARM64_AM::ShiftExtendType ExtType,
                                          unsigned ShiftVal, bool ExplicitShift,
                                          SMLoc S, SMLoc E, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
    Op->Mem.BaseRegNum = BaseReg;
    Op->Mem.OffsetRegNum = OffsetReg;
    Op->Mem.OffsetImm = nullptr;
    Op->Mem.ExtType = ExtType;
    Op->Mem.ShiftVal = ShiftVal;
    Op->Mem.ExplicitShift = ExplicitShift;
    Op->Mem.Mode = RegisterOffset;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
                                   MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARM64Operand *CreateShiftExtend(ARM64_AM::ShiftExtendType ShOp,
                                         unsigned Val, SMLoc S, SMLoc E,
                                         MCContext &Ctx) {
    ARM64Operand *Op = new ARM64Operand(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
};

} // end anonymous namespace.
void ARM64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm() << "("
       << ARM64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    bool Valid;
    StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid);
    if (Valid)
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    getShiftedImmVal()->print(OS);
    OS << ", lsl #" << ARM64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_Memory:
    OS << "<memory>";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    bool Valid;
    StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
    if (Valid)
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << ARM64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount() << ">";
    break;
  }
  }
}
/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name)
      .Case("v0", ARM64::Q0)
      .Case("v1", ARM64::Q1)
      .Case("v2", ARM64::Q2)
      .Case("v3", ARM64::Q3)
      .Case("v4", ARM64::Q4)
      .Case("v5", ARM64::Q5)
      .Case("v6", ARM64::Q6)
      .Case("v7", ARM64::Q7)
      .Case("v8", ARM64::Q8)
      .Case("v9", ARM64::Q9)
      .Case("v10", ARM64::Q10)
      .Case("v11", ARM64::Q11)
      .Case("v12", ARM64::Q12)
      .Case("v13", ARM64::Q13)
      .Case("v14", ARM64::Q14)
      .Case("v15", ARM64::Q15)
      .Case("v16", ARM64::Q16)
      .Case("v17", ARM64::Q17)
      .Case("v18", ARM64::Q18)
      .Case("v19", ARM64::Q19)
      .Case("v20", ARM64::Q20)
      .Case("v21", ARM64::Q21)
      .Case("v22", ARM64::Q22)
      .Case("v23", ARM64::Q23)
      .Case("v24", ARM64::Q24)
      .Case("v25", ARM64::Q25)
      .Case("v26", ARM64::Q26)
      .Case("v27", ARM64::Q27)
      .Case("v28", ARM64::Q28)
      .Case("v29", ARM64::Q29)
      .Case("v30", ARM64::Q30)
      .Case("v31", ARM64::Q31)
      .Default(0);
}
static bool isValidVectorKind(StringRef Name) {
  return StringSwitch<bool>(Name.lower())
      .Case(".8b", true)
      .Case(".16b", true)
      .Case(".4h", true)
      .Case(".8h", true)
      .Case(".2s", true)
      .Case(".4s", true)
      .Case(".1d", true)
      .Case(".2d", true)
      .Case(".1q", true)
      // Accept the width neutral ones, too, for verbose syntax. If those
      // aren't used in the right places, the token operand won't match so
      // all will work out.
      .Case(".b", true)
      .Case(".h", true)
      .Case(".s", true)
      .Case(".d", true)
      .Default(false);
}

static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
                                 char &ElementKind) {
  assert(isValidVectorKind(Name));

  ElementKind = Name.lower()[Name.size() - 1];
  NumElements = 0;

  if (Name.size() == 2)
    return;

  // Parse the lane count
  Name = Name.drop_front();
  while (isdigit(Name.front())) {
    NumElements = 10 * NumElements + (Name.front() - '0');
    Name = Name.drop_front();
  }
}
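// For example, "v2.4s" names register Q2 with NumElements == 4 and
// ElementKind == 's', while the width-neutral "v2.s" leaves NumElements
// at 0.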
bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                   SMLoc &EndLoc) {
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return (RegNo == (unsigned)-1);
}
/// tryParseRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.
int ARM64AsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  // Also handle a few aliases of registers.
  if (RegNum == 0)
    RegNum = StringSwitch<unsigned>(lowerCase)
                 .Case("fp", ARM64::FP)
                 .Case("lr", ARM64::LR)
                 .Case("x31", ARM64::XZR)
                 .Case("w31", ARM64::WZR)
                 .Default(0);

  if (RegNum == 0)
    return -1;

  Parser.Lex(); // Eat identifier token.
  return RegNum;
}
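// Note that "fp" and "lr" are accepted as aliases for x29 and x30, and that
// "x31"/"w31" are mapped to the zero registers (XZR/WZR) here rather than
// rejected.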
2033 /// tryMatchVectorRegister - Try to parse a vector register name with optional
2034 /// kind specifier. If it is a register specifier, eat the token and return it.
2035 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2036 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2037 TokError("vector register expected");
2041 StringRef Name = Parser.getTok().getString();
2042 // If there is a kind specifier, it's separated from the register name by a '.'.
2044 size_t Start = 0, Next = Name.find('.');
2045 StringRef Head = Name.slice(Start, Next);
2046 unsigned RegNum = matchVectorRegName(Head);
2048 if (Next != StringRef::npos) {
2049 Kind = Name.slice(Next, StringRef::npos);
2050 if (!isValidVectorKind(Kind)) {
2051 TokError("invalid vector kind qualifier");
2055 Parser.Lex(); // Eat the register token.
2060 TokError("vector register expected");
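/// MatchSysCRName - Match a system instruction CR operand name ("c0".."c15",
/// case-insensitive) to its numeric value; a negative value indicates failure
/// (assumed from how the caller rejects the result).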
2064 static int MatchSysCRName(StringRef Name) {
2065 // Use the same layout as the tablegen'erated register name matcher. Ugly, but efficient.
2067 switch (Name.size()) {
2071 if (Name[0] != 'c' && Name[0] != 'C')
2099 if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
2120 llvm_unreachable("Unhandled SysCR operand string!");
2124 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2125 ARM64AsmParser::OperandMatchResultTy
2126 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2128 const AsmToken &Tok = Parser.getTok();
2129 if (Tok.isNot(AsmToken::Identifier))
2130 return MatchOperand_NoMatch;
2132 int Num = MatchSysCRName(Tok.getString());
2134 return MatchOperand_NoMatch;
2136 Parser.Lex(); // Eat identifier token.
2137 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
2138 return MatchOperand_Success;
2141 /// tryParsePrefetch - Try to parse a prefetch operand.
2142 ARM64AsmParser::OperandMatchResultTy
2143 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2145 const AsmToken &Tok = Parser.getTok();
2146 // Either an identifier for named values or a 5-bit immediate.
2147 bool Hash = Tok.is(AsmToken::Hash);
2148 if (Hash || Tok.is(AsmToken::Integer)) {
2150 Parser.Lex(); // Eat hash token.
2151 const MCExpr *ImmVal;
2152 if (getParser().parseExpression(ImmVal))
2153 return MatchOperand_ParseFail;
2155 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2157 TokError("immediate value expected for prefetch operand");
2158 return MatchOperand_ParseFail;
2160 unsigned prfop = MCE->getValue();
2162 TokError("prefetch operand out of range, [0,31] expected");
2163 return MatchOperand_ParseFail;
2166 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2167 return MatchOperand_Success;
2170 if (Tok.isNot(AsmToken::Identifier)) {
2171 TokError("pre-fetch hint expected");
2172 return MatchOperand_ParseFail;
2176 unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
2178 TokError("pre-fetch hint expected");
2179 return MatchOperand_ParseFail;
2182 Parser.Lex(); // Eat identifier token.
2183 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2184 return MatchOperand_Success;
2187 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP instruction.
2189 ARM64AsmParser::OperandMatchResultTy
2190 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2194 if (Parser.getTok().is(AsmToken::Hash)) {
2195 Parser.Lex(); // Eat hash token.
2198 if (parseSymbolicImmVal(Expr))
2199 return MatchOperand_ParseFail;
2201 ARM64MCExpr::VariantKind ELFRefKind;
2202 MCSymbolRefExpr::VariantKind DarwinRefKind;
2204 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2205 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2206 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2207 // No modifier was specified at all; this is the syntax for an ELF basic
2208 // ADRP relocation (unfortunately).
2209 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
2210 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2211 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2213 Error(S, "gotpage label reference cannot have an addend");
2214 return MatchOperand_ParseFail;
2215 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2216 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2217 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2218 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2219 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2220 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
2221 // The operand must be an @page or @gotpage qualified symbolref.
2222 Error(S, "page or gotpage label reference expected");
2223 return MatchOperand_ParseFail;
2227 // We have either a label reference possibly with addend or an immediate. The
2228 // addend is a raw value here. The linker will adjust it to only reference the page.
2230 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2231 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2233 return MatchOperand_Success;
2236 /// tryParseAdrLabel - Parse and validate a source label for the ADR instruction.
2238 ARM64AsmParser::OperandMatchResultTy
2239 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2243 if (Parser.getTok().is(AsmToken::Hash)) {
2244 Parser.Lex(); // Eat hash token.
2247 if (getParser().parseExpression(Expr))
2248 return MatchOperand_ParseFail;
2250 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2251 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2253 return MatchOperand_Success;
2256 /// tryParseFPImm - Parse a floating-point immediate expression operand.
2257 ARM64AsmParser::OperandMatchResultTy
2258 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
2262 if (Parser.getTok().is(AsmToken::Hash)) {
2263 Parser.Lex(); // Eat '#'
2267 // Handle negation, as that still comes through as a separate token.
2268 bool isNegative = false;
2269 if (Parser.getTok().is(AsmToken::Minus)) {
2273 const AsmToken &Tok = Parser.getTok();
2274 if (Tok.is(AsmToken::Real)) {
2275 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2276 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2277 // If we had a '-' in front, toggle the sign bit.
2278 IntVal ^= (uint64_t)isNegative << 63;
2279 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2280 Parser.Lex(); // Eat the token.
2281 // Check for out of range values. As an exception, we let Zero through,
2282 // as we handle that special case in post-processing before matching in
2283 // order to use the zero register for it.
2284 if (Val == -1 && !RealVal.isZero()) {
2285 TokError("floating point value out of range");
2286 return MatchOperand_ParseFail;
2288 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2289 return MatchOperand_Success;
2291 if (Tok.is(AsmToken::Integer)) {
2293 if (!isNegative && Tok.getString().startswith("0x")) {
2294 Val = Tok.getIntVal();
2295 if (Val > 255 || Val < 0) {
2296 TokError("encoded floating point value out of range");
2297 return MatchOperand_ParseFail;
2300 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2301 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2302 // If we had a '-' in front, toggle the sign bit.
2303 IntVal ^= (uint64_t)isNegative << 63;
2304 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2306 Parser.Lex(); // Eat the token.
2307 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2308 return MatchOperand_Success;
2312 return MatchOperand_NoMatch;
2314 TokError("invalid floating point immediate");
2315 return MatchOperand_ParseFail;
2318 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2319 ARM64AsmParser::OperandMatchResultTy
2320 ARM64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2323 if (Parser.getTok().is(AsmToken::Hash))
2324 Parser.Lex(); // Eat '#'
2325 else if (Parser.getTok().isNot(AsmToken::Integer))
2326 // The operand must start with '#' or be an integer; anything else is not a match.
2327 return MatchOperand_NoMatch;
2330 if (parseSymbolicImmVal(Imm))
2331 return MatchOperand_ParseFail;
2332 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2333 uint64_t ShiftAmount = 0;
2334 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2336 int64_t Val = MCE->getValue();
2337 if (Val > 0xfff && (Val & 0xfff) == 0) {
2338 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2342 SMLoc E = Parser.getTok().getLoc();
2343 Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2345 return MatchOperand_Success;
2351 // The optional operand must be "lsl #N" where N is non-negative.
2352 if (!Parser.getTok().is(AsmToken::Identifier) ||
2353 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2354 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2355 return MatchOperand_ParseFail;
2361 if (Parser.getTok().is(AsmToken::Hash)) {
2365 if (Parser.getTok().isNot(AsmToken::Integer)) {
2366 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2367 return MatchOperand_ParseFail;
2370 int64_t ShiftAmount = Parser.getTok().getIntVal();
2372 if (ShiftAmount < 0) {
2373 Error(Parser.getTok().getLoc(), "positive shift amount required");
2374 return MatchOperand_ParseFail;
2376 Parser.Lex(); // Eat the number
2378 SMLoc E = Parser.getTok().getLoc();
2379 Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount,
2380 S, E, getContext()));
2381 return MatchOperand_Success;
2384 /// parseCondCodeString - Parse a Condition Code string.
2385 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2386 unsigned CC = StringSwitch<unsigned>(Cond.lower())
2387 .Case("eq", ARM64CC::EQ)
2388 .Case("ne", ARM64CC::NE)
2389 .Case("cs", ARM64CC::HS)
2390 .Case("hs", ARM64CC::HS)
2391 .Case("cc", ARM64CC::LO)
2392 .Case("lo", ARM64CC::LO)
2393 .Case("mi", ARM64CC::MI)
2394 .Case("pl", ARM64CC::PL)
2395 .Case("vs", ARM64CC::VS)
2396 .Case("vc", ARM64CC::VC)
2397 .Case("hi", ARM64CC::HI)
2398 .Case("ls", ARM64CC::LS)
2399 .Case("ge", ARM64CC::GE)
2400 .Case("lt", ARM64CC::LT)
2401 .Case("gt", ARM64CC::GT)
2402 .Case("le", ARM64CC::LE)
2403 .Case("al", ARM64CC::AL)
2404 .Case("nv", ARM64CC::NV)
2405 .Default(ARM64CC::Invalid);
2409 /// parseCondCode - Parse a Condition Code operand.
2410 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2411 bool invertCondCode) {
2413 const AsmToken &Tok = Parser.getTok();
2414 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2416 StringRef Cond = Tok.getString();
2417 unsigned CC = parseCondCodeString(Cond);
2418 if (CC == ARM64CC::Invalid)
2419 return TokError("invalid condition code");
2420 Parser.Lex(); // Eat identifier token.
2423 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
2425 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2427 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2431 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
2432 /// extend argument. Parse them if present.
2433 ARM64AsmParser::OperandMatchResultTy
2434 ARM64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2435 const AsmToken &Tok = Parser.getTok();
2436 std::string LowerID = Tok.getString().lower();
2437 ARM64_AM::ShiftExtendType ShOp =
2438 StringSwitch<ARM64_AM::ShiftExtendType>(LowerID)
2439 .Case("lsl", ARM64_AM::LSL)
2440 .Case("lsr", ARM64_AM::LSR)
2441 .Case("asr", ARM64_AM::ASR)
2442 .Case("ror", ARM64_AM::ROR)
2443 .Case("msl", ARM64_AM::MSL)
2444 .Case("uxtb", ARM64_AM::UXTB)
2445 .Case("uxth", ARM64_AM::UXTH)
2446 .Case("uxtw", ARM64_AM::UXTW)
2447 .Case("uxtx", ARM64_AM::UXTX)
2448 .Case("sxtb", ARM64_AM::SXTB)
2449 .Case("sxth", ARM64_AM::SXTH)
2450 .Case("sxtw", ARM64_AM::SXTW)
2451 .Case("sxtx", ARM64_AM::SXTX)
2452 .Default(ARM64_AM::InvalidShiftExtend);
2454 if (ShOp == ARM64_AM::InvalidShiftExtend)
2455 return MatchOperand_NoMatch;
2457 SMLoc S = Tok.getLoc();
2460 bool Hash = getLexer().is(AsmToken::Hash);
2461 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2462 if (ShOp == ARM64_AM::LSL || ShOp == ARM64_AM::LSR ||
2463 ShOp == ARM64_AM::ASR || ShOp == ARM64_AM::ROR ||
2464 ShOp == ARM64_AM::MSL) {
2465 // We expect a number here.
2466 TokError("expected #imm after shift specifier");
2467 return MatchOperand_ParseFail;
2470 // "extend" type operatoins don't need an immediate, #0 is implicit.
2471 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2473 ARM64Operand::CreateShiftExtend(ShOp, 0, S, E, getContext()));
2474 return MatchOperand_Success;
2478 Parser.Lex(); // Eat the '#'.
2480 // Make sure we actually have a number.
2481 if (!Parser.getTok().is(AsmToken::Integer)) {
2482 Error(Parser.getTok().getLoc(),
2483 "expected integer shift amount");
2484 return MatchOperand_ParseFail;
2487 const MCExpr *ImmVal;
2488 if (getParser().parseExpression(ImmVal))
2489 return MatchOperand_ParseFail;
2491 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2493 TokError("expected #imm after shift specifier");
2494 return MatchOperand_ParseFail;
2497 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2498 Operands.push_back(ARM64Operand::CreateShiftExtend(ShOp, MCE->getValue(), S,
2500 return MatchOperand_Success;
2503 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2504 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2505 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2506 OperandVector &Operands) {
2507 if (Name.find('.') != StringRef::npos)
2508 return TokError("invalid operand");
2512 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2514 const AsmToken &Tok = Parser.getTok();
2515 StringRef Op = Tok.getString();
2516 SMLoc S = Tok.getLoc();
2518 const MCExpr *Expr = nullptr;
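// SYS_ALIAS - Push the op1, Cn, Cm and op2 operands that encode one SYS
// alias onto the operand list in a single step.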
2520 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2522 Expr = MCConstantExpr::Create(op1, getContext()); \
2523 Operands.push_back( \
2524 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2525 Operands.push_back( \
2526 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2527 Operands.push_back( \
2528 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2529 Expr = MCConstantExpr::Create(op2, getContext()); \
2530 Operands.push_back( \
2531 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2534 if (Mnemonic == "ic") {
2535 if (!Op.compare_lower("ialluis")) {
2536 // SYS #0, C7, C1, #0
2537 SYS_ALIAS(0, 7, 1, 0);
2538 } else if (!Op.compare_lower("iallu")) {
2539 // SYS #0, C7, C5, #0
2540 SYS_ALIAS(0, 7, 5, 0);
2541 } else if (!Op.compare_lower("ivau")) {
2542 // SYS #3, C7, C5, #1
2543 SYS_ALIAS(3, 7, 5, 1);
2545 return TokError("invalid operand for IC instruction");
2547 } else if (Mnemonic == "dc") {
2548 if (!Op.compare_lower("zva")) {
2549 // SYS #3, C7, C4, #1
2550 SYS_ALIAS(3, 7, 4, 1);
2551 } else if (!Op.compare_lower("ivac")) {
2552 // SYS #0, C7, C6, #1
2553 SYS_ALIAS(0, 7, 6, 1);
2554 } else if (!Op.compare_lower("isw")) {
2555 // SYS #0, C7, C6, #2
2556 SYS_ALIAS(0, 7, 6, 2);
2557 } else if (!Op.compare_lower("cvac")) {
2558 // SYS #3, C7, C10, #1
2559 SYS_ALIAS(3, 7, 10, 1);
2560 } else if (!Op.compare_lower("csw")) {
2561 // SYS #0, C7, C10, #2
2562 SYS_ALIAS(0, 7, 10, 2);
2563 } else if (!Op.compare_lower("cvau")) {
2564 // SYS #3, C7, C11, #1
2565 SYS_ALIAS(3, 7, 11, 1);
2566 } else if (!Op.compare_lower("civac")) {
2567 // SYS #3, C7, C14, #1
2568 SYS_ALIAS(3, 7, 14, 1);
2569 } else if (!Op.compare_lower("cisw")) {
2570 // SYS #0, C7, C14, #2
2571 SYS_ALIAS(0, 7, 14, 2);
2573 return TokError("invalid operand for DC instruction");
2575 } else if (Mnemonic == "at") {
2576 if (!Op.compare_lower("s1e1r")) {
2577 // SYS #0, C7, C8, #0
2578 SYS_ALIAS(0, 7, 8, 0);
2579 } else if (!Op.compare_lower("s1e2r")) {
2580 // SYS #4, C7, C8, #0
2581 SYS_ALIAS(4, 7, 8, 0);
2582 } else if (!Op.compare_lower("s1e3r")) {
2583 // SYS #6, C7, C8, #0
2584 SYS_ALIAS(6, 7, 8, 0);
2585 } else if (!Op.compare_lower("s1e1w")) {
2586 // SYS #0, C7, C8, #1
2587 SYS_ALIAS(0, 7, 8, 1);
2588 } else if (!Op.compare_lower("s1e2w")) {
2589 // SYS #4, C7, C8, #1
2590 SYS_ALIAS(4, 7, 8, 1);
2591 } else if (!Op.compare_lower("s1e3w")) {
2592 // SYS #6, C7, C8, #1
2593 SYS_ALIAS(6, 7, 8, 1);
2594 } else if (!Op.compare_lower("s1e0r")) {
2595 // SYS #0, C7, C8, #2
2596 SYS_ALIAS(0, 7, 8, 2);
2597 } else if (!Op.compare_lower("s1e0w")) {
2598 // SYS #0, C7, C8, #3
2599 SYS_ALIAS(0, 7, 8, 3);
2600 } else if (!Op.compare_lower("s12e1r")) {
2601 // SYS #4, C7, C8, #4
2602 SYS_ALIAS(4, 7, 8, 4);
2603 } else if (!Op.compare_lower("s12e1w")) {
2604 // SYS #4, C7, C8, #5
2605 SYS_ALIAS(4, 7, 8, 5);
2606 } else if (!Op.compare_lower("s12e0r")) {
2607 // SYS #4, C7, C8, #6
2608 SYS_ALIAS(4, 7, 8, 6);
2609 } else if (!Op.compare_lower("s12e0w")) {
2610 // SYS #4, C7, C8, #7
2611 SYS_ALIAS(4, 7, 8, 7);
2613 return TokError("invalid operand for AT instruction");
2615 } else if (Mnemonic == "tlbi") {
2616 if (!Op.compare_lower("vmalle1is")) {
2617 // SYS #0, C8, C3, #0
2618 SYS_ALIAS(0, 8, 3, 0);
2619 } else if (!Op.compare_lower("alle2is")) {
2620 // SYS #4, C8, C3, #0
2621 SYS_ALIAS(4, 8, 3, 0);
2622 } else if (!Op.compare_lower("alle3is")) {
2623 // SYS #6, C8, C3, #0
2624 SYS_ALIAS(6, 8, 3, 0);
2625 } else if (!Op.compare_lower("vae1is")) {
2626 // SYS #0, C8, C3, #1
2627 SYS_ALIAS(0, 8, 3, 1);
2628 } else if (!Op.compare_lower("vae2is")) {
2629 // SYS #4, C8, C3, #1
2630 SYS_ALIAS(4, 8, 3, 1);
2631 } else if (!Op.compare_lower("vae3is")) {
2632 // SYS #6, C8, C3, #1
2633 SYS_ALIAS(6, 8, 3, 1);
2634 } else if (!Op.compare_lower("aside1is")) {
2635 // SYS #0, C8, C3, #2
2636 SYS_ALIAS(0, 8, 3, 2);
2637 } else if (!Op.compare_lower("vaae1is")) {
2638 // SYS #0, C8, C3, #3
2639 SYS_ALIAS(0, 8, 3, 3);
2640 } else if (!Op.compare_lower("alle1is")) {
2641 // SYS #4, C8, C3, #4
2642 SYS_ALIAS(4, 8, 3, 4);
2643 } else if (!Op.compare_lower("vale1is")) {
2644 // SYS #0, C8, C3, #5
2645 SYS_ALIAS(0, 8, 3, 5);
2646 } else if (!Op.compare_lower("vaale1is")) {
2647 // SYS #0, C8, C3, #7
2648 SYS_ALIAS(0, 8, 3, 7);
2649 } else if (!Op.compare_lower("vmalle1")) {
2650 // SYS #0, C8, C7, #0
2651 SYS_ALIAS(0, 8, 7, 0);
2652 } else if (!Op.compare_lower("alle2")) {
2653 // SYS #4, C8, C7, #0
2654 SYS_ALIAS(4, 8, 7, 0);
2655 } else if (!Op.compare_lower("vale2is")) {
2656 // SYS #4, C8, C3, #5
2657 SYS_ALIAS(4, 8, 3, 5);
2658 } else if (!Op.compare_lower("vale3is")) {
2659 // SYS #6, C8, C3, #5
2660 SYS_ALIAS(6, 8, 3, 5);
2661 } else if (!Op.compare_lower("alle3")) {
2662 // SYS #6, C8, C7, #0
2663 SYS_ALIAS(6, 8, 7, 0);
2664 } else if (!Op.compare_lower("vae1")) {
2665 // SYS #0, C8, C7, #1
2666 SYS_ALIAS(0, 8, 7, 1);
2667 } else if (!Op.compare_lower("vae2")) {
2668 // SYS #4, C8, C7, #1
2669 SYS_ALIAS(4, 8, 7, 1);
2670 } else if (!Op.compare_lower("vae3")) {
2671 // SYS #6, C8, C7, #1
2672 SYS_ALIAS(6, 8, 7, 1);
2673 } else if (!Op.compare_lower("aside1")) {
2674 // SYS #0, C8, C7, #2
2675 SYS_ALIAS(0, 8, 7, 2);
2676 } else if (!Op.compare_lower("vaae1")) {
2677 // SYS #0, C8, C7, #3
2678 SYS_ALIAS(0, 8, 7, 3);
2679 } else if (!Op.compare_lower("alle1")) {
2680 // SYS #4, C8, C7, #4
2681 SYS_ALIAS(4, 8, 7, 4);
2682 } else if (!Op.compare_lower("vale1")) {
2683 // SYS #0, C8, C7, #5
2684 SYS_ALIAS(0, 8, 7, 5);
2685 } else if (!Op.compare_lower("vale2")) {
2686 // SYS #4, C8, C7, #5
2687 SYS_ALIAS(4, 8, 7, 5);
2688 } else if (!Op.compare_lower("vale3")) {
2689 // SYS #6, C8, C7, #5
2690 SYS_ALIAS(6, 8, 7, 5);
2691 } else if (!Op.compare_lower("vaale1")) {
2692 // SYS #0, C8, C7, #7
2693 SYS_ALIAS(0, 8, 7, 7);
2694 } else if (!Op.compare_lower("ipas2e1")) {
2695 // SYS #4, C8, C4, #1
2696 SYS_ALIAS(4, 8, 4, 1);
2697 } else if (!Op.compare_lower("ipas2le1")) {
2698 // SYS #4, C8, C4, #5
2699 SYS_ALIAS(4, 8, 4, 5);
2700 } else if (!Op.compare_lower("ipas2e1is")) {
2701 // SYS #4, C8, C0, #1
2702 SYS_ALIAS(4, 8, 0, 1);
2703 } else if (!Op.compare_lower("ipas2le1is")) {
2704 // SYS #4, C8, C0, #5
2705 SYS_ALIAS(4, 8, 0, 5);
2706 } else if (!Op.compare_lower("vmalls12e1")) {
2707 // SYS #4, C8, C7, #6
2708 SYS_ALIAS(4, 8, 7, 6);
2709 } else if (!Op.compare_lower("vmalls12e1is")) {
2710 // SYS #4, C8, C3, #6
2711 SYS_ALIAS(4, 8, 3, 6);
2713 return TokError("invalid operand for TLBI instruction");
2719 Parser.Lex(); // Eat operand.
2721 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2722 bool HasRegister = false;
2724 // Check for the optional register operand.
2725 if (getLexer().is(AsmToken::Comma)) {
2726 Parser.Lex(); // Eat comma.
2728 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2729 return TokError("expected register operand");
2734 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2735 Parser.eatToEndOfStatement();
2736 return TokError("unexpected token in argument list");
2739 if (ExpectRegister && !HasRegister) {
2740 return TokError("specified " + Mnemonic + " op requires a register");
2742 else if (!ExpectRegister && HasRegister) {
2743 return TokError("specified " + Mnemonic + " op does not use a register");
2746 Parser.Lex(); // Consume the EndOfStatement
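/// tryParseBarrierOperand - Parse a barrier (DMB/DSB/ISB) operand, either a
/// named option or a #imm in the range [0, 15].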
2750 ARM64AsmParser::OperandMatchResultTy
2751 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2752 const AsmToken &Tok = Parser.getTok();
2754 // Can be either a #imm style literal or an option name
2755 bool Hash = Tok.is(AsmToken::Hash);
2756 if (Hash || Tok.is(AsmToken::Integer)) {
2757 // Immediate operand.
2759 Parser.Lex(); // Eat the '#'
2760 const MCExpr *ImmVal;
2761 SMLoc ExprLoc = getLoc();
2762 if (getParser().parseExpression(ImmVal))
2763 return MatchOperand_ParseFail;
2764 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2766 Error(ExprLoc, "immediate value expected for barrier operand");
2767 return MatchOperand_ParseFail;
2769 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2770 Error(ExprLoc, "barrier operand out of range");
2771 return MatchOperand_ParseFail;
2774 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2775 return MatchOperand_Success;
2778 if (Tok.isNot(AsmToken::Identifier)) {
2779 TokError("invalid operand for instruction");
2780 return MatchOperand_ParseFail;
2784 unsigned Opt = ARM64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2786 TokError("invalid barrier option name");
2787 return MatchOperand_ParseFail;
2790 // The only valid named option for ISB is 'sy'
2791 if (Mnemonic == "isb" && Opt != ARM64DB::SY) {
2792 TokError("'sy' or #imm operand expected");
2793 return MatchOperand_ParseFail;
2796 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2797 Parser.Lex(); // Consume the option
2799 return MatchOperand_Success;
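/// tryParseSysReg - Parse a system register operand for MRS/MSR. The name is
/// recorded as-is here; validation is assumed to happen later, during operand
/// matching.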
2802 ARM64AsmParser::OperandMatchResultTy
2803 ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
2804 const AsmToken &Tok = Parser.getTok();
2806 if (Tok.isNot(AsmToken::Identifier))
2807 return MatchOperand_NoMatch;
2809 Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
2810 STI.getFeatureBits(), getContext()));
2811 Parser.Lex(); // Eat identifier
2813 return MatchOperand_Success;
2816 /// tryParseVectorRegister - Parse a vector register operand.
2817 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2818 if (Parser.getTok().isNot(AsmToken::Identifier))
2822 // Check for a vector register specifier first.
2824 int64_t Reg = tryMatchVectorRegister(Kind, false);
2828 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2829 // If there was an explicit qualifier, that goes on as a literal text operand.
2832 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
2834 // If there is an index specifier following the register, parse that too.
2835 if (Parser.getTok().is(AsmToken::LBrac)) {
2836 SMLoc SIdx = getLoc();
2837 Parser.Lex(); // Eat left bracket token.
2839 const MCExpr *ImmVal;
2840 if (getParser().parseExpression(ImmVal))
2842 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2844 TokError("immediate value expected for vector index");
2849 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2850 Error(E, "']' expected");
2854 Parser.Lex(); // Eat right bracket token.
2856 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
2863 /// parseRegister - Parse a non-vector register operand.
2864 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
2866 // Try for a vector register.
2867 if (!tryParseVectorRegister(Operands))
2870 // Try for a scalar register.
2871 int64_t Reg = tryParseRegister();
2875 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2877 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2878 // as a string token in the instruction itself.
2879 if (getLexer().getKind() == AsmToken::LBrac) {
2880 SMLoc LBracS = getLoc();
2882 const AsmToken &Tok = Parser.getTok();
2883 if (Tok.is(AsmToken::Integer)) {
2884 SMLoc IntS = getLoc();
2885 int64_t Val = Tok.getIntVal();
2888 if (getLexer().getKind() == AsmToken::RBrac) {
2889 SMLoc RBracS = getLoc();
2892 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
2894 ARM64Operand::CreateToken("1", false, IntS, getContext()));
2896 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
2906 /// tryParseNoIndexMemory - Custom parser method for memory operands that
2907 /// do not allow base register writeback modes, or those that handle
2908 /// writeback separately from the memory operand (like the AdvSIMD
2909 /// ldX/stX instructions).
2911 ARM64AsmParser::OperandMatchResultTy
2912 ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
2913 if (Parser.getTok().isNot(AsmToken::LBrac))
2914 return MatchOperand_NoMatch;
2916 Parser.Lex(); // Eat left bracket token.
2918 const AsmToken &BaseRegTok = Parser.getTok();
2919 if (BaseRegTok.isNot(AsmToken::Identifier)) {
2920 Error(BaseRegTok.getLoc(), "register expected");
2921 return MatchOperand_ParseFail;
2924 int64_t Reg = tryParseRegister();
2926 Error(BaseRegTok.getLoc(), "register expected");
2927 return MatchOperand_ParseFail;
2931 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2932 Error(E, "']' expected");
2933 return MatchOperand_ParseFail;
2936 Parser.Lex(); // Eat right bracket token.
2938 Operands.push_back(ARM64Operand::CreateMem(Reg, nullptr, S, E, E, getContext()));
2939 return MatchOperand_Success;
2942 /// parseMemory - Parse a memory operand for a basic load/store instruction.
2943 bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
2944 assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
2946 Parser.Lex(); // Eat left bracket token.
2948 const AsmToken &BaseRegTok = Parser.getTok();
2949 if (BaseRegTok.isNot(AsmToken::Identifier))
2950 return Error(BaseRegTok.getLoc(), "register expected");
2952 int64_t Reg = tryParseRegister();
2954 return Error(BaseRegTok.getLoc(), "register expected");
2956 // If there is an offset expression, parse it.
2957 const MCExpr *OffsetExpr = nullptr;
2959 if (Parser.getTok().is(AsmToken::Comma)) {
2960 Parser.Lex(); // Eat the comma.
2961 OffsetLoc = getLoc();
2964 const AsmToken &OffsetRegTok = Parser.getTok();
2965 int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
2967 // Default shift is LSL, with an omitted shift. We use the third bit of
2968 // the extend value to indicate presence/omission of the immediate offset.
2969 ARM64_AM::ShiftExtendType ExtOp = ARM64_AM::UXTX;
2970 int64_t ShiftVal = 0;
2971 bool ExplicitShift = false;
2973 if (Parser.getTok().is(AsmToken::Comma)) {
2974 // Embedded extend operand.
2975 Parser.Lex(); // Eat the comma
2977 SMLoc ExtLoc = getLoc();
2978 const AsmToken &Tok = Parser.getTok();
2979 ExtOp = StringSwitch<ARM64_AM::ShiftExtendType>(Tok.getString().lower())
2980 .Case("uxtw", ARM64_AM::UXTW)
2981 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2982 .Case("sxtw", ARM64_AM::SXTW)
2983 .Case("sxtx", ARM64_AM::SXTX)
2984 .Default(ARM64_AM::InvalidShiftExtend);
2985 if (ExtOp == ARM64_AM::InvalidShiftExtend)
2986 return Error(ExtLoc, "expected valid extend operation");
2988 Parser.Lex(); // Eat the extend op.
2990 // A 32-bit offset register is only valid for [SU]XTW extend operations.
2992 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) {
2993 if (ExtOp != ARM64_AM::UXTW &&
2994 ExtOp != ARM64_AM::SXTW)
2995 return Error(ExtLoc, "32-bit general purpose offset register "
2996 "requires sxtw or uxtw extend");
2997 } else if (!ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
2999 return Error(OffsetLoc,
3000 "64-bit general purpose offset register expected");
3002 bool Hash = getLexer().is(AsmToken::Hash);
3003 if (getLexer().is(AsmToken::RBrac)) {
3004 // No immediate operand.
3005 if (ExtOp == ARM64_AM::UXTX)
3006 return Error(ExtLoc, "LSL extend requires immediate operand");
3007 } else if (Hash || getLexer().is(AsmToken::Integer)) {
3008 // Immediate operand.
3010 Parser.Lex(); // Eat the '#'
3011 const MCExpr *ImmVal;
3012 SMLoc ExprLoc = getLoc();
3013 if (getParser().parseExpression(ImmVal))
3015 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3017 return TokError("immediate value expected for extend operand");
3019 ExplicitShift = true;
3020 ShiftVal = MCE->getValue();
3021 if (ShiftVal < 0 || ShiftVal > 4)
3022 return Error(ExprLoc, "immediate operand out of range");
3024 return Error(getLoc(), "expected immediate operand");
3027 if (Parser.getTok().isNot(AsmToken::RBrac))
3028 return Error(getLoc(), "']' expected");
3030 Parser.Lex(); // Eat right bracket token.
3033 Operands.push_back(ARM64Operand::CreateRegOffsetMem(
3034 Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
3037 // Immediate expressions.
3038 } else if (Parser.getTok().is(AsmToken::Hash) ||
3039 Parser.getTok().is(AsmToken::Colon) ||
3040 Parser.getTok().is(AsmToken::Integer)) {
3041 if (Parser.getTok().is(AsmToken::Hash))
3042 Parser.Lex(); // Eat hash token.
3044 if (parseSymbolicImmVal(OffsetExpr))
3047 // FIXME: We really should make sure that we're dealing with a LDR/STR
3048 // instruction that can legally have a symbolic expression here.
3049 // Symbol reference.
3050 if (Parser.getTok().isNot(AsmToken::Identifier) &&
3051 Parser.getTok().isNot(AsmToken::String))
3052 return Error(getLoc(), "identifier or immediate expression expected");
3053 if (getParser().parseExpression(OffsetExpr))
3055 // If this is a plain ref, make sure a legal variant kind was specified.
3056 // Otherwise, it's a more complicated expression and we have to just
3057 // assume it's OK and let the relocation stuff puke if it's not.
3058 ARM64MCExpr::VariantKind ELFRefKind;
3059 MCSymbolRefExpr::VariantKind DarwinRefKind;
3061 if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
3063 assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
3064 "ELF symbol modifiers not supported here yet");
3066 switch (DarwinRefKind) {
3068 return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
3069 case MCSymbolRefExpr::VK_GOTPAGEOFF:
3070 case MCSymbolRefExpr::VK_PAGEOFF:
3071 case MCSymbolRefExpr::VK_TLVPPAGEOFF:
3072 // These are what we're expecting.
3080 if (Parser.getTok().isNot(AsmToken::RBrac))
3081 return Error(E, "']' expected");
3083 Parser.Lex(); // Eat right bracket token.
3085 // Create the memory operand.
3087 ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
3089 // Check for a '!', indicating pre-indexed addressing with writeback.
3090 if (Parser.getTok().is(AsmToken::Exclaim)) {
3091 // There must have been an immediate, or writeback doesn't make sense.
3093 return Error(E, "missing offset for pre-indexed addressing");
3094 // Pre-indexed with writeback must have a constant expression for the
3095 // offset. FIXME: Theoretically, we'd like to allow fixups so long
3096 // as they don't require a relocation.
3097 if (!isa<MCConstantExpr>(OffsetExpr))
3098 return Error(OffsetLoc, "constant immediate expression expected");
3100 // Create the Token operand for the '!'.
3101 Operands.push_back(ARM64Operand::CreateToken(
3102 "!", false, Parser.getTok().getLoc(), getContext()));
3103 Parser.Lex(); // Eat the '!' token.
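/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":modifier:", e.g.
/// ":lo12:symbol". When a specifier is present, the parsed expression is
/// wrapped in an ARM64MCExpr carrying the variant kind.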
3109 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3110 bool HasELFModifier = false;
3111 ARM64MCExpr::VariantKind RefKind;
3113 if (Parser.getTok().is(AsmToken::Colon)) {
3114 Parser.Lex(); // Eat ':'
3115 HasELFModifier = true;
3117 if (Parser.getTok().isNot(AsmToken::Identifier)) {
3118 Error(Parser.getTok().getLoc(),
3119 "expect relocation specifier in operand after ':'");
3123 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3124 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
3125 .Case("lo12", ARM64MCExpr::VK_LO12)
3126 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
3127 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
3128 .Case("abs_g2_s", ARM64MCExpr::VK_ABS_G2_S)
3129 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
3130 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
3131 .Case("abs_g1_s", ARM64MCExpr::VK_ABS_G1_S)
3132 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
3133 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
3134 .Case("abs_g0_s", ARM64MCExpr::VK_ABS_G0_S)
3135 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
3136 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
3137 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
3138 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
3139 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
3140 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
3141 .Case("dtprel_hi12", ARM64MCExpr::VK_DTPREL_HI12)
3142 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
3143 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
3144 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
3145 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
3146 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
3147 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
3148 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
3149 .Case("tprel_hi12", ARM64MCExpr::VK_TPREL_HI12)
3150 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
3151 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
3152 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
3153 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
3154 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
3155 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
3156 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
3157 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
3158 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
3159 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
3160 .Default(ARM64MCExpr::VK_INVALID);
3162 if (RefKind == ARM64MCExpr::VK_INVALID) {
3163 Error(Parser.getTok().getLoc(),
3164 "expect relocation specifier in operand after ':'");
3168 Parser.Lex(); // Eat identifier
3170 if (Parser.getTok().isNot(AsmToken::Colon)) {
3171 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3174 Parser.Lex(); // Eat ':'
3177 if (getParser().parseExpression(ImmVal))
3181 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
3186 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
3187 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
3188 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Curly Bracket");
3190 Parser.Lex(); // Eat left bracket token.
3192 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3195 int64_t PrevReg = FirstReg;
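// A '-' after the first register introduces a range, e.g. { v0.8b - v3.8b }:
// every register from the first through the end register (wrapping at v31)
// is included in the list.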
3198 if (Parser.getTok().is(AsmToken::Minus)) {
3199 Parser.Lex(); // Eat the minus.
3201 SMLoc Loc = getLoc();
3203 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3206 // Any kind suffixes must match on all registers in the list.
3207 if (Kind != NextKind)
3208 return Error(Loc, "mismatched register size suffix");
3210 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3212 if (Space == 0 || Space > 3) {
3213 return Error(Loc, "invalid number of vectors");
3219 while (Parser.getTok().is(AsmToken::Comma)) {
3220 Parser.Lex(); // Eat the comma token.
3222 SMLoc Loc = getLoc();
3224 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3227 // Any kind suffixes must match on all registers in the list.
3228 if (Kind != NextKind)
3229 return Error(Loc, "mismatched register size suffix");
3231 // Registers must be incremental (with wraparound at 31)
3232 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3233 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3234 return Error(Loc, "registers must be sequential");
3241 if (Parser.getTok().isNot(AsmToken::RCurly))
3242 return Error(getLoc(), "'}' expected");
3243 Parser.Lex(); // Eat the '}' token.
3246 return Error(S, "invalid number of vectors");
3248 unsigned NumElements = 0;
3249 char ElementKind = 0;
3251 parseValidVectorKind(Kind, NumElements, ElementKind);
3253 Operands.push_back(ARM64Operand::CreateVectorList(
3254 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3256 // If there is an index specifier following the list, parse that too.
3257 if (Parser.getTok().is(AsmToken::LBrac)) {
3258 SMLoc SIdx = getLoc();
3259 Parser.Lex(); // Eat left bracket token.
3261 const MCExpr *ImmVal;
3262 if (getParser().parseExpression(ImmVal))
3264 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3266 TokError("immediate value expected for vector index");
3271 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3272 Error(E, "']' expected");
3276 Parser.Lex(); // Eat right bracket token.
3278 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3284 /// parseOperand - Parse an ARM64 instruction operand. For now this parses the
3285 /// operand regardless of the mnemonic.
3286 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3287 bool invertCondCode) {
3288 // Check if the current operand has a custom associated parser, if so, try to
3289 // custom parse the operand, or fallback to the general approach.
3290 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3291 if (ResTy == MatchOperand_Success)
3293 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3294 // there was a match, but an error occurred, in which case, just return that
3295 // the operand parsing failed.
3296 if (ResTy == MatchOperand_ParseFail)
3299 // Nothing custom, so do general case parsing.
3301 switch (getLexer().getKind()) {
3305 if (parseSymbolicImmVal(Expr))
3306 return Error(S, "invalid operand");
3308 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3309 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
3312 case AsmToken::LBrac:
3313 return parseMemory(Operands);
3314 case AsmToken::LCurly:
3315 return parseVectorList(Operands);
3316 case AsmToken::Identifier: {
3317 // If we're expecting a Condition Code operand, then just parse that.
3319 return parseCondCode(Operands, invertCondCode);
3321 // If it's a register name, parse it.
3322 if (!parseRegister(Operands))
3325 // This could be an optional "shift" or "extend" operand.
3326 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3327 // We can only continue if no tokens were eaten.
3328 if (GotShift != MatchOperand_NoMatch)
3331 // This was not a register so parse other operands that start with an
3332 // identifier (like labels) as expressions and create them as immediates.
3333 const MCExpr *IdVal;
3335 if (getParser().parseExpression(IdVal))
3338 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3339 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
3342 case AsmToken::Integer:
3343 case AsmToken::Real:
3344 case AsmToken::Hash: {
3345 // #42 -> immediate.
3347 if (getLexer().is(AsmToken::Hash))
3350 // The only Real that should come through here is a literal #0.0 for
3351 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3352 // so convert the value.
3353 const AsmToken &Tok = Parser.getTok();
3354 if (Tok.is(AsmToken::Real)) {
3355 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3356 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3357 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3358 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3359 Mnemonic != "fcmlt")
3360 return TokError("unexpected floating point literal");
3361 else if (IntVal != 0)
3362 return TokError("only valid floating-point immediate is #0.0");
3363 Parser.Lex(); // Eat the token.
3366 ARM64Operand::CreateToken("#0", false, S, getContext()));
3368 ARM64Operand::CreateToken(".0", false, S, getContext()));
3372 const MCExpr *ImmVal;
3373 if (parseSymbolicImmVal(ImmVal))
3376 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3377 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
3383 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
3385 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3386 StringRef Name, SMLoc NameLoc,
3387 OperandVector &Operands) {
3388 Name = StringSwitch<StringRef>(Name.lower())
3389 .Case("beq", "b.eq")
3390 .Case("bne", "b.ne")
3391 .Case("bhs", "b.hs")
3392 .Case("bcs", "b.cs")
3393 .Case("blo", "b.lo")
3394 .Case("bcc", "b.cc")
3395 .Case("bmi", "b.mi")
3396 .Case("bpl", "b.pl")
3397 .Case("bvs", "b.vs")
3398 .Case("bvc", "b.vc")
3399 .Case("bhi", "b.hi")
3400 .Case("bls", "b.ls")
3401 .Case("bge", "b.ge")
3402 .Case("blt", "b.lt")
3403 .Case("bgt", "b.gt")
3404 .Case("ble", "b.le")
3405 .Case("bal", "b.al")
3406 .Case("bnv", "b.nv")
3409 // Create the leading tokens for the mnemonic, split by '.' characters.
3410 size_t Start = 0, Next = Name.find('.');
3411 StringRef Head = Name.slice(Start, Next);
3413 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3414 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3415 return parseSysAlias(Head, NameLoc, Operands);
3418 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3421 // Handle condition codes for a branch mnemonic
3422 if (Head == "b" && Next != StringRef::npos) {
3424 Next = Name.find('.', Start + 1);
3425 Head = Name.slice(Start + 1, Next);
3427 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3428 (Head.data() - Name.data()));
3429 unsigned CC = parseCondCodeString(Head);
3430 if (CC == ARM64CC::Invalid)
3431 return Error(SuffixLoc, "invalid condition code");
3432 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
3434 ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
3437 // Add the remaining tokens in the mnemonic.
3438 while (Next != StringRef::npos) {
3440 Next = Name.find('.', Start + 1);
3441 Head = Name.slice(Start, Next);
3442 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3443 (Head.data() - Name.data()) + 1);
3445 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3448 // Conditional compare instructions have a Condition Code operand, which needs
3449 // to be parsed and an immediate operand created.
3450 bool condCodeFourthOperand =
3451 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3452 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3453 Head == "csinc" || Head == "csinv" || Head == "csneg");
3455 // These instructions are aliases to some of the conditional select
3456 // instructions. However, the condition code is inverted in the aliased instruction.
3459 // FIXME: Is this the correct way to handle these? Or should the parser
3460 // generate the aliased instructions directly?
3461 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3462 bool condCodeThirdOperand =
3463 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3465 // Read the remaining operands.
3466 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3467 // Read the first operand.
3468 if (parseOperand(Operands, false, false)) {
3469 Parser.eatToEndOfStatement();
3474 while (getLexer().is(AsmToken::Comma)) {
3475 Parser.Lex(); // Eat the comma.
3477 // Parse and remember the operand.
3478 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3479 (N == 3 && condCodeThirdOperand) ||
3480 (N == 2 && condCodeSecondOperand),
3481 condCodeSecondOperand || condCodeThirdOperand)) {
3482 Parser.eatToEndOfStatement();
3490 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3491 SMLoc Loc = Parser.getTok().getLoc();
3492 Parser.eatToEndOfStatement();
3493 return Error(Loc, "unexpected token in argument list");
3496 Parser.Lex(); // Consume the EndOfStatement
3500 // FIXME: This entire function is a giant hack to provide us with decent
3501 // operand range validation/diagnostics until TableGen/MC can be extended
3502 // to support autogeneration of this kind of validation.
3503 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3504 SmallVectorImpl<SMLoc> &Loc) {
3505 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3506 // Check for indexed addressing modes w/ the base register being the
3507 // same as a destination/source register or pair load where
3508 // the Rt == Rt2. All of those are undefined behaviour.
3509 switch (Inst.getOpcode()) {
3510 case ARM64::LDPSWpre:
3511 case ARM64::LDPWpost:
3512 case ARM64::LDPWpre:
3513 case ARM64::LDPXpost:
3514 case ARM64::LDPXpre: {
3515 unsigned Rt = Inst.getOperand(0).getReg();
3516 unsigned Rt2 = Inst.getOperand(1).getReg();
3517 unsigned Rn = Inst.getOperand(2).getReg();
3518 if (RI->isSubRegisterEq(Rn, Rt))
3519 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3520 "is also a destination");
3521 if (RI->isSubRegisterEq(Rn, Rt2))
3522 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3523 "is also a destination");
3526 case ARM64::LDPDpost:
3527 case ARM64::LDPDpre:
3528 case ARM64::LDPQpost:
3529 case ARM64::LDPQpre:
3530 case ARM64::LDPSpost:
3531 case ARM64::LDPSpre:
3532 case ARM64::LDPSWpost:
3538 case ARM64::LDPXi: {
3539 unsigned Rt = Inst.getOperand(0).getReg();
3540 unsigned Rt2 = Inst.getOperand(1).getReg();
3542 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3545 case ARM64::STPDpost:
3546 case ARM64::STPDpre:
3547 case ARM64::STPQpost:
3548 case ARM64::STPQpre:
3549 case ARM64::STPSpost:
3550 case ARM64::STPSpre:
3551 case ARM64::STPWpost:
3552 case ARM64::STPWpre:
3553 case ARM64::STPXpost:
3554 case ARM64::STPXpre: {
3555 unsigned Rt = Inst.getOperand(0).getReg();
3556 unsigned Rt2 = Inst.getOperand(1).getReg();
3557 unsigned Rn = Inst.getOperand(2).getReg();
3558 if (RI->isSubRegisterEq(Rn, Rt))
3559 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3560 "is also a source");
3561 if (RI->isSubRegisterEq(Rn, Rt2))
3562 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3563 "is also a source");
3566 case ARM64::LDRBBpre:
3567 case ARM64::LDRBpre:
3568 case ARM64::LDRHHpre:
3569 case ARM64::LDRHpre:
3570 case ARM64::LDRSBWpre:
3571 case ARM64::LDRSBXpre:
3572 case ARM64::LDRSHWpre:
3573 case ARM64::LDRSHXpre:
3574 case ARM64::LDRSWpre:
3575 case ARM64::LDRWpre:
3576 case ARM64::LDRXpre:
3577 case ARM64::LDRBBpost:
3578 case ARM64::LDRBpost:
3579 case ARM64::LDRHHpost:
3580 case ARM64::LDRHpost:
3581 case ARM64::LDRSBWpost:
3582 case ARM64::LDRSBXpost:
3583 case ARM64::LDRSHWpost:
3584 case ARM64::LDRSHXpost:
3585 case ARM64::LDRSWpost:
3586 case ARM64::LDRWpost:
3587 case ARM64::LDRXpost: {
3588 unsigned Rt = Inst.getOperand(0).getReg();
3589 unsigned Rn = Inst.getOperand(1).getReg();
3590 if (RI->isSubRegisterEq(Rn, Rt))
3591 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3592 "is also a source");
3595 case ARM64::STRBBpost:
3596 case ARM64::STRBpost:
3597 case ARM64::STRHHpost:
3598 case ARM64::STRHpost:
3599 case ARM64::STRWpost:
3600 case ARM64::STRXpost:
3601 case ARM64::STRBBpre:
3602 case ARM64::STRBpre:
3603 case ARM64::STRHHpre:
3604 case ARM64::STRHpre:
3605 case ARM64::STRWpre:
3606 case ARM64::STRXpre: {
3607 unsigned Rt = Inst.getOperand(0).getReg();
3608 unsigned Rn = Inst.getOperand(1).getReg();
3609 if (RI->isSubRegisterEq(Rn, Rt))
3610 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3611 "is also a source");
3616 // Now check immediate ranges. Separate from the above as there is overlap
3617 // in the instructions being checked, and this keeps the nested conditionals small.
3619 switch (Inst.getOpcode()) {
3620 case ARM64::ADDSWri:
3621 case ARM64::ADDSXri:
3624 case ARM64::SUBSWri:
3625 case ARM64::SUBSXri:
3627 case ARM64::SUBXri: {
3628 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3629 // some slight duplication here.
3630 if (Inst.getOperand(2).isExpr()) {
3631 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3632 ARM64MCExpr::VariantKind ELFRefKind;
3633 MCSymbolRefExpr::VariantKind DarwinRefKind;
3635 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3636 return Error(Loc[2], "invalid immediate expression");
3639 // Only allow these with ADDXri.
3640 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3641 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3642 Inst.getOpcode() == ARM64::ADDXri)
3645 // Only allow these with ADDXri/ADDWri
3646 if ((ELFRefKind == ARM64MCExpr::VK_LO12 ||
3647 ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12 ||
3648 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3649 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3650 ELFRefKind == ARM64MCExpr::VK_TPREL_HI12 ||
3651 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3652 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3653 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) &&
3654 (Inst.getOpcode() == ARM64::ADDXri ||
3655 Inst.getOpcode() == ARM64::ADDWri))
3658 // Don't allow expressions in the immediate field otherwise
3659 return Error(Loc[2], "invalid immediate expression");
3668 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3670 case Match_MissingFeature:
3672 "instruction requires a CPU feature not currently enabled");
3673 case Match_InvalidOperand:
3674 return Error(Loc, "invalid operand for instruction");
3675 case Match_InvalidSuffix:
3676 return Error(Loc, "invalid type suffix for instruction");
3677 case Match_AddSubRegExtendSmall:
3679 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3680 case Match_AddSubRegExtendLarge:
3682 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3683 case Match_AddSubSecondSource:
3685 "expected compatible register, symbol or integer in range [0, 4095]");
3686 case Match_LogicalSecondSource:
3687 return Error(Loc, "expected compatible register or logical immediate");
3688 case Match_AddSubRegShift32:
3690 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3691 case Match_AddSubRegShift64:
3693 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3694 case Match_InvalidMemoryIndexedSImm9:
3695 return Error(Loc, "index must be an integer in range [-256, 255].");
3696 case Match_InvalidMemoryIndexed32SImm7:
3697 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3698 case Match_InvalidMemoryIndexed64SImm7:
3699 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3700 case Match_InvalidMemoryIndexed128SImm7:
3701 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3702 case Match_InvalidMemoryIndexed8:
3703 return Error(Loc, "index must be an integer in range [0, 4095].");
3704 case Match_InvalidMemoryIndexed16:
3705 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3706 case Match_InvalidMemoryIndexed32:
3707 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3708 case Match_InvalidMemoryIndexed64:
3709 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3710 case Match_InvalidMemoryIndexed128:
3711 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3712 case Match_InvalidImm0_7:
3713 return Error(Loc, "immediate must be an integer in range [0, 7].");
3714 case Match_InvalidImm0_15:
3715 return Error(Loc, "immediate must be an integer in range [0, 15].");
3716 case Match_InvalidImm0_31:
3717 return Error(Loc, "immediate must be an integer in range [0, 31].");
3718 case Match_InvalidImm0_63:
3719 return Error(Loc, "immediate must be an integer in range [0, 63].");
3720 case Match_InvalidImm1_8:
3721 return Error(Loc, "immediate must be an integer in range [1, 8].");
3722 case Match_InvalidImm1_16:
3723 return Error(Loc, "immediate must be an integer in range [1, 16].");
3724 case Match_InvalidImm1_32:
3725 return Error(Loc, "immediate must be an integer in range [1, 32].");
3726 case Match_InvalidImm1_64:
3727 return Error(Loc, "immediate must be an integer in range [1, 64].");
3728 case Match_InvalidIndexB:
3729 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3730 case Match_InvalidIndexH:
3731 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3732 case Match_InvalidIndexS:
3733 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3734 case Match_InvalidIndexD:
3735 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3736 case Match_InvalidLabel:
3737 return Error(Loc, "expected label or encodable integer pc offset");
3739 return Error(Loc, "expected readable system register");
3741 return Error(Loc, "expected writable system register or pstate");
3742 case Match_MnemonicFail:
3743 return Error(Loc, "unrecognized instruction mnemonic");
3745 assert(0 && "unexpected error code!");
3746 return Error(Loc, "invalid instruction format");
3750 static const char *getSubtargetFeatureName(unsigned Val);
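/// MatchAndEmitInstruction - Rewrite alias mnemonics that the generated
/// matcher cannot express directly (e.g. lsl -> ubfm, bfi -> bfm,
/// bfxil/sbfx/ubfx -> *bfm, and the Wn/Xn register twiddles below), then run
/// the generated matcher and emit the instruction.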
3752 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3753 OperandVector &Operands,
3755 unsigned &ErrorInfo,
3756 bool MatchingInlineAsm) {
3757 assert(!Operands.empty() && "Unexpected empty operand list!");
3758 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3759 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3761 StringRef Tok = Op->getToken();
3762 unsigned NumOperands = Operands.size();
3764 if (NumOperands == 4 && Tok == "lsl") {
3765 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3766 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3767 if (Op2->isReg() && Op3->isImm()) {
3768 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3770 uint64_t Op3Val = Op3CE->getValue();
3771 uint64_t NewOp3Val = 0;
3772 uint64_t NewOp4Val = 0;
3773 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3775 NewOp3Val = (32 - Op3Val) & 0x1f;
3776 NewOp4Val = 31 - Op3Val;
3778 NewOp3Val = (64 - Op3Val) & 0x3f;
3779 NewOp4Val = 63 - Op3Val;
3782 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3783 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3785 Operands[0] = ARM64Operand::CreateToken(
3786 "ubfm", false, Op->getStartLoc(), getContext());
3787 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
3788 Op3->getEndLoc(), getContext());
3789 Operands.push_back(ARM64Operand::CreateImm(
3790 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
3795 } else if (NumOperands == 5) {
3796 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3797 // UBFIZ -> UBFM aliases.
3798 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3799 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3800 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3801 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
3803 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
3804 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3805 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
3807 if (Op3CE && Op4CE) {
3808 uint64_t Op3Val = Op3CE->getValue();
3809 uint64_t Op4Val = Op4CE->getValue();
3811 uint64_t RegWidth = 0;
3812 if (ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
3818 if (Op3Val >= RegWidth)
3819 return Error(Op3->getStartLoc(),
3820 "expected integer in range [0, 31]");
3821 if (Op4Val < 1 || Op4Val > RegWidth)
3822 return Error(Op4->getStartLoc(),
3823 "expected integer in range [1, 32]");
3825 uint64_t NewOp3Val = 0;
3826 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3828 NewOp3Val = (32 - Op3Val) & 0x1f;
3830 NewOp3Val = (64 - Op3Val) & 0x3f;
3832 uint64_t NewOp4Val = Op4Val - 1;
3834 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3835 return Error(Op4->getStartLoc(),
3836 "requested insert overflows register");
3838 const MCExpr *NewOp3 =
3839 MCConstantExpr::Create(NewOp3Val, getContext());
3840 const MCExpr *NewOp4 =
3841 MCConstantExpr::Create(NewOp4Val, getContext());
3842 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
3843 Op3->getEndLoc(), getContext());
3844 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
3845 Op4->getEndLoc(), getContext());
3847 Operands[0] = ARM64Operand::CreateToken(
3848 "bfm", false, Op->getStartLoc(), getContext());
3849 else if (Tok == "sbfiz")
3850 Operands[0] = ARM64Operand::CreateToken(
3851 "sbfm", false, Op->getStartLoc(), getContext());
3852 else if (Tok == "ubfiz")
3853 Operands[0] = ARM64Operand::CreateToken(
3854 "ubfm", false, Op->getStartLoc(), getContext());
3856 llvm_unreachable("No valid mnemonic for alias?");

      // FIXME: Horrible hack to handle the BFXIL -> BFM, SBFX -> SBFM, and
      // UBFX -> UBFM aliases.
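      // For example, in the 64-bit case:
      //   ubfx x0, x1, #4, #8
      // becomes
      //   ubfm x0, x1, #4, #11   // immr = lsb, imms = lsb + width - 1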
    } else if (NumOperands == 5 &&
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
      ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
      ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
      ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);

      if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
                  Op1->getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3->getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4->getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp4Val = Op3Val + Op4Val - 1;

          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(Op4->getStartLoc(),
                         "requested extract overflows register");

          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[4] = ARM64Operand::CreateImm(
              NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
          if (Tok == "bfxil")
            Operands[0] = ARM64Operand::CreateToken(
                "bfm", false, Op->getStartLoc(), getContext());
          else if (Tok == "sbfx")
            Operands[0] = ARM64Operand::CreateToken(
                "sbfm", false, Op->getStartLoc(), getContext());
          else if (Tok == "ubfx")
            Operands[0] = ARM64Operand::CreateToken(
                "ubfm", false, Op->getStartLoc(), getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");

          delete Op;
          delete Op4;
        }
      }
    }
  }

  // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
  // InstAlias can't quite handle this since the reg classes aren't
  // distinguishable by the matcher.
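  // For example, "tbz w3, #5, target" is matched as if it were written
  // "tbz x3, #5, target": for bit numbers below 32 the two forms test the
  // same bit, so only the GPR64 pattern needs a matcher entry.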
  if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
    if (Op->isImm()) {
      if (const MCConstantExpr *OpCE =
              dyn_cast<MCConstantExpr>(Op->getImm())) {
        if (OpCE->getValue() < 32) {
          // The source register can be Wn here, but the matcher expects a
          // GPR64. Twiddle it here if necessary.
          ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
          if (Op->isReg()) {
            unsigned Reg = getXRegFromWReg(Op->getReg());
            Operands[1] = ARM64Operand::CreateReg(
                Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
            delete Op;
          }
        }
      }
    }
  }

  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  // InstAlias can't quite handle this since the reg classes aren't
  // distinguishable by the matcher.
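  // For example, "sxtw x0, w1" is rewritten below as if the source had been
  // written "x1"; the encoding only needs the register number, and the
  // matcher only has a GPR64-source pattern for these.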
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
    if (Op->isReg()) {
      unsigned Reg = getXRegFromWReg(Op->getReg());
      Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
                                            Op->getEndLoc(), getContext());
      delete Op;
    }
  }
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
    if (Op->isReg() &&
        ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
            Op->getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
      if (Op->isReg()) {
        unsigned Reg = getXRegFromWReg(Op->getReg());
        Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
                                              Op->getEndLoc(), getContext());
        delete Op;
      }
    }
  }
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
    if (Op->isReg() &&
        ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
            Op->getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
      if (Op->isReg()) {
        unsigned Reg = getWRegFromXReg(Op->getReg());
        Operands[1] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
                                              Op->getEndLoc(), getContext());
        delete Op;
      }
    }
  }

  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
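  // For example, "fmov s0, #0.0" is rewritten to "fmov s0, wzr" (and the
  // 64-bit form to use xzr): 0.0 is not an encodable FP immediate, but
  // moving the zero register produces the same bit pattern.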
  if (NumOperands == 3 && Tok == "fmov") {
    ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
    ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
    if (RegOp->isReg() && ImmOp->isFPImm() &&
        ImmOp->getFPImm() == (unsigned)-1) {
      unsigned zreg = ARM64MCRegisterClasses[ARM64::FPR32RegClassID].contains(
                          RegOp->getReg())
                          ? ARM64::WZR
                          : ARM64::XZR;
      Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
                                            Op->getEndLoc(), getContext());
      delete ImmOp;
    }
  }

  // FIXME: Horrible hack to handle the literal .d[1] vector index on
  // FMOV instructions. The index isn't an actual instruction operand
  // but rather syntactic sugar. It really should be part of the mnemonic,
  // not the operand, but whatever.
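  // For example, in "fmov x0, v1.d[1]" (or the high-lane write form
  // "fmov v1.d[1], x0") the index has been parsed as a vector-index operand;
  // it is re-expanded below into the '[', '1', ']' tokens the matcher
  // expects.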
  if ((NumOperands == 5) && Tok == "fmov") {
    // If the last operand is a vector index of '1', then replace it with
    // a '[' '1' ']' token sequence, which is what the matcher
    // (annoyingly) expects for a literal vector index operand.
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
    if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
      SMLoc Loc = Op->getStartLoc();
      Operands.pop_back();
      delete Op;
      Operands.push_back(
          ARM64Operand::CreateToken("[", false, Loc, getContext()));
      Operands.push_back(
          ARM64Operand::CreateToken("1", false, Loc, getContext()));
      Operands.push_back(
          ARM64Operand::CreateToken("]", false, Loc, getContext()));
    } else if (Op->isReg()) {
      // Similarly, check the destination operand for the GPR->High-lane
      // variant.
      unsigned OpNo = NumOperands - 2;
      ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
      if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
        SMLoc Loc = Op->getStartLoc();
        Operands[OpNo] =
            ARM64Operand::CreateToken("[", false, Loc, getContext());
        Operands.insert(
            Operands.begin() + OpNo + 1,
            ARM64Operand::CreateToken("1", false, Loc, getContext()));
        Operands.insert(
            Operands.begin() + OpNo + 2,
            ARM64Operand::CreateToken("]", false, Loc, getContext()));
        delete Op;
      }
    }
  }

  MCInst Inst;
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success)
    MatchResult =
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations.
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, OperandLocs))
      return true;

    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  }
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
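    // For example, a NEON instruction assembled with NEON disabled yields
    // "error: instruction requires: neon".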
    std::string Msg = "instruction requires:";
    uint64_t Mask = 1;
    for (unsigned i = 0; i < (sizeof(ErrorInfo) * 8 - 1); ++i) {
      if (ErrorInfo & Mask) {
        Msg += " ";
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
      }
      Mask <<= 1;
    }
    return Error(IDLoc, Msg);
  }
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;

      // If the match failed on a suffix token operand, tweak the diagnostic
      // accordingly.
      if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
          ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
        MatchResult = Match_InvalidSuffix;
    }
    return showMatchError(ErrorLoc, MatchResult);
  }
  case Match_InvalidMemoryIndexedSImm9: {
    // If there is not a '!' after the memory operand that failed, we really
    // want the diagnostic for the non-pre-indexed instruction variant instead.
    // Be careful to check for the post-indexed variant as well, which also
    // uses this match diagnostic. Also exclude the explicitly unscaled
    // mnemonics, as they want the unscaled diagnostic as well.
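    // For example, an out-of-range offset in "ldr x0, [x8, #257]" (no '!')
    // should be diagnosed against the plain LDR variant rather than the
    // pre-indexed "ldr x0, [x8, #257]!" form.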
    if (Operands.size() == ErrorInfo + 1 &&
        !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
        !Tok.startswith("stur") && !Tok.startswith("ldur")) {
      // Whether we want an Indexed64 or Indexed32 diagnostic depends on
      // the register class of the previous operand. Default to 64 in case
      // we see something unexpected.
      MatchResult = Match_InvalidMemoryIndexed64;
      if (ErrorInfo) {
        ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
        if (PrevOp->isReg() &&
            ARM64MCRegisterClasses[ARM64::GPR32RegClassID].contains(
                PrevOp->getReg()))
          MatchResult = Match_InvalidMemoryIndexed32;
      }
    }

    SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return showMatchError(ErrorLoc, MatchResult);
  }
  case Match_InvalidMemoryIndexed32:
  case Match_InvalidMemoryIndexed64:
  case Match_InvalidMemoryIndexed128:
    // If there is a '!' after the memory operand that failed, we really
    // want the diagnostic for the pre-indexed instruction variant instead.
    if (Operands.size() > ErrorInfo + 1 &&
        ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
      MatchResult = Match_InvalidMemoryIndexedSImm9;
  // FALL THROUGH
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidMemoryIndexed32SImm7:
  case Match_InvalidMemoryIndexed64SImm7:
  case Match_InvalidMemoryIndexed128SImm7:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidIndexB:
  case Match_InvalidIndexH:
  case Match_InvalidIndexS:
  case Match_InvalidIndexD:
  case Match_InvalidLabel:
  case Match_MRS:
  case Match_MSR: {
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
    // If it's a memory operand, the error is with the offset immediate,
    // so get that location instead.
    if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
      ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return showMatchError(ErrorLoc, MatchResult);
  }
  }

  llvm_unreachable("Implement any new match types added!");
}

/// ParseDirective parses the ARM64-specific directives.
bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".hword")
    return parseDirectiveWord(2, Loc);
  if (IDVal == ".word")
    return parseDirectiveWord(4, Loc);
  if (IDVal == ".xword")
    return parseDirectiveWord(8, Loc);
  if (IDVal == ".tlsdesccall")
    return parseDirectiveTLSDescCall(Loc);

  return parseDirectiveLOH(IDVal, Loc);
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
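/// For example:
///   .hword 0x1234           // emits 2 bytes
///   .word  label + 4        // emits 4 bytes
///   .xword 0x1122334455     // emits 8 bytes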
bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
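// For example, as part of a TLS descriptor access sequence:
//   .tlsdesccall var
//   blr x1
// The directive emits no bytes itself; the TLSDESCCALL pseudo attaches the
// relocation that marks the following blr for TLS relaxation.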
bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
  Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());

  MCInst Inst;
  Inst.setOpcode(ARM64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst, STI);
  return false;
}

/// parseDirectiveLOH
///  ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
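/// For example, a two-label linker optimization hint:
///   .loh AdrpAdd Ltmp0, Ltmp1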
bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  if (IDVal != MCLOHDirectiveName())
    return true;
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully got a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    Kind = (MCLOHType)Id;
    // Check that Id does not overflow MCLOHType.
    if (!isValidMCLOHType(Kind) || Id != Kind)
      return TokError("invalid numeric identifier in directive");
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parsed an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Parser.Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().GetOrCreateSymbol(Name));

    if (Idx + 1 == NbArgs)
      break;
    if (getLexer().isNot(AsmToken::Comma))
      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
    Parser.Lex();
  }
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");

  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
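
/// Classify an expression as a symbol reference the operand parsers can use.
/// This strips an optional ARM64 modifier (e.g. ":lo12:sym"), records any
/// Darwin variant kind (e.g. "sym@PAGEOFF"), and extracts a constant addend,
/// so "sym@PAGEOFF + 4" yields the PAGEOFF kind with Addend == 4.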
bool
ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                  ARM64MCExpr::VariantKind &ELFRefKind,
                                  MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                  int64_t &Addend) {
  ELFRefKind = ARM64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  if (!BE)
    return false;

  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  if (!SE)
    return false;
  DarwinRefKind = SE->getKind();

  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)
    return false;

  // See if the addend is a constant; otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
  if (!AddendExpr)
    return false;

  Addend = AddendExpr->getValue();
  if (BE->getOpcode() == MCBinaryExpr::Sub)
    Addend = -Addend;

  // It's some symbol reference + a constant addend, but it really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == ARM64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}

/// Force static initialization.
extern "C" void LLVMInitializeARM64AsmParser() {
  RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
  RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
}

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "ARM64GenAsmMatcher.inc"

// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
                                                    unsigned Kind) {
  ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
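  // For example, "cmlt v0.4s, v1.4s, #0" spells a literal "#0" in its asm
  // string; the parsed immediate operand is checked here against the
  // expected fixed value.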
  int64_t ExpectedVal;
  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK__35_0:
    ExpectedVal = 0;
    break;
  case MCK__35_1:
    ExpectedVal = 1;
    break;
  case MCK__35_12:
    ExpectedVal = 12;
    break;
  case MCK__35_16:
    ExpectedVal = 16;
    break;
  case MCK__35_2:
    ExpectedVal = 2;
    break;
  case MCK__35_24:
    ExpectedVal = 24;
    break;
  case MCK__35_3:
    ExpectedVal = 3;
    break;
  case MCK__35_32:
    ExpectedVal = 32;
    break;
  case MCK__35_4:
    ExpectedVal = 4;
    break;
  case MCK__35_48:
    ExpectedVal = 48;
    break;
  case MCK__35_6:
    ExpectedVal = 6;
    break;
  case MCK__35_64:
    ExpectedVal = 64;
    break;
  case MCK__35_8:
    ExpectedVal = 8;
    break;
  }
  if (!Op->isImm())
    return Match_InvalidOperand;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
  if (!CE)
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;
}