1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class AArch64AsmParser : public MCTargetAsmParser {
42 StringRef Mnemonic; ///< Instruction mnemonic.
46 // Map of register aliases registers via the .req directive.
47 StringMap<std::pair<bool, unsigned> > RegisterReqs;
49 AArch64TargetStreamer &getTargetStreamer() {
50 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
51 return static_cast<AArch64TargetStreamer &>(TS);
54 MCAsmParser &getParser() const { return Parser; }
55 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
57 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
59 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
60 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
61 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
62 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
63 int tryParseRegister();
64 int tryMatchVectorRegister(StringRef &Kind, bool expected);
65 bool parseRegister(OperandVector &Operands);
66 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
67 bool parseVectorList(OperandVector &Operands);
68 bool parseOperand(OperandVector &Operands, bool isCondCode,
71 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
72 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
73 bool showMatchError(SMLoc Loc, unsigned ErrCode);
75 bool parseDirectiveWord(unsigned Size, SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
117 const MCInstrInfo &MII,
118 const MCTargetOptions &Options)
119 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
120 MCAsmParserExtension::Initialize(_Parser);
121 if (Parser.getStreamer().getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(Parser.getStreamer());
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
// instruction operand. A discriminated union (tag: Kind) over the payload
// structs declared below. NOTE(review): this dump elides several members
// (Kind, Tok, Reg, Imm, FPImm, Barrier, SysCR, Prefetch payloads).
146 class AArch64Operand : public MCParsedAsmOperand {
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Payload for a vector register list, e.g. "{ v0.8b, v1.8b }".
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
// Payload for a vector element index, e.g. the "[1]" in "v0.s[1]".
184 struct VectorIndexOp {
// Payload for an immediate with an explicit shift, e.g. "#1, lsl #12".
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
212 uint64_t FeatureBits; // We need to pass through information about which
213 // core we are compiling for so that the SysReg
214 // Mappers can appropriately conditionalize.
225 struct ShiftExtendOp {
226 AArch64_AM::ShiftExtendType Type;
228 bool HasExplicitAmount;
// Union members — exactly one is live, selected by Kind.
238 struct VectorListOp VectorList;
239 struct VectorIndexOp VectorIndex;
241 struct ShiftedImmOp ShiftedImm;
242 struct CondCodeOp CondCode;
243 struct FPImmOp FPImm;
244 struct BarrierOp Barrier;
245 struct SysRegOp SysReg;
246 struct SysCRImmOp SysCRImm;
247 struct PrefetchOp Prefetch;
248 struct ShiftExtendOp ShiftExtend;
251 // Keep the MCContext around as the MCExprs may need manipulated during
252 // the add<>Operands() calls.
256 AArch64Operand(KindTy K, MCContext &_Ctx)
257 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy ctor: copies only the payload member that is live for o's Kind
// (the elided lines presumably switch on Kind — confirm upstream).
259 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
261 StartLoc = o.StartLoc;
271 ShiftedImm = o.ShiftedImm;
274 CondCode = o.CondCode;
286 VectorList = o.VectorList;
289 VectorIndex = o.VectorIndex;
295 SysCRImm = o.SysCRImm;
298 Prefetch = o.Prefetch;
301 ShiftExtend = o.ShiftExtend;
306 /// getStartLoc - Get the location of the first token of this operand.
307 SMLoc getStartLoc() const override { return StartLoc; }
308 /// getEndLoc - Get the location of the last token of this operand.
309 SMLoc getEndLoc() const override { return EndLoc; }
// Checked accessors: each asserts the discriminant (Kind) before touching
// the corresponding union payload.
311 StringRef getToken() const {
312 assert(Kind == k_Token && "Invalid access!");
313 return StringRef(Tok.Data, Tok.Length);
316 bool isTokenSuffix() const {
317 assert(Kind == k_Token && "Invalid access!");
321 const MCExpr *getImm() const {
322 assert(Kind == k_Immediate && "Invalid access!");
326 const MCExpr *getShiftedImmVal() const {
327 assert(Kind == k_ShiftedImm && "Invalid access!");
328 return ShiftedImm.Val;
331 unsigned getShiftedImmShift() const {
332 assert(Kind == k_ShiftedImm && "Invalid access!");
333 return ShiftedImm.ShiftAmount;
336 AArch64CC::CondCode getCondCode() const {
337 assert(Kind == k_CondCode && "Invalid access!");
338 return CondCode.Code;
341 unsigned getFPImm() const {
342 assert(Kind == k_FPImm && "Invalid access!");
346 unsigned getBarrier() const {
347 assert(Kind == k_Barrier && "Invalid access!");
351 unsigned getReg() const override {
352 assert(Kind == k_Register && "Invalid access!");
356 unsigned getVectorListStart() const {
357 assert(Kind == k_VectorList && "Invalid access!");
358 return VectorList.RegNum;
361 unsigned getVectorListCount() const {
362 assert(Kind == k_VectorList && "Invalid access!");
363 return VectorList.Count;
366 unsigned getVectorIndex() const {
367 assert(Kind == k_VectorIndex && "Invalid access!");
368 return VectorIndex.Val;
371 StringRef getSysReg() const {
372 assert(Kind == k_SysReg && "Invalid access!");
373 return StringRef(SysReg.Data, SysReg.Length);
376 uint64_t getSysRegFeatureBits() const {
377 assert(Kind == k_SysReg && "Invalid access!");
378 return SysReg.FeatureBits;
381 unsigned getSysCR() const {
382 assert(Kind == k_SysCR && "Invalid access!");
386 unsigned getPrefetch() const {
387 assert(Kind == k_Prefetch && "Invalid access!");
391 AArch64_AM::ShiftExtendType getShiftExtendType() const {
392 assert(Kind == k_ShiftExtend && "Invalid access!");
393 return ShiftExtend.Type;
396 unsigned getShiftExtendAmount() const {
397 assert(Kind == k_ShiftExtend && "Invalid access!");
398 return ShiftExtend.Amount;
401 bool hasShiftExtendAmount() const {
402 assert(Kind == k_ShiftExtend && "Invalid access!");
403 return ShiftExtend.HasExplicitAmount;
// Matcher predicates. Memory operands are always decomposed into
// register/immediate pieces, hence isMem() is unconditionally false.
406 bool isImm() const override { return Kind == k_Immediate; }
407 bool isMem() const override { return false; }
// Signed 9-bit immediate, [-256, 255] (LDUR/STUR-style offsets).
408 bool isSImm9() const {
411 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
414 int64_t Val = MCE->getValue();
415 return (Val >= -256 && Val < 256);
// Signed 7-bit immediate scaled by 4: multiples of 4 in [-256, 252].
417 bool isSImm7s4() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
// Signed 7-bit immediate scaled by 8: multiples of 8 in [-512, 504].
426 bool isSImm7s8() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
// Signed 7-bit immediate scaled by 16: multiples of 16 in [-1024, 1008].
435 bool isSImm7s16() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Returns true if Expr is a symbolic reference that is valid as a scaled
// unsigned 12-bit load/store offset (e.g. :lo12: or @pageoff modifiers).
445 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
446 AArch64MCExpr::VariantKind ELFRefKind;
447 MCSymbolRefExpr::VariantKind DarwinRefKind;
449 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
451 // If we don't understand the expression, assume the best and
452 // let the fixup and relocation code deal with it.
// Accept the low-12-bit family of modifiers on either object format.
456 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
457 ELFRefKind == AArch64MCExpr::VK_LO12 ||
458 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
459 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
460 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
461 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
462 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
463 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
464 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
465 // Note that we don't range-check the addend. It's adjusted modulo page
466 // size when converted, so there is no "out of range" condition when using
// Only alignment (divisibility by Scale) and non-negativity matter here.
468 return Addend >= 0 && (Addend % Scale) == 0;
469 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
470 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
471 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset, scaled by the access size: either a suitable
// symbolic expression (above) or a constant multiple of Scale in
// [0, 4095*Scale].
478 template <int Scale> bool isUImm12Offset() const {
482 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
484 return isSymbolicUImm12Offset(getImm(), Scale);
486 int64_t Val = MCE->getValue();
487 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Constant-immediate range predicates. Each follows the same pattern:
// the immediate must fold to an MCConstantExpr whose value lies in the
// half-open/closed range named by the method.
490 bool isImm0_7() const {
493 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
496 int64_t Val = MCE->getValue();
497 return (Val >= 0 && Val < 8);
499 bool isImm1_8() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val > 0 && Val < 9);
508 bool isImm0_15() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val >= 0 && Val < 16);
517 bool isImm1_16() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val > 0 && Val < 17);
526 bool isImm0_31() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val >= 0 && Val < 32);
535 bool isImm1_31() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val >= 1 && Val < 32);
544 bool isImm1_32() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 1 && Val < 33);
553 bool isImm0_63() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 0 && Val < 64);
562 bool isImm1_63() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 1 && Val < 64);
571 bool isImm1_64() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 1 && Val < 65);
580 bool isImm0_127() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 0 && Val < 128);
589 bool isImm0_255() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 0 && Val < 256);
598 bool isImm0_65535() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 65536);
607 bool isImm32_63() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 32 && Val < 64);
// 32-bit logical (bitmask) immediate. The value must be representable in
// 32 bits: upper half all-zero or all-one (sign extension) is accepted.
616 bool isLogicalImm32() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
623 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
626 return AArch64_AM::isLogicalImmediate(Val, 32);
628 bool isLogicalImm64() const {
631 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
634 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
// "Not" variants: the bitwise complement must be a valid logical
// immediate (used for the BIC/ORN-style inverted aliases).
636 bool isLogicalImm32Not() const {
639 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
642 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
643 return AArch64_AM::isLogicalImmediate(Val, 32);
645 bool isLogicalImm64Not() const {
648 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
651 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
653 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// Valid immediate for ADD/SUB: either a plain/shifted constant in
// [0, 0xfff] (shift 0 or 12), or one of the symbolic low-12/hi-12
// relocation modifiers accepted below.
654 bool isAddSubImm() const {
655 if (!isShiftedImm() && !isImm())
660 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
661 if (isShiftedImm()) {
662 unsigned Shift = ShiftedImm.ShiftAmount;
663 Expr = ShiftedImm.Val;
664 if (Shift != 0 && Shift != 12)
// Symbolic case: accept page-offset / TLS offset modifiers whose final
// value is resolved by a fixup.
670 AArch64MCExpr::VariantKind ELFRefKind;
671 MCSymbolRefExpr::VariantKind DarwinRefKind;
673 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
674 DarwinRefKind, Addend)) {
675 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
676 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
677 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
678 || ELFRefKind == AArch64MCExpr::VK_LO12
679 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
680 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
681 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
682 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
683 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
684 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
685 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
688 // Otherwise it should be a real immediate in range:
689 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
690 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
692 bool isCondCode() const { return Kind == k_CondCode; }
// FMOV-style 8-bit encodable floating-point immediate (type 10).
693 bool isSIMDImmType10() const {
696 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
699 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: byte displacements within the signed
// 26/19/14-bit (word-scaled) ranges. NOTE(review): word alignment of Val
// is presumably checked on the elided lines — confirm upstream.
701 bool isBranchTarget26() const {
704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
707 int64_t Val = MCE->getValue();
710 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
712 bool isPCRelLabel19() const {
715 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
718 int64_t Val = MCE->getValue();
721 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
723 bool isBranchTarget14() const {
726 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
729 int64_t Val = MCE->getValue();
732 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// Shared helper for the MOVZ/MOVK symbol predicates below: true if the
// immediate is a symbol reference with no Darwin modifier and an ELF
// modifier drawn from AllowedModifiers.
736 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
740 AArch64MCExpr::VariantKind ELFRefKind;
741 MCSymbolRefExpr::VariantKind DarwinRefKind;
743 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
744 DarwinRefKind, Addend)) {
747 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
750 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
751 if (ELFRefKind == AllowedModifiers[i])
// MOVZ accepts the plain :abs_gN:/:..._gN: modifiers per 16-bit slice;
// MOVK accepts the no-check (_NC) variants. One predicate per slice.
758 bool isMovZSymbolG3() const {
759 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
760 return isMovWSymbol(Variants);
763 bool isMovZSymbolG2() const {
764 static AArch64MCExpr::VariantKind Variants[] = {
765 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
766 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
767 return isMovWSymbol(Variants);
770 bool isMovZSymbolG1() const {
771 static AArch64MCExpr::VariantKind Variants[] = {
772 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
773 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
774 AArch64MCExpr::VK_DTPREL_G1,
776 return isMovWSymbol(Variants);
779 bool isMovZSymbolG0() const {
780 static AArch64MCExpr::VariantKind Variants[] = {
781 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
782 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
783 return isMovWSymbol(Variants);
786 bool isMovKSymbolG3() const {
787 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
788 return isMovWSymbol(Variants);
791 bool isMovKSymbolG2() const {
792 static AArch64MCExpr::VariantKind Variants[] = {
793 AArch64MCExpr::VK_ABS_G2_NC};
794 return isMovWSymbol(Variants);
797 bool isMovKSymbolG1() const {
798 static AArch64MCExpr::VariantKind Variants[] = {
799 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
800 AArch64MCExpr::VK_DTPREL_G1_NC
802 return isMovWSymbol(Variants);
805 bool isMovKSymbolG0() const {
806 static AArch64MCExpr::VariantKind Variants[] = {
807 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
808 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
810 return isMovWSymbol(Variants);
// True if the constant can be materialized by "MOV Rd, #imm" lowered to
// MOVZ with the given 16-bit-slice Shift for a RegWidth-bit register.
813 template<int RegWidth, int Shift>
814 bool isMOVZMovAlias() const {
815 if (!isImm()) return false;
817 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
818 if (!CE) return false;
819 uint64_t Value = CE->getValue();
// For 32-bit registers only the low 32 bits of the value participate.
822 Value &= 0xffffffffULL;
824 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
825 if (Value == 0 && Shift != 0)
828 return (Value & ~(0xffffULL << Shift)) == 0;
// MOVN alias: valid only when no MOVZ slice already covers the value
// (MOVZ takes precedence for any value fitting a single 16-bit slice).
831 template<int RegWidth, int Shift>
832 bool isMOVNMovAlias() const {
833 if (!isImm()) return false;
835 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836 if (!CE) return false;
837 uint64_t Value = CE->getValue();
839 // MOVZ takes precedence over MOVN.
840 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
841 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
846 Value &= 0xffffffffULL;
848 return (Value & ~(0xffffULL << Shift)) == 0;
851 bool isFPImm() const { return Kind == k_FPImm; }
852 bool isBarrier() const { return Kind == k_Barrier; }
853 bool isSysReg() const { return Kind == k_SysReg; }
// System-register predicates: the name must resolve through the
// feature-conditional MRS/MSR mappers (some registers are read-only or
// write-only, hence separate checks).
854 bool isMRSSystemRegister() const {
855 if (!isSysReg()) return false;
857 bool IsKnownRegister;
858 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
859 Mapper.fromString(getSysReg(), IsKnownRegister);
861 return IsKnownRegister;
863 bool isMSRSystemRegister() const {
864 if (!isSysReg()) return false;
866 bool IsKnownRegister;
867 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
868 Mapper.fromString(getSysReg(), IsKnownRegister);
870 return IsKnownRegister;
// PState fields (e.g. for "MSR <pstatefield>, #imm") use their own mapper.
872 bool isSystemPStateField() const {
873 if (!isSysReg()) return false;
875 bool IsKnownRegister;
876 AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
878 return IsKnownRegister;
// Scalar vs. vector registers share k_Register; Reg.isVector splits them.
880 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
881 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
// Vector register restricted to the low half of the FP/SIMD file
// (FPR128_lo, i.e. V0-V15 — required by some indexed-element forms).
882 bool isVectorRegLo() const {
883 return Kind == k_Register && Reg.isVector &&
884 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
// A 64-bit GPR written where a 32-bit GPR is expected (diagnosed/mapped
// by addGPR32as64Operands below).
887 bool isGPR32as64() const {
888 return Kind == k_Register && !Reg.isVector &&
889 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
892 bool isGPR64sp0() const {
893 return Kind == k_Register && !Reg.isVector &&
894 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
897 /// Is this a vector list with the type implicit (presumably attached to the
898 /// instruction itself)?
899 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
900 return Kind == k_VectorList && VectorList.Count == NumRegs &&
901 !VectorList.ElementKind;
// Fully-typed vector list: register count, element count and element
// kind (b/h/s/d suffix) must all match.
904 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
905 bool isTypedVectorList() const {
906 if (Kind != k_VectorList)
908 if (VectorList.Count != NumRegs)
910 if (VectorList.ElementKind != ElementKind)
912 return VectorList.NumElements == NumElements;
// Element-index predicates: upper bound follows from 128 bits divided by
// the element size (16 bytes, 8 halves, 4 words, 2 doublewords).
915 bool isVectorIndex1() const {
916 return Kind == k_VectorIndex && VectorIndex.Val == 1;
918 bool isVectorIndexB() const {
919 return Kind == k_VectorIndex && VectorIndex.Val < 16;
921 bool isVectorIndexH() const {
922 return Kind == k_VectorIndex && VectorIndex.Val < 8;
924 bool isVectorIndexS() const {
925 return Kind == k_VectorIndex && VectorIndex.Val < 4;
927 bool isVectorIndexD() const {
928 return Kind == k_VectorIndex && VectorIndex.Val < 2;
930 bool isToken() const override { return Kind == k_Token; }
931 bool isTokenEqual(StringRef Str) const {
932 return Kind == k_Token && getToken() == Str;
934 bool isSysCR() const { return Kind == k_SysCR; }
935 bool isPrefetch() const { return Kind == k_Prefetch; }
936 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// A "shifter" is one of the shift-type operators (incl. MSL); extends
// (UXTB..SXTX) are classified separately by isExtend below.
937 bool isShifter() const {
938 if (!isShiftExtend())
941 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
942 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
943 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
944 ST == AArch64_AM::MSL);
// Register-extend operator with amount 0-4 (LSL is allowed as an alias
// for UXTW/UXTX in extended-register forms).
946 bool isExtend() const {
947 if (!isShiftExtend())
950 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
951 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
952 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
953 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
954 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
955 ET == AArch64_AM::LSL) &&
956 getShiftExtendAmount() <= 4;
959 bool isExtend64() const {
962 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
963 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
964 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
966 bool isExtendLSL64() const {
969 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
970 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
971 ET == AArch64_AM::LSL) &&
972 getShiftExtendAmount() <= 4;
// Memory-operand extends: amount must be 0 or log2 of the access size
// in bytes (Width is in bits). X-form uses LSL/SXTX, W-form UXTW/SXTW.
975 template<int Width> bool isMemXExtend() const {
978 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
979 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
980 (getShiftExtendAmount() == Log2_32(Width / 8) ||
981 getShiftExtendAmount() == 0);
984 template<int Width> bool isMemWExtend() const {
987 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
988 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
989 (getShiftExtendAmount() == Log2_32(Width / 8) ||
990 getShiftExtendAmount() == 0);
993 template <unsigned width>
994 bool isArithmeticShifter() const {
998 // An arithmetic shifter is LSL, LSR, or ASR.
999 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1000 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1001 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1004 template <unsigned width>
1005 bool isLogicalShifter() const {
1009 // A logical shifter is LSL, LSR, ASR or ROR.
1010 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1011 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1012 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1013 getShiftExtendAmount() < width;
1016 bool isMovImm32Shifter() const {
// A 32-bit MOVi shifter is LSL of 0 or 16 (the original comment listed
// the 64-bit amounts; the code below only accepts 0 and 16).
1021 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1022 if (ST != AArch64_AM::LSL)
1024 uint64_t Val = getShiftExtendAmount();
1025 return (Val == 0 || Val == 16);
1028 bool isMovImm64Shifter() const {
// A 64-bit MOVi shifter is LSL of 0, 16, 32 or 48 (the original comment
// listed the 32-bit amounts; the code below accepts all four).
1033 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1034 if (ST != AArch64_AM::LSL)
1036 uint64_t Val = getShiftExtendAmount();
1037 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1040 bool isLogicalVecShifter() const {
1044 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1045 unsigned Shift = getShiftExtendAmount();
1046 return getShiftExtendType() == AArch64_AM::LSL &&
1047 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1050 bool isLogicalVecHalfWordShifter() const {
1051 if (!isLogicalVecShifter())
1054 // A logical vector shifter is a left shift by 0 or 8.
1055 unsigned Shift = getShiftExtendAmount();
1056 return getShiftExtendType() == AArch64_AM::LSL &&
1057 (Shift == 0 || Shift == 8);
1060 bool isMoveVecShifter() const {
1061 if (!isShiftExtend())
// A move vector shifter is an MSL ("masking shift left") by 8 or 16.
1065 unsigned Shift = getShiftExtendAmount();
1066 return getShiftExtendType() == AArch64_AM::MSL &&
1067 (Shift == 8 || Shift == 16);
1070 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1071 // to LDUR/STUR when the offset is not legal for the former but is for
1072 // the latter. As such, in addition to checking for being a legal unscaled
1073 // address, also check that it is not a legal scaled address. This avoids
1074 // ambiguity in the matcher.
1076 bool isSImm9OffsetFB() const {
1077 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: a page-aligned byte offset within +/-4GB (21-bit page
// count). Validation was handled during parsing; this is a sanity check.
1080 bool isAdrpLabel() const {
1081 // Validation was handled during parsing, so we just sanity check that
1082 // something didn't go haywire.
1086 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1087 int64_t Val = CE->getValue();
1088 int64_t Min = - (4096 * (1LL << (21 - 1)));
1089 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1090 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: any byte offset in the signed 21-bit range.
1096 bool isAdrLabel() const {
1097 // Validation was handled during parsing, so we just sanity check that
1098 // something didn't go haywire.
1102 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1103 int64_t Val = CE->getValue();
1104 int64_t Min = - (1LL << (21 - 1));
1105 int64_t Max = ((1LL << (21 - 1)) - 1);
1106 return Val >= Min && Val <= Max;
1112 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1113 // Add as immediates when possible. Null MCExpr = 0.
1115 Inst.addOperand(MCOperand::CreateImm(0));
1116 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1117 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1119 Inst.addOperand(MCOperand::CreateExpr(Expr));
1122 void addRegOperands(MCInst &Inst, unsigned N) const {
1123 assert(N == 1 && "Invalid number of operands!");
1124 Inst.addOperand(MCOperand::CreateReg(getReg()));
1127 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1128 assert(N == 1 && "Invalid number of operands!");
1130 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1132 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1133 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1134 RI->getEncodingValue(getReg()));
1136 Inst.addOperand(MCOperand::CreateReg(Reg));
1139 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1142 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1143 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1146 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1147 assert(N == 1 && "Invalid number of operands!");
1149 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1150 Inst.addOperand(MCOperand::CreateReg(getReg()));
1153 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1154 assert(N == 1 && "Invalid number of operands!");
1155 Inst.addOperand(MCOperand::CreateReg(getReg()));
1158 template <unsigned NumRegs>
1159 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1162 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1163 unsigned FirstReg = FirstRegs[NumRegs - 1];
1166 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1169 template <unsigned NumRegs>
1170 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1171 assert(N == 1 && "Invalid number of operands!");
1172 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1173 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1174 unsigned FirstReg = FirstRegs[NumRegs - 1];
1177 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1180 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1185 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 1 && "Invalid number of operands!");
1187 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1190 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1191 assert(N == 1 && "Invalid number of operands!");
1192 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1195 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1196 assert(N == 1 && "Invalid number of operands!");
1197 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1200 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1201 assert(N == 1 && "Invalid number of operands!");
1202 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1205 void addImmOperands(MCInst &Inst, unsigned N) const {
1206 assert(N == 1 && "Invalid number of operands!");
1207 // If this is a pageoff symrefexpr with an addend, adjust the addend
1208 // to be only the page-offset portion. Otherwise, just add the expr
1210 addExpr(Inst, getImm());
1213 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1214 assert(N == 2 && "Invalid number of operands!");
1215 if (isShiftedImm()) {
1216 addExpr(Inst, getShiftedImmVal());
1217 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1219 addExpr(Inst, getImm());
1220 Inst.addOperand(MCOperand::CreateImm(0));
// Condition code is emitted as its raw AArch64CC enum value.
1224 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1225 assert(N == 1 && "Invalid number of operands!");
1226 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
// ADRP label: symbolic references go through as expressions; constant
// addends are pre-scaled to 4KB pages (>> 12) since ADRP addresses pages,
// not bytes.  NOTE(review): the `if (!MCE)` guard around the expression
// path is on an elided line.
1229 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1233 addExpr(Inst, getImm());
1235 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
// ADR label needs no page scaling — delegate to the generic imm adder.
1238 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1239 addImmOperands(Inst, N);
// Unsigned scaled 12-bit offset: symbolic offsets are emitted as-is for the
// fixup machinery; constants are pre-divided by `Scale`.  NOTE(review):
// `Scale` is declared on a line elided from this dump — presumably the
// access size in bytes; confirm against the full file.
1243 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1244 assert(N == 1 && "Invalid number of operands!");
1245 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1248 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1251 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
// Signed 9-bit immediate: must already be a constant at this point.
1254 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1255 assert(N == 1 && "Invalid number of operands!");
1256 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1257 assert(MCE && "Invalid constant immediate operand!");
1258 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Signed 7-bit scaled immediates: the parsed byte offset is divided by the
// scale (4, 8 or 16) to obtain the encoded value.
1261 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1262 assert(N == 1 && "Invalid number of operands!");
1263 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1264 assert(MCE && "Invalid constant immediate operand!");
1265 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1268 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1269 assert(N == 1 && "Invalid number of operands!");
1270 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1271 assert(MCE && "Invalid constant immediate operand!");
1272 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1275 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1276 assert(N == 1 && "Invalid number of operands!");
1277 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1278 assert(MCE && "Invalid constant immediate operand!");
1279 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// --- Range-named immediate operands ---------------------------------------
// All of these have the same body: emit the constant value unchanged.  The
// range in each name is NOT enforced here — presumably the corresponding
// isImmX_Y() predicates have already vetted the value before these adders
// run (standard matcher flow; confirm against the predicate definitions).
1282 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1285 assert(MCE && "Invalid constant immediate operand!");
1286 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1292 assert(MCE && "Invalid constant immediate operand!");
1293 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1296 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1299 assert(MCE && "Invalid constant immediate operand!");
1300 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1303 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1306 assert(MCE && "Invalid constant immediate operand!");
1307 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1310 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1313 assert(MCE && "Invalid constant immediate operand!");
1314 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1317 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1318 assert(N == 1 && "Invalid number of operands!");
1319 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1320 assert(MCE && "Invalid constant immediate operand!");
1321 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1324 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1325 assert(N == 1 && "Invalid number of operands!");
1326 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1327 assert(MCE && "Invalid constant immediate operand!");
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1331 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1334 assert(MCE && "Invalid constant immediate operand!");
1335 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1338 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1340 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1341 assert(MCE && "Invalid constant immediate operand!");
1342 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1345 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1348 assert(MCE && "Invalid constant immediate operand!");
1349 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1352 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1355 assert(MCE && "Invalid constant immediate operand!");
1356 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1359 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1360 assert(N == 1 && "Invalid number of operands!");
1361 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1362 assert(MCE && "Invalid constant immediate operand!");
1363 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1366 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1367 assert(N == 1 && "Invalid number of operands!");
1368 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1369 assert(MCE && "Invalid constant immediate operand!");
1370 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1373 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1374 assert(N == 1 && "Invalid number of operands!");
1375 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1376 assert(MCE && "Invalid constant immediate operand!");
1377 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// --- Logical immediates ----------------------------------------------------
// Bitmask immediates for AND/ORR/EOR etc. are emitted in the hardware
// (N:immr:imms) encoding produced by encodeLogicalImmediate, not as the raw
// value.  The 32-bit variant masks to the low 32 bits first.
1380 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1383 assert(MCE && "Invalid logical immediate operand!");
1385 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1386 Inst.addOperand(MCOperand::CreateImm(encoding));
1389 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1390 assert(N == 1 && "Invalid number of operands!");
1391 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1392 assert(MCE && "Invalid logical immediate operand!");
1393 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1394 Inst.addOperand(MCOperand::CreateImm(encoding));
// "Not" variants encode the bitwise complement of the parsed value (used
// for the inverted aliases, e.g. BIC-style forms).  NOTE(review): these use
// cast<> (hard abort on non-constant) where the ones above use
// dyn_cast<> + assert — an inconsistency worth unifying in the full file.
1397 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1398 assert(N == 1 && "Invalid number of operands!");
1399 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1400 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1401 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1402 Inst.addOperand(MCOperand::CreateImm(encoding));
1405 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1406 assert(N == 1 && "Invalid number of operands!");
1407 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1410 Inst.addOperand(MCOperand::CreateImm(encoding));
// AdvSIMD modified-immediate, cmode "type 10" (FMOV-style 8-bit pattern):
// re-encode the constant via the AArch64_AM helper before emission.
1413 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1414 assert(N == 1 && "Invalid number of operands!");
1415 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1416 assert(MCE && "Invalid immediate operand!");
1417 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1418 Inst.addOperand(MCOperand::CreateImm(encoding));
// --- PC-relative branch/label operands -------------------------------------
// All three are identical in shape: a symbolic target is passed through as
// an expression (the fixup resolves it later); a constant target has its
// low two bits shifted off, since branch offsets are encoded in 4-byte
// units.  NOTE(review): the `if (!MCE)` / `return` framing around the
// expression path sits on lines elided from this dump.
1421 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1422 // Branch operands don't encode the low bits, so shift them off
1423 // here. If it's a label, however, just put it on directly as there's
1424 // not enough information now to do anything.
1425 assert(N == 1 && "Invalid number of operands!");
1426 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1428 addExpr(Inst, getImm());
1431 assert(MCE && "Invalid constant immediate operand!");
1432 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1435 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1436 // Branch operands don't encode the low bits, so shift them off
1437 // here. If it's a label, however, just put it on directly as there's
1438 // not enough information now to do anything.
1439 assert(N == 1 && "Invalid number of operands!");
1440 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1442 addExpr(Inst, getImm());
1445 assert(MCE && "Invalid constant immediate operand!");
1446 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1449 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1450 // Branch operands don't encode the low bits, so shift them off
1451 // here. If it's a label, however, just put it on directly as there's
1452 // not enough information now to do anything.
1453 assert(N == 1 && "Invalid number of operands!");
1454 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1456 addExpr(Inst, getImm());
1459 assert(MCE && "Invalid constant immediate operand!");
1460 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// FP immediate: emit the pre-computed 8-bit FMOV encoding stored by the
// parser (see tryParseFPImm below).
1463 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1464 assert(N == 1 && "Invalid number of operands!");
1465 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
// Barrier option (DMB/DSB-style operand) as its raw numeric value.
1468 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1469 assert(N == 1 && "Invalid number of operands!");
1470 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
// System registers: the saved name is re-resolved through the MRS/MSR
// mappers (feature-bit aware) into the encoded register number.
// NOTE(review): the `Valid` flag declarations and any check of them sit on
// elided lines — confirm the invalid-name path in the full file.
1473 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1474 assert(N == 1 && "Invalid number of operands!");
1477 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1478 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1480 Inst.addOperand(MCOperand::CreateImm(Bits));
1483 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1484 assert(N == 1 && "Invalid number of operands!");
1487 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1488 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1490 Inst.addOperand(MCOperand::CreateImm(Bits));
// MSR pstate-field operand (e.g. DAIFSet) resolved via the PState mapper.
1493 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && "Invalid number of operands!");
1498 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1500 Inst.addOperand(MCOperand::CreateImm(Bits));
// System-instruction Cn operand and prefetch hint: raw values.
1503 void addSysCROperands(MCInst &Inst, unsigned N) const {
1504 assert(N == 1 && "Invalid number of operands!");
1505 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1508 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1509 assert(N == 1 && "Invalid number of operands!");
1510 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Shift operand (LSL/LSR/ASR/ROR/MSL + amount) packed into one immediate
// via getShifterImm.
1513 void addShifterOperands(MCInst &Inst, unsigned N) const {
1514 assert(N == 1 && "Invalid number of operands!");
1516 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1517 Inst.addOperand(MCOperand::CreateImm(Imm));
// Arithmetic extend: a bare "lsl" is canonicalised to UXTW (32-bit form)
// before packing with getArithExtendImm.
1520 void addExtendOperands(MCInst &Inst, unsigned N) const {
1521 assert(N == 1 && "Invalid number of operands!");
1522 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1523 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1524 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1525 Inst.addOperand(MCOperand::CreateImm(Imm));
// 64-bit variant: bare "lsl" canonicalises to UXTX instead.
1528 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1529 assert(N == 1 && "Invalid number of operands!");
1530 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1531 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1532 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1533 Inst.addOperand(MCOperand::CreateImm(Imm));
// Register-offset memory extend: two boolean immediates — signedness of
// the extend, and whether a (non-zero) shift applies.
1536 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1537 assert(N == 2 && "Invalid number of operands!");
1538 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1539 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1540 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1541 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1544 // For 8-bit load/store instructions with a register offset, both the
1545 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1546 // they're disambiguated by whether the shift was explicit or implicit rather
// ... than by its value; hence hasShiftExtendAmount() below instead of
// the amount != 0 test used by addMemExtendOperands.
1548 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1549 assert(N == 2 && "Invalid number of operands!");
1550 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1551 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1552 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1553 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
// MOV-alias support: extract the 16-bit halfword that MOVZ (or, for the
// MOVN form, its complement) would materialise.  NOTE(review): `Shift` is
// declared on a line elided from this dump — presumably derived from a
// template parameter or computed just above; confirm in the full file.
1557 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1558 assert(N == 1 && "Invalid number of operands!");
1560 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1561 uint64_t Value = CE->getValue();
1562 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1566 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1567 assert(N == 1 && "Invalid number of operands!");
1569 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1570 uint64_t Value = CE->getValue();
1571 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1574 void print(raw_ostream &OS) const override;
// --- Factory methods -------------------------------------------------------
// Each Create* builds a heap-allocated AArch64Operand of the matching kind,
// fills in the kind-specific union fields shown, and (on elided lines)
// records the start/end source locations before returning the unique_ptr.
1576 static std::unique_ptr<AArch64Operand>
1577 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1578 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1579 Op->Tok.Data = Str.data();
1580 Op->Tok.Length = Str.size();
1581 Op->Tok.IsSuffix = IsSuffix;
1587 static std::unique_ptr<AArch64Operand>
1588 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1589 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1590 Op->Reg.RegNum = RegNum;
1591 Op->Reg.isVector = isVector;
1597 static std::unique_ptr<AArch64Operand>
1598 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1599 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1600 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1601 Op->VectorList.RegNum = RegNum;
1602 Op->VectorList.Count = Count;
1603 Op->VectorList.NumElements = NumElements;
1604 Op->VectorList.ElementKind = ElementKind;
1610 static std::unique_ptr<AArch64Operand>
1611 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1612 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1613 Op->VectorIndex.Val = Idx;
1619 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1620 SMLoc E, MCContext &Ctx) {
1621 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1628 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1629 unsigned ShiftAmount,
1632 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
// NOTE(review): stray space in "ShiftedImm .Val" below — harmless to the
// compiler but should be normalised to "ShiftedImm.Val".
1633 Op->ShiftedImm .Val = Val;
1634 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1640 static std::unique_ptr<AArch64Operand>
1641 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1642 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1643 Op->CondCode.Code = Code;
1649 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1651 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1652 Op->FPImm.Val = Val;
1658 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1660 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1661 Op->Barrier.Val = Val;
// SysReg keeps a raw pointer/length into the parsed name plus the feature
// bits needed by the MRS/MSR mappers at emission time.
1667 static std::unique_ptr<AArch64Operand>
1668 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1669 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1670 Op->SysReg.Data = Str.data();
1671 Op->SysReg.Length = Str.size();
1672 Op->SysReg.FeatureBits = FeatureBits;
1678 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1679 SMLoc E, MCContext &Ctx) {
1680 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1681 Op->SysCRImm.Val = Val;
1687 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1689 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1690 Op->Prefetch.Val = Val;
1696 static std::unique_ptr<AArch64Operand>
1697 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1698 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1699 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1700 Op->ShiftExtend.Type = ShOp;
1701 Op->ShiftExtend.Amount = Val;
1702 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1709 } // end anonymous namespace.
// Debug dump of an operand.  This is a switch over the operand kind; most
// of the `case k_...:` labels (and the enclosing switch statement) sit on
// lines elided from this dump — each fragment below is the body for the
// kind named in its output string.
1711 void AArch64Operand::print(raw_ostream &OS) const {
1714 OS << "<fpimm " << getFPImm() << "("
1715 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
// Barrier: print the symbolic name when the mapper recognises the value,
// otherwise fall back to the raw number.
1719 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1721 OS << "<barrier " << Name << ">";
1723 OS << "<barrier invalid #" << getBarrier() << ">";
1727 getImm()->print(OS);
1729 case k_ShiftedImm: {
1730 unsigned Shift = getShiftedImmShift();
1731 OS << "<shiftedimm ";
1732 getShiftedImmVal()->print(OS);
1733 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1737 OS << "<condcode " << getCondCode() << ">";
1740 OS << "<register " << getReg() << ">";
1742 case k_VectorList: {
1743 OS << "<vectorlist ";
1744 unsigned Reg = getVectorListStart();
// Vector lists print each consecutive register number in the list.
1745 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1746 OS << Reg + i << " ";
1751 OS << "<vectorindex " << getVectorIndex() << ">";
1754 OS << "<sysreg: " << getSysReg() << '>';
1757 OS << "'" << getToken() << "'";
1760 OS << "c" << getSysCR();
// Prefetch: symbolic name when valid, raw number otherwise (mirrors the
// barrier case above).
1764 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1766 OS << "<prfop " << Name << ">";
1768 OS << "<prfop invalid #" << getPrefetch() << ">";
1771 case k_ShiftExtend: {
1772 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1773 << getShiftExtendAmount();
1774 if (!hasShiftExtendAmount())
1782 /// @name Auto-generated Match Functions
1785 static unsigned MatchRegisterName(StringRef Name);
// Map a lower-cased vector register name ("v0".."v31") onto the
// corresponding 128-bit Q register.  Any other name falls through to the
// StringSwitch default (on a line elided from this dump — presumably 0,
// i.e. "no register").
1789 static unsigned matchVectorRegName(StringRef Name) {
1790 return StringSwitch<unsigned>(Name)
1791 .Case("v0", AArch64::Q0)
1792 .Case("v1", AArch64::Q1)
1793 .Case("v2", AArch64::Q2)
1794 .Case("v3", AArch64::Q3)
1795 .Case("v4", AArch64::Q4)
1796 .Case("v5", AArch64::Q5)
1797 .Case("v6", AArch64::Q6)
1798 .Case("v7", AArch64::Q7)
1799 .Case("v8", AArch64::Q8)
1800 .Case("v9", AArch64::Q9)
1801 .Case("v10", AArch64::Q10)
1802 .Case("v11", AArch64::Q11)
1803 .Case("v12", AArch64::Q12)
1804 .Case("v13", AArch64::Q13)
1805 .Case("v14", AArch64::Q14)
1806 .Case("v15", AArch64::Q15)
1807 .Case("v16", AArch64::Q16)
1808 .Case("v17", AArch64::Q17)
1809 .Case("v18", AArch64::Q18)
1810 .Case("v19", AArch64::Q19)
1811 .Case("v20", AArch64::Q20)
1812 .Case("v21", AArch64::Q21)
1813 .Case("v22", AArch64::Q22)
1814 .Case("v23", AArch64::Q23)
1815 .Case("v24", AArch64::Q24)
1816 .Case("v25", AArch64::Q25)
1817 .Case("v26", AArch64::Q26)
1818 .Case("v27", AArch64::Q27)
1819 .Case("v28", AArch64::Q28)
1820 .Case("v29", AArch64::Q29)
1821 .Case("v30", AArch64::Q30)
1822 .Case("v31", AArch64::Q31)
// Accept a vector-arrangement suffix (e.g. ".8b", ".4h", ...).  The actual
// .Case list of the StringSwitch is elided from this dump; the comment
// below is original and explains that width-neutral suffixes are accepted
// too.
1826 static bool isValidVectorKind(StringRef Name) {
1827 return StringSwitch<bool>(Name.lower())
1837 // Accept the width neutral ones, too, for verbose syntax. If those
1838 // aren't used in the right places, the token operand won't match so
1839 // all will work out.
// Decompose a (pre-validated) vector kind string into its lane count and
// element-kind character: the last character is the element kind
// ('b'/'h'/'s'/'d'); digits after the leading '.' form the lane count.
// NOTE(review): NumElements' zero-initialisation and some framing lines
// are elided from this dump.
1847 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1848 char &ElementKind) {
1849 assert(isValidVectorKind(Name));
1851 ElementKind = Name.lower()[Name.size() - 1];
// A two-character kind (".b" etc.) has no lane count at all.
1854 if (Name.size() == 2)
1857 // Parse the lane count
1858 Name = Name.drop_front();
1859 while (isdigit(Name.front())) {
1860 NumElements = 10 * NumElements + (Name.front() - '0');
1861 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true on FAILURE (tryParseRegister signals failure with -1),
// matching the MC parser convention that true means error.
1865 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1867 StartLoc = getLoc();
1868 RegNo = tryParseRegister();
1869 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1870 return (RegNo == (unsigned)-1);
1873 // Matches a register name or register alias previously defined by '.req'
// First try the real register tables (vector vs. scalar), then fall back
// to the .req alias map.  An alias only applies if it was registered with
// the same vector-ness as requested.  NOTE(review): the early-return when
// the table lookup already succeeded, and the final `return RegNum;`, are
// on lines elided from this dump.
1874 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1876 unsigned RegNum = isVector ? matchVectorRegName(Name)
1877 : MatchRegisterName(Name);
1880 // Check for aliases registered via .req. Canonicalize to lower case.
1881 // That's more consistent since register names are case insensitive, and
1882 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1883 auto Entry = RegisterReqs.find(Name.lower());
1884 if (Entry == RegisterReqs.end())
1886 // set RegNum if the match is the right kind of register
1887 if (isVector == Entry->getValue().first)
1888 RegNum = Entry->getValue().second;
1893 /// tryParseRegister - Try to parse a register name. The token must be an
1894 /// Identifier when called, and if it is a register name the token is eaten and
1895 /// the register is added to the operand list.
///
/// Returns the register number, or -1 if the identifier is not a register
/// (the failure path and final return are on lines elided from this dump).
1896 int AArch64AsmParser::tryParseRegister() {
1897 const AsmToken &Tok = Parser.getTok();
1898 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Register names are case-insensitive; canonicalise before lookup.
1900 std::string lowerCase = Tok.getString().lower();
1901 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1902 // Also handle a few aliases of registers.
// fp/lr are the ABI names for x29/x30; x31/w31 spell the zero registers.
1904 RegNum = StringSwitch<unsigned>(lowerCase)
1905 .Case("fp", AArch64::FP)
1906 .Case("lr", AArch64::LR)
1907 .Case("x31", AArch64::XZR)
1908 .Case("w31", AArch64::WZR)
1914 Parser.Lex(); // Eat identifier token.
1918 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1919 /// kind specifier. If it is a register specifier, eat the token and return it.
///
/// On success the arrangement suffix (e.g. ".8b"), if any, is returned via
/// \p Kind.  Error/return plumbing (including use of \p expected) sits on
/// lines elided from this dump.
1920 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1921 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1922 TokError("vector register expected");
1926 StringRef Name = Parser.getTok().getString();
1927 // If there is a kind specifier, it's separated from the register name by
// ... a '.' — split there and validate each half separately.
1929 size_t Start = 0, Next = Name.find('.');
1930 StringRef Head = Name.slice(Start, Next);
1931 unsigned RegNum = matchRegisterNameAlias(Head, true);
1934 if (Next != StringRef::npos) {
1935 Kind = Name.slice(Next, StringRef::npos);
1936 if (!isValidVectorKind(Kind)) {
1937 TokError("invalid vector kind qualifier");
1941 Parser.Lex(); // Eat the register token.
1946 TokError("vector register expected");
1950 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
///
/// Accepts "c0".."c15" (case-insensitive leading 'c') and pushes a SysCR
/// operand; any deviation produces the same diagnostic and a parse failure.
1951 AArch64AsmParser::OperandMatchResultTy
1952 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1955 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1956 Error(S, "Expected cN operand where 0 <= N <= 15");
1957 return MatchOperand_ParseFail;
1960 StringRef Tok = Parser.getTok().getIdentifier();
1961 if (Tok[0] != 'c' && Tok[0] != 'C') {
1962 Error(S, "Expected cN operand where 0 <= N <= 15");
1963 return MatchOperand_ParseFail;
// Parse the decimal number after the 'c' and range-check it.
1967 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1968 if (BadNum || CRNum > 15) {
1969 Error(S, "Expected cN operand where 0 <= N <= 15");
1970 return MatchOperand_ParseFail;
1973 Parser.Lex(); // Eat identifier token.
1975 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1976 return MatchOperand_Success;
1979 /// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Two accepted forms: an (optionally '#'-prefixed) integer in [0,31], or a
/// symbolic prefetch-hint name resolved via the PRFM mapper.
1980 AArch64AsmParser::OperandMatchResultTy
1981 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1983 const AsmToken &Tok = Parser.getTok();
1984 // Either an identifier for named values or a 5-bit immediate.
1985 bool Hash = Tok.is(AsmToken::Hash);
1986 if (Hash || Tok.is(AsmToken::Integer)) {
1988 Parser.Lex(); // Eat hash token.
1989 const MCExpr *ImmVal;
1990 if (getParser().parseExpression(ImmVal))
1991 return MatchOperand_ParseFail;
// The expression must fold to a constant; symbolic prefetch numbers are
// not supported.
1993 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1995 TokError("immediate value expected for prefetch operand");
1996 return MatchOperand_ParseFail;
1998 unsigned prfop = MCE->getValue();
// NOTE(review): the `prfop > 31` range test guarding this error is on an
// elided line.
2000 TokError("prefetch operand out of range, [0,31] expected");
2001 return MatchOperand_ParseFail;
2004 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
2005 return MatchOperand_Success;
2008 if (Tok.isNot(AsmToken::Identifier)) {
2009 TokError("pre-fetch hint expected");
2010 return MatchOperand_ParseFail;
// Symbolic form: look the name up in the PRFM mapper; `Valid` (declared on
// an elided line) gates the error below.
2014 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
2016 TokError("pre-fetch hint expected");
2017 return MatchOperand_ParseFail;
2020 Parser.Lex(); // Eat identifier token.
2021 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
2022 return MatchOperand_Success;
2025 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.  Accepts an optional '#', then a symbolic immediate whose
/// relocation modifier (if any) must be one of the page-oriented kinds;
/// a bare symbol is promoted to an ELF :abs_page: reference.
2027 AArch64AsmParser::OperandMatchResultTy
2028 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2032 if (Parser.getTok().is(AsmToken::Hash)) {
2033 Parser.Lex(); // Eat hash token.
2036 if (parseSymbolicImmVal(Expr))
2037 return MatchOperand_ParseFail;
2039 AArch64MCExpr::VariantKind ELFRefKind;
2040 MCSymbolRefExpr::VariantKind DarwinRefKind;
2042 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2043 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2044 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2045 // No modifier was specified at all; this is the syntax for an ELF basic
2046 // ADRP relocation (unfortunately).
2048 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
// @gotpage / @tlvppage references cannot carry an addend (the condition's
// Addend test is on an elided line).
2049 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2050 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2052 Error(S, "gotpage label reference not allowed an addend");
2053 return MatchOperand_ParseFail;
2054 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2055 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2056 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2057 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2058 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2059 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE {
2060 // The operand must be an @page or @gotpage qualified symbolref.
2061 Error(S, "page or gotpage label reference expected");
2062 return MatchOperand_ParseFail;
2066 // We have either a label reference possibly with addend or an immediate. The
2067 // addend is a raw value here. The linker will adjust it to only reference the
2069 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2070 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2072 return MatchOperand_Success;
2075 /// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.  Unlike ADRP there is no page-modifier validation: any
/// expression after an optional '#' is accepted and wrapped as an
/// immediate operand.
2077 AArch64AsmParser::OperandMatchResultTy
2078 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2082 if (Parser.getTok().is(AsmToken::Hash)) {
2083 Parser.Lex(); // Eat hash token.
2086 if (getParser().parseExpression(Expr))
2087 return MatchOperand_ParseFail;
2089 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2090 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2092 return MatchOperand_Success;
2095 /// tryParseFPImm - A floating point immediate expression operand.
///
/// Parses an optional '#', optional '-', then either a real literal or an
/// integer.  The value is bit-cast through an IEEE double and converted to
/// the 8-bit FMOV immediate encoding via getFP64Imm; hex integers in
/// [0,255] are taken as an already-encoded value.
2096 AArch64AsmParser::OperandMatchResultTy
2097 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2101 if (Parser.getTok().is(AsmToken::Hash)) {
2102 Parser.Lex(); // Eat '#'
2106 // Handle negation, as that still comes through as a separate token.
2107 bool isNegative = false;
2108 if (Parser.getTok().is(AsmToken::Minus)) {
2112 const AsmToken &Tok = Parser.getTok();
2113 if (Tok.is(AsmToken::Real)) {
2114 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2115 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2116 // If we had a '-' in front, toggle the sign bit.
2117 IntVal ^= (uint64_t)isNegative << 63;
// getFP64Imm returns -1 when the value is not representable as an 8-bit
// FP immediate.
2118 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2119 Parser.Lex(); // Eat the token.
2120 // Check for out of range values. As an exception, we let Zero through,
2121 // as we handle that special case in post-processing before matching in
2122 // order to use the zero register for it.
2123 if (Val == -1 && !RealVal.isZero()) {
2124 TokError("expected compatible register or floating-point constant");
2125 return MatchOperand_ParseFail;
2127 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2128 return MatchOperand_Success;
2130 if (Tok.is(AsmToken::Integer)) {
// A non-negative hex integer is treated as the raw 8-bit encoding;
// anything else is parsed as a decimal FP value and re-encoded.
2132 if (!isNegative && Tok.getString().startswith("0x")) {
2133 Val = Tok.getIntVal();
2134 if (Val > 255 || Val < 0) {
2135 TokError("encoded floating point value out of range");
2136 return MatchOperand_ParseFail;
2139 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2140 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2141 // If we had a '-' in front, toggle the sign bit.
2142 IntVal ^= (uint64_t)isNegative << 63;
2143 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2145 Parser.Lex(); // Eat the token.
2146 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2147 return MatchOperand_Success;
// No '#' and no FP-looking token: not our operand (elided framing).
2151 return MatchOperand_NoMatch;
2153 TokError("invalid floating point immediate");
2154 return MatchOperand_ParseFail;
2157 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
///
/// Grammar: ['#'] symbolic-imm [',' "lsl" ['#'] N].  With no shift clause,
/// a constant of the form X000 hex-page-aligned (> 0xfff with low 12 bits
/// clear) is canonicalised to (X >> 12, lsl #12); the ShiftAmount update
/// for that case is on an elided line.
2158 AArch64AsmParser::OperandMatchResultTy
2159 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2162 if (Parser.getTok().is(AsmToken::Hash))
2163 Parser.Lex(); // Eat '#'
2164 else if (Parser.getTok().isNot(AsmToken::Integer))
2165 // Operand should start from # or should be integer, emit error otherwise.
2166 return MatchOperand_NoMatch;
2169 if (parseSymbolicImmVal(Imm))
2170 return MatchOperand_ParseFail;
2171 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2172 uint64_t ShiftAmount = 0;
2173 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2175 int64_t Val = MCE->getValue();
2176 if (Val > 0xfff && (Val & 0xfff) == 0) {
2177 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2181 SMLoc E = Parser.getTok().getLoc();
2182 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2184 return MatchOperand_Success;
2190 // The optional operand must be "lsl #N" where N is non-negative.
2191 if (!Parser.getTok().is(AsmToken::Identifier) ||
2192 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2193 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate")
2194 return MatchOperand_ParseFail;
// The '#' before the shift amount is optional.
2200 if (Parser.getTok().is(AsmToken::Hash)) {
2204 if (Parser.getTok().isNot(AsmToken::Integer)) {
2205 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2206 return MatchOperand_ParseFail;
2209 int64_t ShiftAmount = Parser.getTok().getIntVal();
2211 if (ShiftAmount < 0) {
2212 Error(Parser.getTok().getLoc(), "positive shift amount required");
2213 return MatchOperand_ParseFail;
2215 Parser.Lex(); // Eat the number
2217 SMLoc E = Parser.getTok().getLoc();
2218 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2219 S, E, getContext()));
2220 return MatchOperand_Success;
2223 /// parseCondCodeString - Parse a Condition Code string.
///
/// Case-insensitive mapping of the A64 condition mnemonics (including the
/// cs/hs and cc/lo synonyms) to AArch64CC values; unknown strings map to
/// AArch64CC::Invalid.
2224 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2225 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2226 .Case("eq", AArch64CC::EQ)
2227 .Case("ne", AArch64CC::NE)
2228 .Case("cs", AArch64CC::HS)
2229 .Case("hs", AArch64CC::HS)
2230 .Case("cc", AArch64CC::LO)
2231 .Case("lo", AArch64CC::LO)
2232 .Case("mi", AArch64CC::MI)
2233 .Case("pl", AArch64CC::PL)
2234 .Case("vs", AArch64CC::VS)
2235 .Case("vc", AArch64CC::VC)
2236 .Case("hi", AArch64CC::HI)
2237 .Case("ls", AArch64CC::LS)
2238 .Case("ge", AArch64CC::GE)
2239 .Case("lt", AArch64CC::LT)
2240 .Case("gt", AArch64CC::GT)
2241 .Case("le", AArch64CC::LE)
2242 .Case("al", AArch64CC::AL)
2243 .Case("nv", AArch64CC::NV)
2244 .Default(AArch64CC::Invalid)
2248 /// parseCondCode - Parse a Condition Code operand.
///
/// Consumes the identifier token and pushes a CondCode operand.  When
/// \p invertCondCode is set (instructions like CSINC aliases take the
/// inverse), AL/NV are rejected and the parsed code is inverted.
2249 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2250 bool invertCondCode) {
2252 const AsmToken &Tok = Parser.getTok();
2253 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2255 StringRef Cond = Tok.getString();
2256 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2257 if (CC == AArch64CC::Invalid)
2258 return TokError("invalid condition code");
2259 Parser.Lex(); // Eat identifier token.
2261 if (invertCondCode) {
2262 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2263 return TokError("condition codes AL and NV are invalid for this instruction");
2264 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2268 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2272 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2273 /// them if present.
2274 AArch64AsmParser::OperandMatchResultTy
2275 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2276 const AsmToken &Tok = Parser.getTok();
2277 std::string LowerID = Tok.getString().lower();
2278 AArch64_AM::ShiftExtendType ShOp =
2279 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2280 .Case("lsl", AArch64_AM::LSL)
2281 .Case("lsr", AArch64_AM::LSR)
2282 .Case("asr", AArch64_AM::ASR)
2283 .Case("ror", AArch64_AM::ROR)
2284 .Case("msl", AArch64_AM::MSL)
2285 .Case("uxtb", AArch64_AM::UXTB)
2286 .Case("uxth", AArch64_AM::UXTH)
2287 .Case("uxtw", AArch64_AM::UXTW)
2288 .Case("uxtx", AArch64_AM::UXTX)
2289 .Case("sxtb", AArch64_AM::SXTB)
2290 .Case("sxth", AArch64_AM::SXTH)
2291 .Case("sxtw", AArch64_AM::SXTW)
2292 .Case("sxtx", AArch64_AM::SXTX)
2293 .Default(AArch64_AM::InvalidShiftExtend);
2295 if (ShOp == AArch64_AM::InvalidShiftExtend)
2296 return MatchOperand_NoMatch;
2298 SMLoc S = Tok.getLoc();
2301 bool Hash = getLexer().is(AsmToken::Hash);
2302 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2303 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2304 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2305 ShOp == AArch64_AM::MSL) {
2306 // We expect a number here.
2307 TokError("expected #imm after shift specifier");
2308 return MatchOperand_ParseFail;
2311 // "extend" type operatoins don't need an immediate, #0 is implicit.
2312 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2314 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2315 return MatchOperand_Success;
2319 Parser.Lex(); // Eat the '#'.
2321 // Make sure we do actually have a number
2322 if (!Parser.getTok().is(AsmToken::Integer)) {
2323 Error(Parser.getTok().getLoc(),
2324 "expected integer shift amount");
2325 return MatchOperand_ParseFail;
2328 const MCExpr *ImmVal;
2329 if (getParser().parseExpression(ImmVal))
2330 return MatchOperand_ParseFail;
2332 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2334 TokError("expected #imm after shift specifier");
2335 return MatchOperand_ParseFail;
2338 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2339 Operands.push_back(AArch64Operand::CreateShiftExtend(
2340 ShOp, MCE->getValue(), true, S, E, getContext()));
2341 return MatchOperand_Success;
2344 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2345 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2346 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2347 OperandVector &Operands) {
2348 if (Name.find('.') != StringRef::npos)
2349 return TokError("invalid operand");
2353 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2355 const AsmToken &Tok = Parser.getTok();
2356 StringRef Op = Tok.getString();
2357 SMLoc S = Tok.getLoc();
2359 const MCExpr *Expr = nullptr;
2361 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2363 Expr = MCConstantExpr::Create(op1, getContext()); \
2364 Operands.push_back( \
2365 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2366 Operands.push_back( \
2367 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2368 Operands.push_back( \
2369 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2370 Expr = MCConstantExpr::Create(op2, getContext()); \
2371 Operands.push_back( \
2372 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2375 if (Mnemonic == "ic") {
2376 if (!Op.compare_lower("ialluis")) {
2377 // SYS #0, C7, C1, #0
2378 SYS_ALIAS(0, 7, 1, 0);
2379 } else if (!Op.compare_lower("iallu")) {
2380 // SYS #0, C7, C5, #0
2381 SYS_ALIAS(0, 7, 5, 0);
2382 } else if (!Op.compare_lower("ivau")) {
2383 // SYS #3, C7, C5, #1
2384 SYS_ALIAS(3, 7, 5, 1);
2386 return TokError("invalid operand for IC instruction");
2388 } else if (Mnemonic == "dc") {
2389 if (!Op.compare_lower("zva")) {
2390 // SYS #3, C7, C4, #1
2391 SYS_ALIAS(3, 7, 4, 1);
2392 } else if (!Op.compare_lower("ivac")) {
2393 // SYS #3, C7, C6, #1
2394 SYS_ALIAS(0, 7, 6, 1);
2395 } else if (!Op.compare_lower("isw")) {
2396 // SYS #0, C7, C6, #2
2397 SYS_ALIAS(0, 7, 6, 2);
2398 } else if (!Op.compare_lower("cvac")) {
2399 // SYS #3, C7, C10, #1
2400 SYS_ALIAS(3, 7, 10, 1);
2401 } else if (!Op.compare_lower("csw")) {
2402 // SYS #0, C7, C10, #2
2403 SYS_ALIAS(0, 7, 10, 2);
2404 } else if (!Op.compare_lower("cvau")) {
2405 // SYS #3, C7, C11, #1
2406 SYS_ALIAS(3, 7, 11, 1);
2407 } else if (!Op.compare_lower("civac")) {
2408 // SYS #3, C7, C14, #1
2409 SYS_ALIAS(3, 7, 14, 1);
2410 } else if (!Op.compare_lower("cisw")) {
2411 // SYS #0, C7, C14, #2
2412 SYS_ALIAS(0, 7, 14, 2);
2414 return TokError("invalid operand for DC instruction");
2416 } else if (Mnemonic == "at") {
2417 if (!Op.compare_lower("s1e1r")) {
2418 // SYS #0, C7, C8, #0
2419 SYS_ALIAS(0, 7, 8, 0);
2420 } else if (!Op.compare_lower("s1e2r")) {
2421 // SYS #4, C7, C8, #0
2422 SYS_ALIAS(4, 7, 8, 0);
2423 } else if (!Op.compare_lower("s1e3r")) {
2424 // SYS #6, C7, C8, #0
2425 SYS_ALIAS(6, 7, 8, 0);
2426 } else if (!Op.compare_lower("s1e1w")) {
2427 // SYS #0, C7, C8, #1
2428 SYS_ALIAS(0, 7, 8, 1);
2429 } else if (!Op.compare_lower("s1e2w")) {
2430 // SYS #4, C7, C8, #1
2431 SYS_ALIAS(4, 7, 8, 1);
2432 } else if (!Op.compare_lower("s1e3w")) {
2433 // SYS #6, C7, C8, #1
2434 SYS_ALIAS(6, 7, 8, 1);
2435 } else if (!Op.compare_lower("s1e0r")) {
2436 // SYS #0, C7, C8, #3
2437 SYS_ALIAS(0, 7, 8, 2);
2438 } else if (!Op.compare_lower("s1e0w")) {
2439 // SYS #0, C7, C8, #3
2440 SYS_ALIAS(0, 7, 8, 3);
2441 } else if (!Op.compare_lower("s12e1r")) {
2442 // SYS #4, C7, C8, #4
2443 SYS_ALIAS(4, 7, 8, 4);
2444 } else if (!Op.compare_lower("s12e1w")) {
2445 // SYS #4, C7, C8, #5
2446 SYS_ALIAS(4, 7, 8, 5);
2447 } else if (!Op.compare_lower("s12e0r")) {
2448 // SYS #4, C7, C8, #6
2449 SYS_ALIAS(4, 7, 8, 6);
2450 } else if (!Op.compare_lower("s12e0w")) {
2451 // SYS #4, C7, C8, #7
2452 SYS_ALIAS(4, 7, 8, 7);
2454 return TokError("invalid operand for AT instruction");
2456 } else if (Mnemonic == "tlbi") {
2457 if (!Op.compare_lower("vmalle1is")) {
2458 // SYS #0, C8, C3, #0
2459 SYS_ALIAS(0, 8, 3, 0);
2460 } else if (!Op.compare_lower("alle2is")) {
2461 // SYS #4, C8, C3, #0
2462 SYS_ALIAS(4, 8, 3, 0);
2463 } else if (!Op.compare_lower("alle3is")) {
2464 // SYS #6, C8, C3, #0
2465 SYS_ALIAS(6, 8, 3, 0);
2466 } else if (!Op.compare_lower("vae1is")) {
2467 // SYS #0, C8, C3, #1
2468 SYS_ALIAS(0, 8, 3, 1);
2469 } else if (!Op.compare_lower("vae2is")) {
2470 // SYS #4, C8, C3, #1
2471 SYS_ALIAS(4, 8, 3, 1);
2472 } else if (!Op.compare_lower("vae3is")) {
2473 // SYS #6, C8, C3, #1
2474 SYS_ALIAS(6, 8, 3, 1);
2475 } else if (!Op.compare_lower("aside1is")) {
2476 // SYS #0, C8, C3, #2
2477 SYS_ALIAS(0, 8, 3, 2);
2478 } else if (!Op.compare_lower("vaae1is")) {
2479 // SYS #0, C8, C3, #3
2480 SYS_ALIAS(0, 8, 3, 3);
2481 } else if (!Op.compare_lower("alle1is")) {
2482 // SYS #4, C8, C3, #4
2483 SYS_ALIAS(4, 8, 3, 4);
2484 } else if (!Op.compare_lower("vale1is")) {
2485 // SYS #0, C8, C3, #5
2486 SYS_ALIAS(0, 8, 3, 5);
2487 } else if (!Op.compare_lower("vaale1is")) {
2488 // SYS #0, C8, C3, #7
2489 SYS_ALIAS(0, 8, 3, 7);
2490 } else if (!Op.compare_lower("vmalle1")) {
2491 // SYS #0, C8, C7, #0
2492 SYS_ALIAS(0, 8, 7, 0);
2493 } else if (!Op.compare_lower("alle2")) {
2494 // SYS #4, C8, C7, #0
2495 SYS_ALIAS(4, 8, 7, 0);
2496 } else if (!Op.compare_lower("vale2is")) {
2497 // SYS #4, C8, C3, #5
2498 SYS_ALIAS(4, 8, 3, 5);
2499 } else if (!Op.compare_lower("vale3is")) {
2500 // SYS #6, C8, C3, #5
2501 SYS_ALIAS(6, 8, 3, 5);
2502 } else if (!Op.compare_lower("alle3")) {
2503 // SYS #6, C8, C7, #0
2504 SYS_ALIAS(6, 8, 7, 0);
2505 } else if (!Op.compare_lower("vae1")) {
2506 // SYS #0, C8, C7, #1
2507 SYS_ALIAS(0, 8, 7, 1);
2508 } else if (!Op.compare_lower("vae2")) {
2509 // SYS #4, C8, C7, #1
2510 SYS_ALIAS(4, 8, 7, 1);
2511 } else if (!Op.compare_lower("vae3")) {
2512 // SYS #6, C8, C7, #1
2513 SYS_ALIAS(6, 8, 7, 1);
2514 } else if (!Op.compare_lower("aside1")) {
2515 // SYS #0, C8, C7, #2
2516 SYS_ALIAS(0, 8, 7, 2);
2517 } else if (!Op.compare_lower("vaae1")) {
2518 // SYS #0, C8, C7, #3
2519 SYS_ALIAS(0, 8, 7, 3);
2520 } else if (!Op.compare_lower("alle1")) {
2521 // SYS #4, C8, C7, #4
2522 SYS_ALIAS(4, 8, 7, 4);
2523 } else if (!Op.compare_lower("vale1")) {
2524 // SYS #0, C8, C7, #5
2525 SYS_ALIAS(0, 8, 7, 5);
2526 } else if (!Op.compare_lower("vale2")) {
2527 // SYS #4, C8, C7, #5
2528 SYS_ALIAS(4, 8, 7, 5);
2529 } else if (!Op.compare_lower("vale3")) {
2530 // SYS #6, C8, C7, #5
2531 SYS_ALIAS(6, 8, 7, 5);
2532 } else if (!Op.compare_lower("vaale1")) {
2533 // SYS #0, C8, C7, #7
2534 SYS_ALIAS(0, 8, 7, 7);
2535 } else if (!Op.compare_lower("ipas2e1")) {
2536 // SYS #4, C8, C4, #1
2537 SYS_ALIAS(4, 8, 4, 1);
2538 } else if (!Op.compare_lower("ipas2le1")) {
2539 // SYS #4, C8, C4, #5
2540 SYS_ALIAS(4, 8, 4, 5);
2541 } else if (!Op.compare_lower("ipas2e1is")) {
2542 // SYS #4, C8, C4, #1
2543 SYS_ALIAS(4, 8, 0, 1);
2544 } else if (!Op.compare_lower("ipas2le1is")) {
2545 // SYS #4, C8, C4, #5
2546 SYS_ALIAS(4, 8, 0, 5);
2547 } else if (!Op.compare_lower("vmalls12e1")) {
2548 // SYS #4, C8, C7, #6
2549 SYS_ALIAS(4, 8, 7, 6);
2550 } else if (!Op.compare_lower("vmalls12e1is")) {
2551 // SYS #4, C8, C3, #6
2552 SYS_ALIAS(4, 8, 3, 6);
2554 return TokError("invalid operand for TLBI instruction");
2560 Parser.Lex(); // Eat operand.
2562 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2563 bool HasRegister = false;
2565 // Check for the optional register operand.
2566 if (getLexer().is(AsmToken::Comma)) {
2567 Parser.Lex(); // Eat comma.
2569 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2570 return TokError("expected register operand");
2575 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2576 Parser.eatToEndOfStatement();
2577 return TokError("unexpected token in argument list");
2580 if (ExpectRegister && !HasRegister) {
2581 return TokError("specified " + Mnemonic + " op requires a register");
2583 else if (!ExpectRegister && HasRegister) {
2584 return TokError("specified " + Mnemonic + " op does not use a register");
2587 Parser.Lex(); // Consume the EndOfStatement
2591 AArch64AsmParser::OperandMatchResultTy
2592 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2593 const AsmToken &Tok = Parser.getTok();
2595 // Can be either a #imm style literal or an option name
2596 bool Hash = Tok.is(AsmToken::Hash);
2597 if (Hash || Tok.is(AsmToken::Integer)) {
2598 // Immediate operand.
2600 Parser.Lex(); // Eat the '#'
2601 const MCExpr *ImmVal;
2602 SMLoc ExprLoc = getLoc();
2603 if (getParser().parseExpression(ImmVal))
2604 return MatchOperand_ParseFail;
2605 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2607 Error(ExprLoc, "immediate value expected for barrier operand");
2608 return MatchOperand_ParseFail;
2610 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2611 Error(ExprLoc, "barrier operand out of range");
2612 return MatchOperand_ParseFail;
2615 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2616 return MatchOperand_Success;
2619 if (Tok.isNot(AsmToken::Identifier)) {
2620 TokError("invalid operand for instruction");
2621 return MatchOperand_ParseFail;
2625 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2627 TokError("invalid barrier option name");
2628 return MatchOperand_ParseFail;
2631 // The only valid named option for ISB is 'sy'
2632 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2633 TokError("'sy' or #imm operand expected");
2634 return MatchOperand_ParseFail;
2638 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2639 Parser.Lex(); // Consume the option
2641 return MatchOperand_Success;
2644 AArch64AsmParser::OperandMatchResultTy
2645 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2646 const AsmToken &Tok = Parser.getTok();
2648 if (Tok.isNot(AsmToken::Identifier))
2649 return MatchOperand_NoMatch;
2651 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2652 STI.getFeatureBits(), getContext()));
2653 Parser.Lex(); // Eat identifier
2655 return MatchOperand_Success;
2658 /// tryParseVectorRegister - Parse a vector register operand.
2659 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2660 if (Parser.getTok().isNot(AsmToken::Identifier))
2664 // Check for a vector register specifier first.
2666 int64_t Reg = tryMatchVectorRegister(Kind, false);
2670 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2671 // If there was an explicit qualifier, that goes on as a literal text
2675 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2677 // If there is an index specifier following the register, parse that too.
2678 if (Parser.getTok().is(AsmToken::LBrac)) {
2679 SMLoc SIdx = getLoc();
2680 Parser.Lex(); // Eat left bracket token.
2682 const MCExpr *ImmVal;
2683 if (getParser().parseExpression(ImmVal))
2685 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2687 TokError("immediate value expected for vector index");
2692 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2693 Error(E, "']' expected");
2697 Parser.Lex(); // Eat right bracket token.
2699 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2706 /// parseRegister - Parse a non-vector register operand.
2707 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2709 // Try for a vector register.
2710 if (!tryParseVectorRegister(Operands))
2713 // Try for a scalar register.
2714 int64_t Reg = tryParseRegister();
2718 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2720 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2721 // as a string token in the instruction itself.
2722 if (getLexer().getKind() == AsmToken::LBrac) {
2723 SMLoc LBracS = getLoc();
2725 const AsmToken &Tok = Parser.getTok();
2726 if (Tok.is(AsmToken::Integer)) {
2727 SMLoc IntS = getLoc();
2728 int64_t Val = Tok.getIntVal();
2731 if (getLexer().getKind() == AsmToken::RBrac) {
2732 SMLoc RBracS = getLoc();
2735 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2737 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2739 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2749 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2750 bool HasELFModifier = false;
2751 AArch64MCExpr::VariantKind RefKind;
2753 if (Parser.getTok().is(AsmToken::Colon)) {
2754 Parser.Lex(); // Eat ':"
2755 HasELFModifier = true;
2757 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2758 Error(Parser.getTok().getLoc(),
2759 "expect relocation specifier in operand after ':'");
2763 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2764 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2765 .Case("lo12", AArch64MCExpr::VK_LO12)
2766 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2767 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2768 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2769 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2770 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2771 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2772 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2773 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2774 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2775 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2776 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2777 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2778 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2779 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2780 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2781 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2782 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2783 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2784 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2785 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2786 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2787 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2788 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2789 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2790 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2791 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2792 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2793 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2794 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2795 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2796 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2797 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2798 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2799 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2800 .Default(AArch64MCExpr::VK_INVALID);
2802 if (RefKind == AArch64MCExpr::VK_INVALID) {
2803 Error(Parser.getTok().getLoc(),
2804 "expect relocation specifier in operand after ':'");
2808 Parser.Lex(); // Eat identifier
2810 if (Parser.getTok().isNot(AsmToken::Colon)) {
2811 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2814 Parser.Lex(); // Eat ':'
2817 if (getParser().parseExpression(ImmVal))
2821 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2826 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2827 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2828 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2830 Parser.Lex(); // Eat left bracket token.
2832 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2835 int64_t PrevReg = FirstReg;
2838 if (Parser.getTok().is(AsmToken::Minus)) {
2839 Parser.Lex(); // Eat the minus.
2841 SMLoc Loc = getLoc();
2843 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2846 // Any Kind suffices must match on all regs in the list.
2847 if (Kind != NextKind)
2848 return Error(Loc, "mismatched register size suffix");
2850 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2852 if (Space == 0 || Space > 3) {
2853 return Error(Loc, "invalid number of vectors");
2859 while (Parser.getTok().is(AsmToken::Comma)) {
2860 Parser.Lex(); // Eat the comma token.
2862 SMLoc Loc = getLoc();
2864 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2867 // Any Kind suffices must match on all regs in the list.
2868 if (Kind != NextKind)
2869 return Error(Loc, "mismatched register size suffix");
2871 // Registers must be incremental (with wraparound at 31)
2872 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2873 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2874 return Error(Loc, "registers must be sequential");
2881 if (Parser.getTok().isNot(AsmToken::RCurly))
2882 return Error(getLoc(), "'}' expected");
2883 Parser.Lex(); // Eat the '}' token.
2886 return Error(S, "invalid number of vectors");
2888 unsigned NumElements = 0;
2889 char ElementKind = 0;
2891 parseValidVectorKind(Kind, NumElements, ElementKind);
2893 Operands.push_back(AArch64Operand::CreateVectorList(
2894 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2896 // If there is an index specifier following the list, parse that too.
2897 if (Parser.getTok().is(AsmToken::LBrac)) {
2898 SMLoc SIdx = getLoc();
2899 Parser.Lex(); // Eat left bracket token.
2901 const MCExpr *ImmVal;
2902 if (getParser().parseExpression(ImmVal))
2904 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2906 TokError("immediate value expected for vector index");
2911 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2912 Error(E, "']' expected");
2916 Parser.Lex(); // Eat right bracket token.
2918 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2924 AArch64AsmParser::OperandMatchResultTy
2925 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2926 const AsmToken &Tok = Parser.getTok();
2927 if (!Tok.is(AsmToken::Identifier))
2928 return MatchOperand_NoMatch;
2930 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2932 MCContext &Ctx = getContext();
2933 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2934 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2935 return MatchOperand_NoMatch;
2938 Parser.Lex(); // Eat register
2940 if (Parser.getTok().isNot(AsmToken::Comma)) {
2942 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2943 return MatchOperand_Success;
2945 Parser.Lex(); // Eat comma.
2947 if (Parser.getTok().is(AsmToken::Hash))
2948 Parser.Lex(); // Eat hash
2950 if (Parser.getTok().isNot(AsmToken::Integer)) {
2951 Error(getLoc(), "index must be absent or #0");
2952 return MatchOperand_ParseFail;
2955 const MCExpr *ImmVal;
2956 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2957 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2958 Error(getLoc(), "index must be absent or #0");
2959 return MatchOperand_ParseFail;
2963 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2964 return MatchOperand_Success;
2967 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
2968 /// operand regardless of the mnemonic.
// NOTE(review): this excerpt elides several statements from the original file
// (gaps in the embedded line numbering — missing returns, braces, and case
// labels); the code below is intentionally left byte-identical.
2969 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2970 bool invertCondCode) {
2971 // Check if the current operand has a custom associated parser, if so, try to
2972 // custom parse the operand, or fallback to the general approach.
2973 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2974 if (ResTy == MatchOperand_Success)
2976 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2977 // there was a match, but an error occurred, in which case, just return that
2978 // the operand parsing failed.
2979 if (ResTy == MatchOperand_ParseFail)
2982 // Nothing custom, so do general case parsing.
2984 switch (getLexer().getKind()) {
2988 if (parseSymbolicImmVal(Expr))
2989 return Error(S, "invalid operand")
2991 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2992 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2995 case AsmToken::LBrac: {
2996 SMLoc Loc = Parser.getTok().getLoc();
2997 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2999 Parser.Lex(); // Eat '['
3001 // There's no comma after a '[', so we can parse the next operand
3003 return parseOperand(Operands, false, false);
3005 case AsmToken::LCurly:
3006 return parseVectorList(Operands);
3007 case AsmToken::Identifier: {
3008 // If we're expecting a Condition Code operand, then just parse that.
3010 return parseCondCode(Operands, invertCondCode);
3012 // If it's a register name, parse it.
3013 if (!parseRegister(Operands))
3016 // This could be an optional "shift" or "extend" operand.
3017 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3018 // We can only continue if no tokens were eaten.
3019 if (GotShift != MatchOperand_NoMatch)
3022 // This was not a register so parse other operands that start with an
3023 // identifier (like labels) as expressions and create them as immediates.
3024 const MCExpr *IdVal;
3026 if (getParser().parseExpression(IdVal))
3029 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3030 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
// Immediate-style operands: integers, the special #0.0 fcmp literal, and
// '#'-prefixed expressions.
3033 case AsmToken::Integer:
3034 case AsmToken::Real:
3035 case AsmToken::Hash: {
3036 // #42 -> immediate.
3038 if (getLexer().is(AsmToken::Hash))
3041 // Parse a negative sign
3042 bool isNegative = false;
3043 if (Parser.getTok().is(AsmToken::Minus)) {
3045 // We need to consume this token only when we have a Real, otherwise
3046 // we let parseSymbolicImmVal take care of it
3047 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3051 // The only Real that should come through here is a literal #0.0 for
3052 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3053 // so convert the value.
3054 const AsmToken &Tok = Parser.getTok();
3055 if (Tok.is(AsmToken::Real)) {
3056 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3057 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3058 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3059 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3060 Mnemonic != "fcmlt")
3061 return TokError("unexpected floating point literal");
3062 else if (IntVal != 0 || isNegative)
3063 return TokError("expected floating-point constant #0.0");
3064 Parser.Lex(); // Eat the token.
3067 AArch64Operand::CreateToken("#0", false, S, getContext()));
3069 AArch64Operand::CreateToken(".0", false, S, getContext()));
3073 const MCExpr *ImmVal;
3074 if (parseSymbolicImmVal(ImmVal))
3077 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3078 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
// "ldr rN, =value" pseudo-instruction: rewritten to movz when the constant
// fits, otherwise placed in the constant pool (see below).
3081 case AsmToken::Equal: {
3082 SMLoc Loc = Parser.getTok().getLoc();
3083 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3084 return Error(Loc, "unexpected token in operand");
3085 Parser.Lex(); // Eat '='
3086 const MCExpr *SubExprVal;
3087 if (getParser().parseExpression(SubExprVal))
3090 MCContext& Ctx = getContext();
3091 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3092 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3093 if (isa<MCConstantExpr>(SubExprVal) && Operands.size() >= 2 &&
3094 static_cast<AArch64Operand &>(*Operands[1]).isReg()) {
3095 bool IsXReg = AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3096 Operands[1]->getReg());
3097 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3098 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
// Strip trailing 16-bit-aligned zero chunks so the value fits a movz
// immediate with an LSL shift.
3099 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3103 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3104 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3105 Operands.push_back(AArch64Operand::CreateImm(
3106 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3108 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3109 ShiftAmt, true, S, E, Ctx));
3113 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3114 const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
3115 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3121 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
// NOTE(review): this excerpt elides several statements from the original file
// (gaps in the embedded line numbering — missing returns, braces, and
// push_back continuations); the code below is intentionally left untouched.
3123 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3124 StringRef Name, SMLoc NameLoc,
3125 OperandVector &Operands) {
// Canonicalize legacy "b<cond>" spellings into the "b.<cond>" form before
// splitting the mnemonic on '.'.
3126 Name = StringSwitch<StringRef>(Name.lower())
3127 .Case("beq", "b.eq")
3128 .Case("bne", "b.ne")
3129 .Case("bhs", "b.hs")
3130 .Case("bcs", "b.cs")
3131 .Case("blo", "b.lo")
3132 .Case("bcc", "b.cc")
3133 .Case("bmi", "b.mi")
3134 .Case("bpl", "b.pl")
3135 .Case("bvs", "b.vs")
3136 .Case("bvc", "b.vc")
3137 .Case("bhi", "b.hi")
3138 .Case("bls", "b.ls")
3139 .Case("bge", "b.ge")
3140 .Case("blt", "b.lt")
3141 .Case("bgt", "b.gt")
3142 .Case("ble", "b.le")
3143 .Case("bal", "b.al")
3144 .Case("bnv", "b.nv")
3147 // First check for the AArch64-specific .req directive.
3148 if (Parser.getTok().is(AsmToken::Identifier) &&
3149 Parser.getTok().getIdentifier() == ".req") {
3150 parseDirectiveReq(Name, NameLoc);
3151 // We always return 'error' for this, as we're done with this
3152 // statement and don't need to match the 'instruction'.
3156 // Create the leading tokens for the mnemonic, split by '.' characters.
3157 size_t Start = 0, Next = Name.find('.');
3158 StringRef Head = Name.slice(Start, Next);
3160 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3161 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3162 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3163 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3164 Parser.eatToEndOfStatement();
3169 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3172 // Handle condition codes for a branch mnemonic
3173 if (Head == "b" && Next != StringRef::npos) {
3175 Next = Name.find('.', Start + 1);
3176 Head = Name.slice(Start + 1, Next);
3178 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3179 (Head.data() - Name.data()));
3180 AArch64CC::CondCode CC = parseCondCodeString(Head);
3181 if (CC == AArch64CC::Invalid)
3182 return Error(SuffixLoc, "invalid condition code");
3184 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3186 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3189 // Add the remaining tokens in the mnemonic.
3190 while (Next != StringRef::npos) {
3192 Next = Name.find('.', Start + 1);
3193 Head = Name.slice(Start, Next);
3194 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3195 (Head.data() - Name.data()) + 1);
3197 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3200 // Conditional compare instructions have a Condition Code operand, which needs
3201 // to be parsed and an immediate operand created.
3202 bool condCodeFourthOperand =
3203 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3204 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3205 Head == "csinc" || Head == "csinv" || Head == "csneg");
3207 // These instructions are aliases to some of the conditional select
3208 // instructions. However, the condition code is inverted in the aliased
3211 // FIXME: Is this the correct way to handle these? Or should the parser
3212 // generate the aliased instructions directly?
3213 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3214 bool condCodeThirdOperand =
3215 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3217 // Read the remaining operands.
3218 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3219 // Read the first operand.
3220 if (parseOperand(Operands, false, false)) {
3221 Parser.eatToEndOfStatement();
3226 while (getLexer().is(AsmToken::Comma)) {
3227 Parser.Lex(); // Eat the comma.
3229 // Parse and remember the operand.
3230 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3231 (N == 3 && condCodeThirdOperand) ||
3232 (N == 2 && condCodeSecondOperand),
3233 condCodeSecondOperand || condCodeThirdOperand)) {
3234 Parser.eatToEndOfStatement();
3238 // After successfully parsing some operands there are two special cases to
3239 // consider (i.e. notional operands not separated by commas). Both are due
3240 // to memory specifiers:
3241 // + An RBrac will end an address for load/store/prefetch
3242 // + An '!' will indicate a pre-indexed operation.
3244 // It's someone else's responsibility to make sure these tokens are sane
3245 // in the given context!
3246 if (Parser.getTok().is(AsmToken::RBrac)) {
3247 SMLoc Loc = Parser.getTok().getLoc();
3248 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3253 if (Parser.getTok().is(AsmToken::Exclaim)) {
3254 SMLoc Loc = Parser.getTok().getLoc();
3255 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3264 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3265 SMLoc Loc = Parser.getTok().getLoc();
3266 Parser.eatToEndOfStatement();
3267 return Error(Loc, "unexpected token in argument list");
3270 Parser.Lex(); // Consume the EndOfStatement
3274 // FIXME: This entire function is a giant hack to provide us with decent
3275 // operand range validation/diagnostics until TableGen/MC can be extended
3276 // to support autogeneration of this kind of validation.
// Post-match semantic validation: rejects instructions that matched an
// encoding but have architecturally unpredictable operand combinations
// (writeback base overlapping a transfer register, Rt == Rt2 in load pairs)
// or invalid symbolic immediates on ADD/SUB. `Loc` holds the start SMLoc of
// each parsed operand so diagnostics can point at the offending operand.
// Returns true after emitting an Error; false if the instruction is fine.
// NOTE(review): this listing elides lines (per-case `break;`s, the Rt==Rt2
// guards, closing braces) — annotations below flag each visible gap.
3277 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3278 SmallVectorImpl<SMLoc> &Loc) {
3279 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3280 // Check for indexed addressing modes w/ the base register being the
3281 // same as a destination/source register or pair load where
3282 // the Rt == Rt2. All of those are undefined behaviour.
3283 switch (Inst.getOpcode()) {
// Pre/post-indexed load pairs: operand 0 is the writeback def, so the
// transfer registers are operands 1/2 and the base is operand 3.
3284 case AArch64::LDPSWpre:
3285 case AArch64::LDPWpost:
3286 case AArch64::LDPWpre:
3287 case AArch64::LDPXpost:
3288 case AArch64::LDPXpre: {
3289 unsigned Rt = Inst.getOperand(1).getReg();
3290 unsigned Rt2 = Inst.getOperand(2).getReg();
3291 unsigned Rn = Inst.getOperand(3).getReg();
3292 if (RI->isSubRegisterEq(Rn, Rt))
3293 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3294 "is also a destination");
3295 if (RI->isSubRegisterEq(Rn, Rt2))
3296 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3297 "is also a destination");
// NOTE(review): lines 3298-3299 (Rt==Rt2 check / break) elided in this dump.
// Non-writeback load pairs: transfer registers are operands 0/1.
3300 case AArch64::LDPDi:
3301 case AArch64::LDPQi:
3302 case AArch64::LDPSi:
3303 case AArch64::LDPSWi:
3304 case AArch64::LDPWi:
3305 case AArch64::LDPXi: {
3306 unsigned Rt = Inst.getOperand(0).getReg();
3307 unsigned Rt2 = Inst.getOperand(1).getReg();
// NOTE(review): the `if (Rt == Rt2)` guard (line 3308) is elided here.
3309 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// FP/SIMD writeback load pairs: only the Rt==Rt2 check applies (the base
// is an integer register, so it cannot alias the FP transfer registers).
3312 case AArch64::LDPDpost:
3313 case AArch64::LDPDpre:
3314 case AArch64::LDPQpost:
3315 case AArch64::LDPQpre:
3316 case AArch64::LDPSpost:
3317 case AArch64::LDPSpre:
3318 case AArch64::LDPSWpost: {
3319 unsigned Rt = Inst.getOperand(1).getReg();
3320 unsigned Rt2 = Inst.getOperand(2).getReg();
// NOTE(review): the `if (Rt == Rt2)` guard (line 3321) is elided here.
3322 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback store pairs: base may not overlap either source register.
3325 case AArch64::STPDpost:
3326 case AArch64::STPDpre:
3327 case AArch64::STPQpost:
3328 case AArch64::STPQpre:
3329 case AArch64::STPSpost:
3330 case AArch64::STPSpre:
3331 case AArch64::STPWpost:
3332 case AArch64::STPWpre:
3333 case AArch64::STPXpost:
3334 case AArch64::STPXpre: {
3335 unsigned Rt = Inst.getOperand(1).getReg();
3336 unsigned Rt2 = Inst.getOperand(2).getReg();
3337 unsigned Rn = Inst.getOperand(3).getReg();
3338 if (RI->isSubRegisterEq(Rn, Rt))
3339 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3340 "is also a source");
3341 if (RI->isSubRegisterEq(Rn, Rt2))
3342 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3343 "is also a source");
// Writeback single-register loads: base (operand 2) may not overlap the
// destination (operand 1).
3346 case AArch64::LDRBBpre:
3347 case AArch64::LDRBpre:
3348 case AArch64::LDRHHpre:
3349 case AArch64::LDRHpre:
3350 case AArch64::LDRSBWpre:
3351 case AArch64::LDRSBXpre:
3352 case AArch64::LDRSHWpre:
3353 case AArch64::LDRSHXpre:
3354 case AArch64::LDRSWpre:
3355 case AArch64::LDRWpre:
3356 case AArch64::LDRXpre:
3357 case AArch64::LDRBBpost:
3358 case AArch64::LDRBpost:
3359 case AArch64::LDRHHpost:
3360 case AArch64::LDRHpost:
3361 case AArch64::LDRSBWpost:
3362 case AArch64::LDRSBXpost:
3363 case AArch64::LDRSHWpost:
3364 case AArch64::LDRSHXpost:
3365 case AArch64::LDRSWpost:
3366 case AArch64::LDRWpost:
3367 case AArch64::LDRXpost: {
3368 unsigned Rt = Inst.getOperand(1).getReg();
3369 unsigned Rn = Inst.getOperand(2).getReg();
3370 if (RI->isSubRegisterEq(Rn, Rt))
3371 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3372 "is also a source");
// Writeback single-register stores: same base/source overlap check.
3375 case AArch64::STRBBpost:
3376 case AArch64::STRBpost:
3377 case AArch64::STRHHpost:
3378 case AArch64::STRHpost:
3379 case AArch64::STRWpost:
3380 case AArch64::STRXpost:
3381 case AArch64::STRBBpre:
3382 case AArch64::STRBpre:
3383 case AArch64::STRHHpre:
3384 case AArch64::STRHpre:
3385 case AArch64::STRWpre:
3386 case AArch64::STRXpre: {
3387 unsigned Rt = Inst.getOperand(1).getReg();
3388 unsigned Rn = Inst.getOperand(2).getReg();
3389 if (RI->isSubRegisterEq(Rn, Rt))
3390 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3391 "is also a source");
3396 // Now check immediate ranges. Separate from the above as there is overlap
3397 // in the instructions being checked and this keeps the nested conditionals
// (continuation of the comment, line 3398, elided: "...to a minimum.")
3399 switch (Inst.getOpcode()) {
3400 case AArch64::ADDSWri:
3401 case AArch64::ADDSXri:
3402 case AArch64::ADDWri:
3403 case AArch64::ADDXri:
3404 case AArch64::SUBSWri:
3405 case AArch64::SUBSXri:
3406 case AArch64::SUBWri:
3407 case AArch64::SUBXri: {
3408 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3409 // some slight duplication here.
3410 if (Inst.getOperand(2).isExpr()) {
3411 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3412 AArch64MCExpr::VariantKind ELFRefKind;
3413 MCSymbolRefExpr::VariantKind DarwinRefKind;
// NOTE(review): the `int64_t Addend;` declaration (line 3414) is elided.
3415 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3416 return Error(Loc[2], "invalid immediate expression");
3419 // Only allow these with ADDXri.
3420 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3421 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3422 Inst.getOpcode() == AArch64::ADDXri)
// (elided: `return false;` accepting the expression)
3425 // Only allow these with ADDXri/ADDWri
3426 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3427 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3428 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3429 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3430 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3431 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3432 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3433 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3434 (Inst.getOpcode() == AArch64::ADDXri ||
3435 Inst.getOpcode() == AArch64::ADDWri))
// (elided: `return false;` accepting the expression)
3438 // Don't allow expressions in the immediate field otherwise
3439 return Error(Loc[2], "invalid immediate expression");
// Translates a matcher failure code (Match_*) into a user-facing diagnostic
// anchored at `Loc`. Always returns true (the Error() convention for "parse
// failed"). Kept as a single switch so each operand-class failure has one
// canonical message.
// NOTE(review): the `switch (ErrCode) {` header (line 3449) and the closing
// braces are elided in this listing.
3448 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3450 case Match_MissingFeature:
3452 "instruction requires a CPU feature not currently enabled");
3453 case Match_InvalidOperand:
3454 return Error(Loc, "invalid operand for instruction");
3455 case Match_InvalidSuffix:
3456 return Error(Loc, "invalid type suffix for instruction");
3457 case Match_InvalidCondCode:
3458 return Error(Loc, "expected AArch64 condition code");
3459 case Match_AddSubRegExtendSmall:
3461 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3462 case Match_AddSubRegExtendLarge:
3464 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3465 case Match_AddSubSecondSource:
3467 "expected compatible register, symbol or integer in range [0, 4095]");
3468 case Match_LogicalSecondSource:
3469 return Error(Loc, "expected compatible register or logical immediate");
3470 case Match_InvalidMovImm32Shift:
3471 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3472 case Match_InvalidMovImm64Shift:
3473 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3474 case Match_AddSubRegShift32:
3476 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3477 case Match_AddSubRegShift64:
3479 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3480 case Match_InvalidFPImm:
3482 "expected compatible register or floating-point constant");
// Scaled/unscaled memory offset diagnostics.
3483 case Match_InvalidMemoryIndexedSImm9:
3484 return Error(Loc, "index must be an integer in range [-256, 255].");
3485 case Match_InvalidMemoryIndexed4SImm7:
3486 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3487 case Match_InvalidMemoryIndexed8SImm7:
3488 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3489 case Match_InvalidMemoryIndexed16SImm7:
3490 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
// Register-offset extend diagnostics (W then X base variants, by width).
3491 case Match_InvalidMemoryWExtend8:
3493 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3494 case Match_InvalidMemoryWExtend16:
3496 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3497 case Match_InvalidMemoryWExtend32:
3499 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3500 case Match_InvalidMemoryWExtend64:
3502 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3503 case Match_InvalidMemoryWExtend128:
3505 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3506 case Match_InvalidMemoryXExtend8:
3508 "expected 'lsl' or 'sxtx' with optional shift of #0");
3509 case Match_InvalidMemoryXExtend16:
3511 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3512 case Match_InvalidMemoryXExtend32:
3514 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3515 case Match_InvalidMemoryXExtend64:
3517 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3518 case Match_InvalidMemoryXExtend128:
3520 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Unsigned scaled-immediate offsets (12-bit, scaled by access size).
3521 case Match_InvalidMemoryIndexed1:
3522 return Error(Loc, "index must be an integer in range [0, 4095].");
3523 case Match_InvalidMemoryIndexed2:
3524 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3525 case Match_InvalidMemoryIndexed4:
3526 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3527 case Match_InvalidMemoryIndexed8:
3528 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3529 case Match_InvalidMemoryIndexed16:
3530 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate-range diagnostics.
3531 case Match_InvalidImm0_7:
3532 return Error(Loc, "immediate must be an integer in range [0, 7].");
3533 case Match_InvalidImm0_15:
3534 return Error(Loc, "immediate must be an integer in range [0, 15].");
3535 case Match_InvalidImm0_31:
3536 return Error(Loc, "immediate must be an integer in range [0, 31].");
3537 case Match_InvalidImm0_63:
3538 return Error(Loc, "immediate must be an integer in range [0, 63].");
3539 case Match_InvalidImm0_127:
3540 return Error(Loc, "immediate must be an integer in range [0, 127].");
3541 case Match_InvalidImm0_65535:
3542 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3543 case Match_InvalidImm1_8:
3544 return Error(Loc, "immediate must be an integer in range [1, 8].");
3545 case Match_InvalidImm1_16:
3546 return Error(Loc, "immediate must be an integer in range [1, 16].");
3547 case Match_InvalidImm1_32:
3548 return Error(Loc, "immediate must be an integer in range [1, 32].");
3549 case Match_InvalidImm1_64:
3550 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics.
3551 case Match_InvalidIndex1:
3552 return Error(Loc, "expected lane specifier '[1]'");
3553 case Match_InvalidIndexB:
3554 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3555 case Match_InvalidIndexH:
3556 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3557 case Match_InvalidIndexS:
3558 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3559 case Match_InvalidIndexD:
3560 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3561 case Match_InvalidLabel:
3562 return Error(Loc, "expected label or encodable integer pc offset");
// NOTE(review): the case labels for the two messages below (lines 3563/3565,
// presumably MRS/MSR system-register operands) are elided in this listing.
3564 return Error(Loc, "expected readable system register");
3566 return Error(Loc, "expected writable system register or pstate");
3567 case Match_MnemonicFail:
3568 return Error(Loc, "unrecognized instruction mnemonic");
// Any unhandled code means a new Match_* was added without a message here.
3570 llvm_unreachable("unexpected error code!");
3574 static const char *getSubtargetFeatureName(unsigned Val);
// Target hook called once per parsed instruction: first rewrites a handful of
// aliases that TableGen's InstAlias machinery cannot express (LSL->UBFM,
// BFI/SBFIZ/UBFIZ->BFM-family, BFXIL/SBFX/UBFX->BFM-family, sxt*/uxt* register
// width twiddles, FMOV #0.0 -> [WX]ZR), then runs the generated matcher,
// validates the result, and either emits it to `Out` or reports a diagnostic.
// Returns true on failure. `ErrorInfo` is the matcher's failing-operand index.
// NOTE(review): this listing elides many lines (closing braces, `return true;`
// diagnostics, a `uint64_t Mask` declaration, `Match_InvalidTiedOperand`-style
// case labels, etc.) — annotations below mark the visible gaps.
3576 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3577 OperandVector &Operands,
3579 unsigned &ErrorInfo,
3580 bool MatchingInlineAsm) {
3581 assert(!Operands.empty() && "Unexpect empty operand list!");
3582 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3583 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3585 StringRef Tok = Op.getToken();
3586 unsigned NumOperands = Operands.size();
// "lsl Rd, Rn, #imm" is rewritten into the equivalent "ubfm" four-operand
// form (immr/imms computed from the shift amount and register width).
3588 if (NumOperands == 4 && Tok == "lsl") {
3589 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3590 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3591 if (Op2.isReg() && Op3.isImm()) {
3592 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
// NOTE(review): the null-check on Op3CE (line 3593) is elided here.
3594 uint64_t Op3Val = Op3CE->getValue();
3595 uint64_t NewOp3Val = 0;
3596 uint64_t NewOp4Val = 0;
3597 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
// (elided: `Op2.getReg())) {` — 32-bit destination register case)
3599 NewOp3Val = (32 - Op3Val) & 0x1f;
3600 NewOp4Val = 31 - Op3Val;
// (elided: `} else {` — 64-bit register case)
3602 NewOp3Val = (64 - Op3Val) & 0x3f;
3603 NewOp4Val = 63 - Op3Val;
3606 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3607 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3609 Operands[0] = AArch64Operand::CreateToken(
3610 "ubfm", false, Op.getStartLoc(), getContext());
3611 Operands.push_back(AArch64Operand::CreateImm(
3612 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3613 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3614 Op3.getEndLoc(), getContext());
3617 } else if (NumOperands == 5) {
3618 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3619 // UBFIZ -> UBFM aliases.
3620 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3621 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3622 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3623 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3625 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3626 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3627 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3629 if (Op3CE && Op4CE) {
3630 uint64_t Op3Val = Op3CE->getValue();
3631 uint64_t Op4Val = Op4CE->getValue();
3633 uint64_t RegWidth = 0;
3634 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// (elided, lines 3635-3639: RegWidth = 64 / else RegWidth = 32)
// Range checks against the destination register width.
3640 if (Op3Val >= RegWidth)
3641 return Error(Op3.getStartLoc(),
3642 "expected integer in range [0, 31]");
3643 if (Op4Val < 1 || Op4Val > RegWidth)
3644 return Error(Op4.getStartLoc(),
3645 "expected integer in range [1, 32]");
3647 uint64_t NewOp3Val = 0;
3648 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
// (elided: `Op1.getReg())) ` — 32-bit case)
3650 NewOp3Val = (32 - Op3Val) & 0x1f;
// (elided: `else`)
3652 NewOp3Val = (64 - Op3Val) & 0x3f;
3654 uint64_t NewOp4Val = Op4Val - 1;
3656 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3657 return Error(Op4.getStartLoc(),
3658 "requested insert overflows register");
3660 const MCExpr *NewOp3 =
3661 MCConstantExpr::Create(NewOp3Val, getContext());
3662 const MCExpr *NewOp4 =
3663 MCConstantExpr::Create(NewOp4Val, getContext());
3664 Operands[3] = AArch64Operand::CreateImm(
3665 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3666 Operands[4] = AArch64Operand::CreateImm(
3667 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// (elided, line 3668: `if (Tok == "bfi")`)
3669 Operands[0] = AArch64Operand::CreateToken(
3670 "bfm", false, Op.getStartLoc(), getContext());
3671 else if (Tok == "sbfiz")
3672 Operands[0] = AArch64Operand::CreateToken(
3673 "sbfm", false, Op.getStartLoc(), getContext());
3674 else if (Tok == "ubfiz")
3675 Operands[0] = AArch64Operand::CreateToken(
3676 "ubfm", false, Op.getStartLoc(), getContext());
// (elided, line 3677: `else`)
3678 llvm_unreachable("No valid mnemonic for alias?");
3682 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3683 // UBFX -> UBFM aliases.
3684 } else if (NumOperands == 5 &&
3685 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3686 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3687 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3688 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3690 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3691 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3692 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3694 if (Op3CE && Op4CE) {
3695 uint64_t Op3Val = Op3CE->getValue();
3696 uint64_t Op4Val = Op4CE->getValue();
3698 uint64_t RegWidth = 0;
3699 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// (elided, lines 3700-3704: RegWidth = 64 / else RegWidth = 32)
3705 if (Op3Val >= RegWidth)
3706 return Error(Op3.getStartLoc(),
3707 "expected integer in range [0, 31]");
3708 if (Op4Val < 1 || Op4Val > RegWidth)
3709 return Error(Op4.getStartLoc(),
3710 "expected integer in range [1, 32]");
// lsb + width - 1 becomes the BFM-family `imms` operand.
3712 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3714 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3715 return Error(Op4.getStartLoc(),
3716 "requested extract overflows register");
3718 const MCExpr *NewOp4 =
3719 MCConstantExpr::Create(NewOp4Val, getContext());
3720 Operands[4] = AArch64Operand::CreateImm(
3721 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// (elided, line 3722: `if (Tok == "bfxil")`)
3723 Operands[0] = AArch64Operand::CreateToken(
3724 "bfm", false, Op.getStartLoc(), getContext());
3725 else if (Tok == "sbfx")
3726 Operands[0] = AArch64Operand::CreateToken(
3727 "sbfm", false, Op.getStartLoc(), getContext());
3728 else if (Tok == "ubfx")
3729 Operands[0] = AArch64Operand::CreateToken(
3730 "ubfm", false, Op.getStartLoc(), getContext());
// (elided, line 3731: `else`)
3732 llvm_unreachable("No valid mnemonic for alias?");
3737 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3738 // InstAlias can't quite handle this since the reg classes aren't
// (continuation of comment, line 3739, elided: "...uniform.")
3740 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3741 // The source register can be Wn here, but the matcher expects a
3742 // GPR64. Twiddle it here if necessary.
3743 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
// (elided, line 3744: guard that Op is a GPR32 register)
3745 unsigned Reg = getXRegFromWReg(Op.getReg());
3746 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3747 Op.getEndLoc(), getContext());
3750 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3751 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3752 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
// (elided, line 3753: `if (Op1.isReg() &&`)
3754 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// (elided, line 3755: `Op1.getReg())) {`)
3756 // The source register can be Wn here, but the matcher expects a
3757 // GPR64. Twiddle it here if necessary.
3758 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
// (elided, line 3759: guard that Op is a GPR32 register)
3760 unsigned Reg = getXRegFromWReg(Op.getReg());
3761 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3762 Op.getEndLoc(), getContext());
3766 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3767 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3768 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
// (elided, line 3769: `if (Op1.isReg() &&`)
3770 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// (elided, line 3771: `Op1.getReg())) {`)
3772 // The source register can be Wn here, but the matcher expects a
3773 // GPR32. Twiddle it here if necessary.
// NOTE(review): unlike the sxt[bh] case above, this rewrites the DESTINATION
// (Operands[1]) from Xd down to Wd — uxt[bh] always has a 32-bit result.
3774 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
// (elided, line 3775: guard that Op is a register)
3776 unsigned Reg = getWRegFromXReg(Op.getReg());
3777 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3778 Op.getEndLoc(), getContext());
3783 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3784 if (NumOperands == 3 && Tok == "fmov") {
3785 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3786 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 is the parser's sentinel for the literal #0.0.
3787 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
// (elided, line 3788: `unsigned zreg =`)
3789 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
// (elided, lines 3790-3792: `RegOp.getReg()) ? AArch64::WZR : AArch64::XZR;`)
3793 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3794 Op.getEndLoc(), getContext());
3799 // First try to match against the secondary set of tables containing the
3800 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3801 unsigned MatchResult =
3802 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3804 // If that fails, try against the alternate table containing long-form NEON:
3805 // "fadd v0.2s, v1.2s, v2.2s"
3806 if (MatchResult != Match_Success)
// (elided, line 3807: `MatchResult =`)
3808 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3810 switch (MatchResult) {
3811 case Match_Success: {
3812 // Perform range checking and other semantic validations
3813 SmallVector<SMLoc, 8> OperandLocs;
3814 NumOperands = Operands.size();
3815 for (unsigned i = 1; i < NumOperands; ++i)
3816 OperandLocs.push_back(Operands[i]->getStartLoc());
3817 if (validateInstruction(Inst, OperandLocs))
// (elided, line 3818: `return true;`)
3821 Out.EmitInstruction(Inst, STI);
// (elided: `return false;`)
3824 case Match_MissingFeature: {
3825 assert(ErrorInfo && "Unknown missing feature!");
3826 // Special case the error message for the very common case where only
3827 // a single subtarget feature is missing (neon, e.g.).
3828 std::string Msg = "instruction requires:";
// NOTE(review): the `uint64_t Mask = 1;` declaration and the Mask shift at
// the loop bottom (lines 3829/3834-3835) are elided in this listing.
3830 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3831 if (ErrorInfo & Mask) {
3833 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3837 return Error(IDLoc, Msg);
3839 case Match_MnemonicFail:
3840 return showMatchError(IDLoc, MatchResult);
3841 case Match_InvalidOperand: {
3842 SMLoc ErrorLoc = IDLoc;
3843 if (ErrorInfo != ~0U) {
3844 if (ErrorInfo >= Operands.size())
3845 return Error(IDLoc, "too few operands for instruction");
3847 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3848 if (ErrorLoc == SMLoc())
// (elided, line 3849: `ErrorLoc = IDLoc;`)
3851 // If the match failed on a suffix token operand, tweak the diagnostic
// (continuation of comment, line 3852, elided: "...accordingly.")
3853 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3854 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3855 MatchResult = Match_InvalidSuffix;
3857 return showMatchError(ErrorLoc, MatchResult);
// All remaining operand-class failures share one path: point at the
// failing operand (if known) and defer to showMatchError for the message.
3859 case Match_InvalidMemoryIndexed1:
3860 case Match_InvalidMemoryIndexed2:
3861 case Match_InvalidMemoryIndexed4:
3862 case Match_InvalidMemoryIndexed8:
3863 case Match_InvalidMemoryIndexed16:
3864 case Match_InvalidCondCode:
3865 case Match_AddSubRegExtendSmall:
3866 case Match_AddSubRegExtendLarge:
3867 case Match_AddSubSecondSource:
3868 case Match_LogicalSecondSource:
3869 case Match_AddSubRegShift32:
3870 case Match_AddSubRegShift64:
3871 case Match_InvalidMovImm32Shift:
3872 case Match_InvalidMovImm64Shift:
3873 case Match_InvalidFPImm:
3874 case Match_InvalidMemoryWExtend8:
3875 case Match_InvalidMemoryWExtend16:
3876 case Match_InvalidMemoryWExtend32:
3877 case Match_InvalidMemoryWExtend64:
3878 case Match_InvalidMemoryWExtend128:
3879 case Match_InvalidMemoryXExtend8:
3880 case Match_InvalidMemoryXExtend16:
3881 case Match_InvalidMemoryXExtend32:
3882 case Match_InvalidMemoryXExtend64:
3883 case Match_InvalidMemoryXExtend128:
3884 case Match_InvalidMemoryIndexed4SImm7:
3885 case Match_InvalidMemoryIndexed8SImm7:
3886 case Match_InvalidMemoryIndexed16SImm7:
3887 case Match_InvalidMemoryIndexedSImm9:
3888 case Match_InvalidImm0_7:
3889 case Match_InvalidImm0_15:
3890 case Match_InvalidImm0_31:
3891 case Match_InvalidImm0_63:
3892 case Match_InvalidImm0_127:
3893 case Match_InvalidImm0_65535:
3894 case Match_InvalidImm1_8:
3895 case Match_InvalidImm1_16:
3896 case Match_InvalidImm1_32:
3897 case Match_InvalidImm1_64:
3898 case Match_InvalidIndex1:
3899 case Match_InvalidIndexB:
3900 case Match_InvalidIndexH:
3901 case Match_InvalidIndexS:
3902 case Match_InvalidIndexD:
3903 case Match_InvalidLabel:
// (elided, lines 3904-3905: trailing case label(s) and block open)
3906 if (ErrorInfo >= Operands.size())
3907 return Error(IDLoc, "too few operands for instruction");
3908 // Any time we get here, there's nothing fancy to do. Just get the
3909 // operand SMLoc and display the diagnostic.
3910 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3911 if (ErrorLoc == SMLoc())
// (elided, line 3912: `ErrorLoc = IDLoc;`)
3913 return showMatchError(ErrorLoc, MatchResult);
3917 llvm_unreachable("Implement any new match types added!");
3921 /// ParseDirective parses the arm specific directives
// Dispatcher for target-specific assembler directives. Each recognized
// directive is delegated to its parseDirective* helper; anything else falls
// through to parseDirectiveLOH, which rejects unknown names itself.
3922 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3923 StringRef IDVal = DirectiveID.getIdentifier();
3924 SMLoc Loc = DirectiveID.getLoc();
3925 if (IDVal == ".hword")
3926 return parseDirectiveWord(2, Loc);
3927 if (IDVal == ".word")
3928 return parseDirectiveWord(4, Loc);
3929 if (IDVal == ".xword")
3930 return parseDirectiveWord(8, Loc);
3931 if (IDVal == ".tlsdesccall")
3932 return parseDirectiveTLSDescCall(Loc);
3933 if (IDVal == ".ltorg" || IDVal == ".pool")
3934 return parseDirectiveLtorg(Loc);
3935 if (IDVal == ".unreq")
3936 return parseDirectiveUnreq(DirectiveID.getLoc());
// (elided, line 3937 in the original between the checks above and this
// fall-through — likely a blank line or comment.)
3938 return parseDirectiveLOH(IDVal, Loc);
3941 /// parseDirectiveWord
3942 /// ::= .word [ expression (, expression)* ]
// Emits one or more `Size`-byte data values from a comma-separated
// expression list. Returns true on error.
// NOTE(review): the loop structure (`for (;;) {`), comma consumption and the
// final `Parser.Lex(); return false;` lines are elided in this listing.
3943 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3944 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3946 const MCExpr *Value;
3947 if (getParser().parseExpression(Value))
// (elided, line 3948/3949: `return true;`)
3950 getParser().getStreamer().EmitValue(Value, Size);
3952 if (getLexer().is(AsmToken::EndOfStatement))
// (elided, line 3953: `break;`)
3955 // FIXME: Improve diagnostic.
3956 if (getLexer().isNot(AsmToken::Comma))
3957 return Error(L, "unexpected token in directive");
3966 // parseDirectiveTLSDescCall:
3967 // ::= .tlsdesccall symbol
// Emits the pseudo TLSDESCCALL instruction carrying the symbol wrapped in a
// VK_TLSDESC AArch64MCExpr, which later lowers to the TLS descriptor
// relocation on the following blr. Returns true on error.
3968 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
// (elided, line 3969: `StringRef Name;`)
3970 if (getParser().parseIdentifier(Name))
3971 return Error(L, "expected symbol after directive");
3973 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3974 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3975 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
// (elided, lines 3976-3977: `MCInst Inst;`)
3978 Inst.setOpcode(AArch64::TLSDESCCALL);
3979 Inst.addOperand(MCOperand::CreateExpr(Expr));
3981 getParser().getStreamer().EmitInstruction(Inst, STI);
3985 /// ::= .loh <lohName | lohId> label1, ..., labelN
3986 /// The number of arguments depends on the loh identifier.
// Parses a Mach-O Linker Optimization Hint directive. Accepts the LOH kind
// either by name or by numeric id, then reads exactly the number of label
// arguments that kind requires and forwards them to the streamer.
// Returns true on error.
3987 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
// Only the canonical LOH directive name is handled here; anything else is
// rejected (the elided lines 3988-3990 include the MCLOHType declaration).
3988 if (IDVal != MCLOHDirectiveName())
3991 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3992 if (getParser().getTok().isNot(AsmToken::Integer))
3993 return TokError("expected an identifier or a number in directive");
3994 // We successfully get a numeric value for the identifier.
3995 // Check if it is valid.
3996 int64_t Id = getParser().getTok().getIntVal();
3997 Kind = (MCLOHType)Id;
3998 // Check that Id does not overflow MCLOHType.
3999 if (!isValidMCLOHType(Kind) || Id != Kind)
4000 return TokError("invalid numeric identifier in directive");
4002 StringRef Name = getTok().getIdentifier();
4003 // We successfully parse an identifier.
4004 // Check if it is a recognized one.
4005 int Id = MCLOHNameToId(Name);
// (elided, lines 4006-4007: the `if (Id == -1)` guard for the error below)
4008 return TokError("invalid identifier in directive");
4009 Kind = (MCLOHType)Id;
4011 // Consume the identifier.
// (elided, line 4012: `Lex();`)
4013 // Get the number of arguments of this LOH.
4014 int NbArgs = MCLOHIdToNbArgs(Kind);
4016 assert(NbArgs != -1 && "Invalid number of arguments");
4018 SmallVector<MCSymbol *, 3> Args;
4019 for (int Idx = 0; Idx < NbArgs; ++Idx) {
// (elided, line 4020: `StringRef Name;`)
4021 if (getParser().parseIdentifier(Name))
4022 return TokError("expected identifier in directive");
4023 Args.push_back(getContext().GetOrCreateSymbol(Name));
// Last argument is not followed by a comma.
4025 if (Idx + 1 == NbArgs)
// (elided, line 4026: `break;`)
4027 if (getLexer().isNot(AsmToken::Comma))
4028 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
// (elided, lines 4029-4030: comma consumption and loop close)
4031 if (getLexer().isNot(AsmToken::EndOfStatement))
4032 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4034 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4038 /// parseDirectiveLtorg
4039 /// ::= .ltorg | .pool
// Flushes the pending constant pool at the current location via the target
// streamer. (The trailing `return false;` is elided in this listing.)
4040 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4041 getTargetStreamer().emitCurrentConstantPool();
4045 /// parseDirectiveReq
4046 /// ::= name .req registername
// Records `Name` as an alias for a scalar or vector register in RegisterReqs.
// Vector aliases must be written without a type suffix (e.g. "v0", not
// "v0.8b"). Redefining an alias to a different register only warns.
// Returns true on error.
4047 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4048 Parser.Lex(); // Eat the '.req' token.
4049 SMLoc SRegLoc = getLoc();
4050 unsigned RegNum = tryParseRegister();
4051 bool IsVector = false;
// Scalar register parse failed: retry as a (suffix-less) vector register.
4053 if (RegNum == static_cast<unsigned>(-1)) {
// (elided, line 4054: `StringRef Kind;`)
4055 RegNum = tryMatchVectorRegister(Kind, false);
4056 if (!Kind.empty()) {
4057 Error(SRegLoc, "vector register without type specifier expected");
// (elided: `return false;` and the `IsVector = true;` branch close —
// NOTE(review): confirm against the full file.)
4063 if (RegNum == static_cast<unsigned>(-1)) {
4064 Parser.eatToEndOfStatement();
4065 Error(SRegLoc, "register name or alias expected");
// (elided, line 4066: `return false;`)
4069 // Shouldn't be anything else.
4070 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4071 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4072 Parser.eatToEndOfStatement();
// (elided, line 4073: `return false;`)
4076 Parser.Lex(); // Consume the EndOfStatement
4078 auto pair = std::make_pair(IsVector, RegNum);
// First definition wins: GetOrCreateValue only inserts if absent, so a
// conflicting redefinition leaves the old mapping and warns.
4079 if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
4080 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4085 /// parseDirectiveUnreq
4086 /// ::= .unreq registername
// Removes a register alias previously created with .req. Erasing an unknown
// name is a no-op (StringMap::erase on a missing key). The lookup is
// lower-cased to match how .req aliases are stored.
4087 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4088 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4089 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4090 Parser.eatToEndOfStatement();
// (elided, lines 4091-4092: `return false;` and brace close)
4093 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4094 Parser.Lex(); // Eat the identifier.
// Decomposes `Expr` into an optional AArch64 (ELF) modifier, an optional
// Darwin symbol-ref variant, and a constant addend. Accepted shapes are:
// [AArch64MCExpr wrapper around] (symbol | symbol +/- constant).
// Returns true when the expression is classifiable; also rejects mixing
// Darwin and ELF modifier syntax in one expression.
// NOTE(review): the return type / `bool` line (4098) and several guard lines
// are elided in this listing.
4099 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4100 AArch64MCExpr::VariantKind &ELFRefKind,
4101 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4103 ELFRefKind = AArch64MCExpr::VK_INVALID;
4104 DarwinRefKind = MCSymbolRefExpr::VK_None;
// (elided, line 4105/4106: `Addend = 0;`)
// Unwrap an AArch64-specific modifier (e.g. :lo12:) if present.
4107 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4108 ELFRefKind = AE->getKind();
4109 Expr = AE->getSubExpr();
4112 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
// (elided, line 4113: `if (SE) {`)
4114 // It's a simple symbol reference with no addend.
4115 DarwinRefKind = SE->getKind();
// (elided, lines 4116-4118: `return true;` and brace close)
4119 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
// (elided, lines 4120-4122: null-check on BE)
4123 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
// (elided, lines 4124-4125: null-check on SE)
4126 DarwinRefKind = SE->getKind();
// Only symbol + constant and symbol - constant are representable.
4128 if (BE->getOpcode() != MCBinaryExpr::Add &&
4129 BE->getOpcode() != MCBinaryExpr::Sub)
// (elided, lines 4130-4131: `return false;`)
4132 // See if the addend is a constant, otherwise there's more going
4133 // on here than we can deal with.
4134 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
// (elided, lines 4135-4137: null-check on AddendExpr)
4138 Addend = AddendExpr->getValue();
4139 if (BE->getOpcode() == MCBinaryExpr::Sub)
// (elided, lines 4140-4141: `Addend = -Addend;`)
4142 // It's some symbol reference + a constant addend, but really
4143 // shouldn't use both Darwin and ELF syntax.
4144 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4145 DarwinRefKind == MCSymbolRefExpr::VK_None;
4148 /// Force static initialization.
// Registers this asm parser for both endiannesses of the AArch64 target and
// for the legacy "arm64" target names. Called by the TargetRegistry at
// library-init time; `extern "C"` keeps the symbol name unmangled.
4149 extern "C" void LLVMInitializeAArch64AsmParser() {
4150 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4151 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4153 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
4154 RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
4157 #define GET_REGISTER_MATCHER
4158 #define GET_SUBTARGET_FEATURE_NAME
4159 #define GET_MATCHER_IMPLEMENTATION
4160 #include "AArch64GenAsmMatcher.inc"
4162 // Define this matcher function after the auto-generated include so we
4163 // have the match class enum definitions.
// Extra per-target operand-class check used by the generated matcher for
// InstAliases with a fixed literal immediate in their syntax: maps the match
// class to its expected constant and compares against the parsed operand.
// NOTE(review): the large `switch` over match-class kinds that assigns
// `ExpectedVal` (original lines ~4171-4214) is elided in this listing, as is
// the second parameter of the function (line 4165, the match-class kind).
4164 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4166 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4167 // If the kind is a token for a literal immediate, check if our asm
4168 // operand matches. This is for InstAliases which have a fixed-value
4169 // immediate in the syntax.
4170 int64_t ExpectedVal;
4173 return Match_InvalidOperand;
// Non-immediate operands cannot satisfy a literal-immediate class.
4215 return Match_InvalidOperand;
4216 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
// Symbolic (non-constant) immediates never match a fixed literal.
4218 return Match_InvalidOperand;
4219 if (CE->getValue() == ExpectedVal)
4220 return Match_Success;
4221 return Match_InvalidOperand;