1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class AArch64AsmParser : public MCTargetAsmParser {
42 StringRef Mnemonic; ///< Instruction mnemonic.
46 // Map of register aliases registers via the .req directive.
47 StringMap<std::pair<bool, unsigned> > RegisterReqs;
49 AArch64TargetStreamer &getTargetStreamer() {
50 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
51 return static_cast<AArch64TargetStreamer &>(TS);
54 MCAsmParser &getParser() const { return Parser; }
55 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
57 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
59 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
60 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
61 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
62 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
63 int tryParseRegister();
64 int tryMatchVectorRegister(StringRef &Kind, bool expected);
65 bool parseRegister(OperandVector &Operands);
66 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
67 bool parseVectorList(OperandVector &Operands);
68 bool parseOperand(OperandVector &Operands, bool isCondCode,
71 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
72 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
73 bool showMatchError(SMLoc Loc, unsigned ErrCode);
75 bool parseDirectiveWord(unsigned Size, SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
117 const MCInstrInfo &MII,
118 const MCTargetOptions &Options)
119 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
120 MCAsmParserExtension::Initialize(_Parser);
121 if (Parser.getStreamer().getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(Parser.getStreamer());
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
146 class AArch64Operand : public MCParsedAsmOperand {
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
184 struct VectorIndexOp {
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
212 uint64_t FeatureBits; // We need to pass through information about which
213 // core we are compiling for so that the SysReg
214 // Mappers can appropriately conditionalize.
225 struct ShiftExtendOp {
226 AArch64_AM::ShiftExtendType Type;
228 bool HasExplicitAmount;
238 struct VectorListOp VectorList;
239 struct VectorIndexOp VectorIndex;
241 struct ShiftedImmOp ShiftedImm;
242 struct CondCodeOp CondCode;
243 struct FPImmOp FPImm;
244 struct BarrierOp Barrier;
245 struct SysRegOp SysReg;
246 struct SysCRImmOp SysCRImm;
247 struct PrefetchOp Prefetch;
248 struct ShiftExtendOp ShiftExtend;
251 // Keep the MCContext around as the MCExprs may need manipulated during
252 // the add<>Operands() calls.
256 AArch64Operand(KindTy K, MCContext &_Ctx)
257 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
259 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
261 StartLoc = o.StartLoc;
271 ShiftedImm = o.ShiftedImm;
274 CondCode = o.CondCode;
286 VectorList = o.VectorList;
289 VectorIndex = o.VectorIndex;
295 SysCRImm = o.SysCRImm;
298 Prefetch = o.Prefetch;
301 ShiftExtend = o.ShiftExtend;
306 /// getStartLoc - Get the location of the first token of this operand.
307 SMLoc getStartLoc() const override { return StartLoc; }
308 /// getEndLoc - Get the location of the last token of this operand.
309 SMLoc getEndLoc() const override { return EndLoc; }
311 StringRef getToken() const {
312 assert(Kind == k_Token && "Invalid access!");
313 return StringRef(Tok.Data, Tok.Length);
316 bool isTokenSuffix() const {
317 assert(Kind == k_Token && "Invalid access!");
321 const MCExpr *getImm() const {
322 assert(Kind == k_Immediate && "Invalid access!");
326 const MCExpr *getShiftedImmVal() const {
327 assert(Kind == k_ShiftedImm && "Invalid access!");
328 return ShiftedImm.Val;
331 unsigned getShiftedImmShift() const {
332 assert(Kind == k_ShiftedImm && "Invalid access!");
333 return ShiftedImm.ShiftAmount;
336 AArch64CC::CondCode getCondCode() const {
337 assert(Kind == k_CondCode && "Invalid access!");
338 return CondCode.Code;
341 unsigned getFPImm() const {
342 assert(Kind == k_FPImm && "Invalid access!");
346 unsigned getBarrier() const {
347 assert(Kind == k_Barrier && "Invalid access!");
351 unsigned getReg() const override {
352 assert(Kind == k_Register && "Invalid access!");
356 unsigned getVectorListStart() const {
357 assert(Kind == k_VectorList && "Invalid access!");
358 return VectorList.RegNum;
361 unsigned getVectorListCount() const {
362 assert(Kind == k_VectorList && "Invalid access!");
363 return VectorList.Count;
366 unsigned getVectorIndex() const {
367 assert(Kind == k_VectorIndex && "Invalid access!");
368 return VectorIndex.Val;
371 StringRef getSysReg() const {
372 assert(Kind == k_SysReg && "Invalid access!");
373 return StringRef(SysReg.Data, SysReg.Length);
376 uint64_t getSysRegFeatureBits() const {
377 assert(Kind == k_SysReg && "Invalid access!");
378 return SysReg.FeatureBits;
381 unsigned getSysCR() const {
382 assert(Kind == k_SysCR && "Invalid access!");
386 unsigned getPrefetch() const {
387 assert(Kind == k_Prefetch && "Invalid access!");
391 AArch64_AM::ShiftExtendType getShiftExtendType() const {
392 assert(Kind == k_ShiftExtend && "Invalid access!");
393 return ShiftExtend.Type;
396 unsigned getShiftExtendAmount() const {
397 assert(Kind == k_ShiftExtend && "Invalid access!");
398 return ShiftExtend.Amount;
401 bool hasShiftExtendAmount() const {
402 assert(Kind == k_ShiftExtend && "Invalid access!");
403 return ShiftExtend.HasExplicitAmount;
406 bool isImm() const override { return Kind == k_Immediate; }
407 bool isMem() const override { return false; }
408 bool isSImm9() const {
411 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
414 int64_t Val = MCE->getValue();
415 return (Val >= -256 && Val < 256);
417 bool isSImm7s4() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
426 bool isSImm7s8() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
435 bool isSImm7s16() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
445 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
446 AArch64MCExpr::VariantKind ELFRefKind;
447 MCSymbolRefExpr::VariantKind DarwinRefKind;
449 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
451 // If we don't understand the expression, assume the best and
452 // let the fixup and relocation code deal with it.
456 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
457 ELFRefKind == AArch64MCExpr::VK_LO12 ||
458 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
459 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
460 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
461 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
462 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
463 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
464 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
465 // Note that we don't range-check the addend. It's adjusted modulo page
466 // size when converted, so there is no "out of range" condition when using
468 return Addend >= 0 && (Addend % Scale) == 0;
469 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
470 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
471 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
478 template <int Scale> bool isUImm12Offset() const {
482 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
484 return isSymbolicUImm12Offset(getImm(), Scale);
486 int64_t Val = MCE->getValue();
487 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
490 bool isImm0_7() const {
493 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
496 int64_t Val = MCE->getValue();
497 return (Val >= 0 && Val < 8);
499 bool isImm1_8() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val > 0 && Val < 9);
508 bool isImm0_15() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val >= 0 && Val < 16);
517 bool isImm1_16() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val > 0 && Val < 17);
526 bool isImm0_31() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val >= 0 && Val < 32);
535 bool isImm1_31() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val >= 1 && Val < 32);
544 bool isImm1_32() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 1 && Val < 33);
553 bool isImm0_63() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 0 && Val < 64);
562 bool isImm1_63() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 1 && Val < 64);
571 bool isImm1_64() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 1 && Val < 65);
580 bool isImm0_127() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 0 && Val < 128);
589 bool isImm0_255() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 0 && Val < 256);
598 bool isImm0_65535() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 65536);
607 bool isImm32_63() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 32 && Val < 64);
616 bool isLogicalImm32() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
623 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
626 return AArch64_AM::isLogicalImmediate(Val, 32);
628 bool isLogicalImm64() const {
631 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
634 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
636 bool isLogicalImm32Not() const {
639 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
642 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
643 return AArch64_AM::isLogicalImmediate(Val, 32);
645 bool isLogicalImm64Not() const {
648 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
651 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
653 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
654 bool isAddSubImm() const {
655 if (!isShiftedImm() && !isImm())
660 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
661 if (isShiftedImm()) {
662 unsigned Shift = ShiftedImm.ShiftAmount;
663 Expr = ShiftedImm.Val;
664 if (Shift != 0 && Shift != 12)
670 AArch64MCExpr::VariantKind ELFRefKind;
671 MCSymbolRefExpr::VariantKind DarwinRefKind;
673 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
674 DarwinRefKind, Addend)) {
675 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
676 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
677 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
678 || ELFRefKind == AArch64MCExpr::VK_LO12
679 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
680 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
681 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
682 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
683 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
684 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
685 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
688 // Otherwise it should be a real immediate in range:
689 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
690 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
692 bool isCondCode() const { return Kind == k_CondCode; }
693 bool isSIMDImmType10() const {
696 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
699 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
701 bool isBranchTarget26() const {
704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
707 int64_t Val = MCE->getValue();
710 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
712 bool isPCRelLabel19() const {
715 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
718 int64_t Val = MCE->getValue();
721 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
723 bool isBranchTarget14() const {
726 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
729 int64_t Val = MCE->getValue();
732 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
736 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
740 AArch64MCExpr::VariantKind ELFRefKind;
741 MCSymbolRefExpr::VariantKind DarwinRefKind;
743 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
744 DarwinRefKind, Addend)) {
747 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
750 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
751 if (ELFRefKind == AllowedModifiers[i])
758 bool isMovZSymbolG3() const {
759 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
760 return isMovWSymbol(Variants);
763 bool isMovZSymbolG2() const {
764 static AArch64MCExpr::VariantKind Variants[] = {
765 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
766 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
767 return isMovWSymbol(Variants);
770 bool isMovZSymbolG1() const {
771 static AArch64MCExpr::VariantKind Variants[] = {
772 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
773 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
774 AArch64MCExpr::VK_DTPREL_G1,
776 return isMovWSymbol(Variants);
779 bool isMovZSymbolG0() const {
780 static AArch64MCExpr::VariantKind Variants[] = {
781 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
782 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
783 return isMovWSymbol(Variants);
786 bool isMovKSymbolG3() const {
787 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
788 return isMovWSymbol(Variants);
791 bool isMovKSymbolG2() const {
792 static AArch64MCExpr::VariantKind Variants[] = {
793 AArch64MCExpr::VK_ABS_G2_NC};
794 return isMovWSymbol(Variants);
797 bool isMovKSymbolG1() const {
798 static AArch64MCExpr::VariantKind Variants[] = {
799 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
800 AArch64MCExpr::VK_DTPREL_G1_NC
802 return isMovWSymbol(Variants);
805 bool isMovKSymbolG0() const {
806 static AArch64MCExpr::VariantKind Variants[] = {
807 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
808 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
810 return isMovWSymbol(Variants);
813 template<int RegWidth, int Shift>
814 bool isMOVZMovAlias() const {
815 if (!isImm()) return false;
817 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
818 if (!CE) return false;
819 uint64_t Value = CE->getValue();
822 Value &= 0xffffffffULL;
824 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
825 if (Value == 0 && Shift != 0)
828 return (Value & ~(0xffffULL << Shift)) == 0;
831 template<int RegWidth, int Shift>
832 bool isMOVNMovAlias() const {
833 if (!isImm()) return false;
835 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836 if (!CE) return false;
837 uint64_t Value = CE->getValue();
839 // MOVZ takes precedence over MOVN.
840 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
841 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
846 Value &= 0xffffffffULL;
848 return (Value & ~(0xffffULL << Shift)) == 0;
851 bool isFPImm() const { return Kind == k_FPImm; }
852 bool isBarrier() const { return Kind == k_Barrier; }
853 bool isSysReg() const { return Kind == k_SysReg; }
854 bool isMRSSystemRegister() const {
855 if (!isSysReg()) return false;
857 bool IsKnownRegister;
858 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
859 Mapper.fromString(getSysReg(), IsKnownRegister);
861 return IsKnownRegister;
863 bool isMSRSystemRegister() const {
864 if (!isSysReg()) return false;
866 bool IsKnownRegister;
867 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
868 Mapper.fromString(getSysReg(), IsKnownRegister);
870 return IsKnownRegister;
872 bool isSystemPStateField() const {
873 if (!isSysReg()) return false;
875 bool IsKnownRegister;
876 AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
878 return IsKnownRegister;
880 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
881 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
882 bool isVectorRegLo() const {
883 return Kind == k_Register && Reg.isVector &&
884 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
887 bool isGPR32as64() const {
888 return Kind == k_Register && !Reg.isVector &&
889 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
892 bool isGPR64sp0() const {
893 return Kind == k_Register && !Reg.isVector &&
894 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
897 /// Is this a vector list with the type implicit (presumably attached to the
898 /// instruction itself)?
899 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
900 return Kind == k_VectorList && VectorList.Count == NumRegs &&
901 !VectorList.ElementKind;
904 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
905 bool isTypedVectorList() const {
906 if (Kind != k_VectorList)
908 if (VectorList.Count != NumRegs)
910 if (VectorList.ElementKind != ElementKind)
912 return VectorList.NumElements == NumElements;
915 bool isVectorIndex1() const {
916 return Kind == k_VectorIndex && VectorIndex.Val == 1;
918 bool isVectorIndexB() const {
919 return Kind == k_VectorIndex && VectorIndex.Val < 16;
921 bool isVectorIndexH() const {
922 return Kind == k_VectorIndex && VectorIndex.Val < 8;
924 bool isVectorIndexS() const {
925 return Kind == k_VectorIndex && VectorIndex.Val < 4;
927 bool isVectorIndexD() const {
928 return Kind == k_VectorIndex && VectorIndex.Val < 2;
930 bool isToken() const override { return Kind == k_Token; }
931 bool isTokenEqual(StringRef Str) const {
932 return Kind == k_Token && getToken() == Str;
934 bool isSysCR() const { return Kind == k_SysCR; }
935 bool isPrefetch() const { return Kind == k_Prefetch; }
936 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
937 bool isShifter() const {
938 if (!isShiftExtend())
941 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
942 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
943 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
944 ST == AArch64_AM::MSL);
946 bool isExtend() const {
947 if (!isShiftExtend())
950 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
951 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
952 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
953 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
954 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
955 ET == AArch64_AM::LSL) &&
956 getShiftExtendAmount() <= 4;
959 bool isExtend64() const {
962 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
963 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
964 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
966 bool isExtendLSL64() const {
969 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
970 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
971 ET == AArch64_AM::LSL) &&
972 getShiftExtendAmount() <= 4;
975 template<int Width> bool isMemXExtend() const {
978 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
979 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
980 (getShiftExtendAmount() == Log2_32(Width / 8) ||
981 getShiftExtendAmount() == 0);
984 template<int Width> bool isMemWExtend() const {
987 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
988 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
989 (getShiftExtendAmount() == Log2_32(Width / 8) ||
990 getShiftExtendAmount() == 0);
993 template <unsigned width>
994 bool isArithmeticShifter() const {
998 // An arithmetic shifter is LSL, LSR, or ASR.
999 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1000 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1001 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1004 template <unsigned width>
1005 bool isLogicalShifter() const {
1009 // A logical shifter is LSL, LSR, ASR or ROR.
1010 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1011 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1012 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1013 getShiftExtendAmount() < width;
1016 bool isMovImm32Shifter() const {
1020 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1021 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1022 if (ST != AArch64_AM::LSL)
1024 uint64_t Val = getShiftExtendAmount();
1025 return (Val == 0 || Val == 16);
1028 bool isMovImm64Shifter() const {
1032 // A MOVi shifter is LSL of 0 or 16.
1033 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1034 if (ST != AArch64_AM::LSL)
1036 uint64_t Val = getShiftExtendAmount();
1037 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1040 bool isLogicalVecShifter() const {
1044 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1045 unsigned Shift = getShiftExtendAmount();
1046 return getShiftExtendType() == AArch64_AM::LSL &&
1047 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1050 bool isLogicalVecHalfWordShifter() const {
1051 if (!isLogicalVecShifter())
1054 // A logical vector shifter is a left shift by 0 or 8.
1055 unsigned Shift = getShiftExtendAmount();
1056 return getShiftExtendType() == AArch64_AM::LSL &&
1057 (Shift == 0 || Shift == 8);
1060 bool isMoveVecShifter() const {
1061 if (!isShiftExtend())
1064 // A logical vector shifter is a left shift by 8 or 16.
1065 unsigned Shift = getShiftExtendAmount();
1066 return getShiftExtendType() == AArch64_AM::MSL &&
1067 (Shift == 8 || Shift == 16);
1070 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1071 // to LDUR/STUR when the offset is not legal for the former but is for
1072 // the latter. As such, in addition to checking for being a legal unscaled
1073 // address, also check that it is not a legal scaled address. This avoids
1074 // ambiguity in the matcher.
1076 bool isSImm9OffsetFB() const {
1077 return isSImm9() && !isUImm12Offset<Width / 8>();
1080 bool isAdrpLabel() const {
1081 // Validation was handled during parsing, so we just sanity check that
1082 // something didn't go haywire.
1086 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1087 int64_t Val = CE->getValue();
1088 int64_t Min = - (4096 * (1LL << (21 - 1)));
1089 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1090 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1096 bool isAdrLabel() const {
1097 // Validation was handled during parsing, so we just sanity check that
1098 // something didn't go haywire.
1102 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1103 int64_t Val = CE->getValue();
1104 int64_t Min = - (1LL << (21 - 1));
1105 int64_t Max = ((1LL << (21 - 1)) - 1);
1106 return Val >= Min && Val <= Max;
1112 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1113 // Add as immediates when possible. Null MCExpr = 0.
1115 Inst.addOperand(MCOperand::CreateImm(0));
1116 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1117 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1119 Inst.addOperand(MCOperand::CreateExpr(Expr));
1122 void addRegOperands(MCInst &Inst, unsigned N) const {
1123 assert(N == 1 && "Invalid number of operands!");
1124 Inst.addOperand(MCOperand::CreateReg(getReg()));
1127 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1128 assert(N == 1 && "Invalid number of operands!");
1130 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1132 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1133 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1134 RI->getEncodingValue(getReg()));
1136 Inst.addOperand(MCOperand::CreateReg(Reg));
1139 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1142 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1143 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1146 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1147 assert(N == 1 && "Invalid number of operands!");
1149 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1150 Inst.addOperand(MCOperand::CreateReg(getReg()));
1153 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1154 assert(N == 1 && "Invalid number of operands!");
1155 Inst.addOperand(MCOperand::CreateReg(getReg()));
1158 template <unsigned NumRegs>
1159 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1162 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1163 unsigned FirstReg = FirstRegs[NumRegs - 1];
1166 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1169 template <unsigned NumRegs>
1170 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1171 assert(N == 1 && "Invalid number of operands!");
1172 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1173 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1174 unsigned FirstReg = FirstRegs[NumRegs - 1];
1177 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1180 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1185 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 1 && "Invalid number of operands!");
1187 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1190 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1191 assert(N == 1 && "Invalid number of operands!");
1192 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1195 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1196 assert(N == 1 && "Invalid number of operands!");
1197 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1200 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1201 assert(N == 1 && "Invalid number of operands!");
1202 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1205 void addImmOperands(MCInst &Inst, unsigned N) const {
1206 assert(N == 1 && "Invalid number of operands!");
1207 // If this is a pageoff symrefexpr with an addend, adjust the addend
1208 // to be only the page-offset portion. Otherwise, just add the expr
1210 addExpr(Inst, getImm());
1213 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1214 assert(N == 2 && "Invalid number of operands!");
1215 if (isShiftedImm()) {
1216 addExpr(Inst, getShiftedImmVal());
1217 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1219 addExpr(Inst, getImm());
1220 Inst.addOperand(MCOperand::CreateImm(0));
1224 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1225 assert(N == 1 && "Invalid number of operands!");
1226 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1229 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1233 addExpr(Inst, getImm());
1235 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1238 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1239 addImmOperands(Inst, N);
1243 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1244 assert(N == 1 && "Invalid number of operands!");
1245 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1248 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1251 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1254 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1255 assert(N == 1 && "Invalid number of operands!");
1256 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1257 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1260 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1263 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1266 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1267 assert(N == 1 && "Invalid number of operands!");
1268 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1269 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1272 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1275 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1278 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1279 assert(N == 1 && "Invalid number of operands!");
1280 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1281 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1284 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1285 assert(N == 1 && "Invalid number of operands!");
1286 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1287 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1290 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1291 assert(N == 1 && "Invalid number of operands!");
1292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1293 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1296 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1299 assert(MCE && "Invalid constant immediate operand!");
1300 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1303 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1306 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1309 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1312 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1315 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1316 assert(N == 1 && "Invalid number of operands!");
1317 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1318 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1321 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1324 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1327 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
1329 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1330 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1333 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1339 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1341 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1345 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1351 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1352 assert(N == 1 && "Invalid number of operands!");
1353 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1354 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1357 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1363 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1364 assert(N == 1 && "Invalid number of operands!");
1365 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1368 Inst.addOperand(MCOperand::CreateImm(encoding));
1371 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1374 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1375 Inst.addOperand(MCOperand::CreateImm(encoding));
1378 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1379 assert(N == 1 && "Invalid number of operands!");
1380 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1381 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1382 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1383 Inst.addOperand(MCOperand::CreateImm(encoding));
1386 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1387 assert(N == 1 && "Invalid number of operands!");
1388 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1390 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1391 Inst.addOperand(MCOperand::CreateImm(encoding));
1394 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1395 assert(N == 1 && "Invalid number of operands!");
1396 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1397 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1398 Inst.addOperand(MCOperand::CreateImm(encoding));
1401 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 addExpr(Inst, getImm());
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1415 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1416 // Branch operands don't encode the low bits, so shift them off
1417 // here. If it's a label, however, just put it on directly as there's
1418 // not enough information now to do anything.
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 addExpr(Inst, getImm());
1425 assert(MCE && "Invalid constant immediate operand!");
1426 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1429 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1430 // Branch operands don't encode the low bits, so shift them off
1431 // here. If it's a label, however, just put it on directly as there's
1432 // not enough information now to do anything.
1433 assert(N == 1 && "Invalid number of operands!");
1434 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1436 addExpr(Inst, getImm());
1439 assert(MCE && "Invalid constant immediate operand!");
1440 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1443 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1444 assert(N == 1 && "Invalid number of operands!");
1445 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1448 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1449 assert(N == 1 && "Invalid number of operands!");
1450 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1453 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1454 assert(N == 1 && "Invalid number of operands!");
1457 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1458 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1460 Inst.addOperand(MCOperand::CreateImm(Bits));
1463 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1464 assert(N == 1 && "Invalid number of operands!");
1467 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1468 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1470 Inst.addOperand(MCOperand::CreateImm(Bits));
1473 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1474 assert(N == 1 && "Invalid number of operands!");
1478 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1480 Inst.addOperand(MCOperand::CreateImm(Bits));
1483 void addSysCROperands(MCInst &Inst, unsigned N) const {
1484 assert(N == 1 && "Invalid number of operands!");
1485 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1488 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1489 assert(N == 1 && "Invalid number of operands!");
1490 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1493 void addShifterOperands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && "Invalid number of operands!");
1496 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1497 Inst.addOperand(MCOperand::CreateImm(Imm));
1500 void addExtendOperands(MCInst &Inst, unsigned N) const {
1501 assert(N == 1 && "Invalid number of operands!");
1502 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1503 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1504 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1505 Inst.addOperand(MCOperand::CreateImm(Imm));
1508 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1509 assert(N == 1 && "Invalid number of operands!");
1510 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1511 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1512 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1513 Inst.addOperand(MCOperand::CreateImm(Imm));
1516 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1517 assert(N == 2 && "Invalid number of operands!");
1518 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1519 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1520 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1521 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1524 // For 8-bit load/store instructions with a register offset, both the
1525 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1526 // they're disambiguated by whether the shift was explicit or implicit rather
1528 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1529 assert(N == 2 && "Invalid number of operands!");
1530 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1531 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1532 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1533 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1537 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1538 assert(N == 1 && "Invalid number of operands!");
1540 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1541 uint64_t Value = CE->getValue();
1542 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1546 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1547 assert(N == 1 && "Invalid number of operands!");
1549 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1550 uint64_t Value = CE->getValue();
1551 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1554 void print(raw_ostream &OS) const override;
1556 static std::unique_ptr<AArch64Operand>
1557 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1558 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1559 Op->Tok.Data = Str.data();
1560 Op->Tok.Length = Str.size();
1561 Op->Tok.IsSuffix = IsSuffix;
1567 static std::unique_ptr<AArch64Operand>
1568 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1569 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1570 Op->Reg.RegNum = RegNum;
1571 Op->Reg.isVector = isVector;
1577 static std::unique_ptr<AArch64Operand>
1578 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1579 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1580 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1581 Op->VectorList.RegNum = RegNum;
1582 Op->VectorList.Count = Count;
1583 Op->VectorList.NumElements = NumElements;
1584 Op->VectorList.ElementKind = ElementKind;
1590 static std::unique_ptr<AArch64Operand>
1591 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1592 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1593 Op->VectorIndex.Val = Idx;
1599 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1600 SMLoc E, MCContext &Ctx) {
1601 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1608 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1609 unsigned ShiftAmount,
1612 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1613 Op->ShiftedImm .Val = Val;
1614 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1620 static std::unique_ptr<AArch64Operand>
1621 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1622 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1623 Op->CondCode.Code = Code;
1629 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1631 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1632 Op->FPImm.Val = Val;
1638 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1640 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1641 Op->Barrier.Val = Val;
1647 static std::unique_ptr<AArch64Operand>
1648 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1649 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1650 Op->SysReg.Data = Str.data();
1651 Op->SysReg.Length = Str.size();
1652 Op->SysReg.FeatureBits = FeatureBits;
1658 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1659 SMLoc E, MCContext &Ctx) {
1660 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1661 Op->SysCRImm.Val = Val;
1667 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1669 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1670 Op->Prefetch.Val = Val;
1676 static std::unique_ptr<AArch64Operand>
1677 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1678 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1679 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1680 Op->ShiftExtend.Type = ShOp;
1681 Op->ShiftExtend.Amount = Val;
1682 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1689 } // end anonymous namespace.
1691 void AArch64Operand::print(raw_ostream &OS) const {
1694 OS << "<fpimm " << getFPImm() << "("
1695 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1699 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1701 OS << "<barrier " << Name << ">";
1703 OS << "<barrier invalid #" << getBarrier() << ">";
1707 getImm()->print(OS);
1709 case k_ShiftedImm: {
1710 unsigned Shift = getShiftedImmShift();
1711 OS << "<shiftedimm ";
1712 getShiftedImmVal()->print(OS);
1713 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1717 OS << "<condcode " << getCondCode() << ">";
1720 OS << "<register " << getReg() << ">";
1722 case k_VectorList: {
1723 OS << "<vectorlist ";
1724 unsigned Reg = getVectorListStart();
1725 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1726 OS << Reg + i << " ";
1731 OS << "<vectorindex " << getVectorIndex() << ">";
1734 OS << "<sysreg: " << getSysReg() << '>';
1737 OS << "'" << getToken() << "'";
1740 OS << "c" << getSysCR();
1744 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1746 OS << "<prfop " << Name << ">";
1748 OS << "<prfop invalid #" << getPrefetch() << ">";
1751 case k_ShiftExtend: {
1752 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1753 << getShiftExtendAmount();
1754 if (!hasShiftExtendAmount())
1762 /// @name Auto-generated Match Functions
1765 static unsigned MatchRegisterName(StringRef Name);
1769 static unsigned matchVectorRegName(StringRef Name) {
1770 return StringSwitch<unsigned>(Name)
1771 .Case("v0", AArch64::Q0)
1772 .Case("v1", AArch64::Q1)
1773 .Case("v2", AArch64::Q2)
1774 .Case("v3", AArch64::Q3)
1775 .Case("v4", AArch64::Q4)
1776 .Case("v5", AArch64::Q5)
1777 .Case("v6", AArch64::Q6)
1778 .Case("v7", AArch64::Q7)
1779 .Case("v8", AArch64::Q8)
1780 .Case("v9", AArch64::Q9)
1781 .Case("v10", AArch64::Q10)
1782 .Case("v11", AArch64::Q11)
1783 .Case("v12", AArch64::Q12)
1784 .Case("v13", AArch64::Q13)
1785 .Case("v14", AArch64::Q14)
1786 .Case("v15", AArch64::Q15)
1787 .Case("v16", AArch64::Q16)
1788 .Case("v17", AArch64::Q17)
1789 .Case("v18", AArch64::Q18)
1790 .Case("v19", AArch64::Q19)
1791 .Case("v20", AArch64::Q20)
1792 .Case("v21", AArch64::Q21)
1793 .Case("v22", AArch64::Q22)
1794 .Case("v23", AArch64::Q23)
1795 .Case("v24", AArch64::Q24)
1796 .Case("v25", AArch64::Q25)
1797 .Case("v26", AArch64::Q26)
1798 .Case("v27", AArch64::Q27)
1799 .Case("v28", AArch64::Q28)
1800 .Case("v29", AArch64::Q29)
1801 .Case("v30", AArch64::Q30)
1802 .Case("v31", AArch64::Q31)
1806 static bool isValidVectorKind(StringRef Name) {
1807 return StringSwitch<bool>(Name.lower())
1817 // Accept the width neutral ones, too, for verbose syntax. If those
1818 // aren't used in the right places, the token operand won't match so
1819 // all will work out.
1827 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1828 char &ElementKind) {
1829 assert(isValidVectorKind(Name));
1831 ElementKind = Name.lower()[Name.size() - 1];
1834 if (Name.size() == 2)
1837 // Parse the lane count
1838 Name = Name.drop_front();
1839 while (isdigit(Name.front())) {
1840 NumElements = 10 * NumElements + (Name.front() - '0');
1841 Name = Name.drop_front();
1845 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1847 StartLoc = getLoc();
1848 RegNo = tryParseRegister();
1849 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1850 return (RegNo == (unsigned)-1);
1853 // Matches a register name or register alias previously defined by '.req'
1854 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1856 unsigned RegNum = isVector ? matchVectorRegName(Name)
1857 : MatchRegisterName(Name);
1860 // Check for aliases registered via .req. Canonicalize to lower case.
1861 // That's more consistent since register names are case insensitive, and
1862 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1863 auto Entry = RegisterReqs.find(Name.lower());
1864 if (Entry == RegisterReqs.end())
1866 // set RegNum if the match is the right kind of register
1867 if (isVector == Entry->getValue().first)
1868 RegNum = Entry->getValue().second;
1873 /// tryParseRegister - Try to parse a register name. The token must be an
1874 /// Identifier when called, and if it is a register name the token is eaten and
1875 /// the register is added to the operand list.
1876 int AArch64AsmParser::tryParseRegister() {
1877 const AsmToken &Tok = Parser.getTok();
1878 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1880 std::string lowerCase = Tok.getString().lower();
1881 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1882 // Also handle a few aliases of registers.
1884 RegNum = StringSwitch<unsigned>(lowerCase)
1885 .Case("fp", AArch64::FP)
1886 .Case("lr", AArch64::LR)
1887 .Case("x31", AArch64::XZR)
1888 .Case("w31", AArch64::WZR)
1894 Parser.Lex(); // Eat identifier token.
1898 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1899 /// kind specifier. If it is a register specifier, eat the token and return it.
1900 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1901 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1902 TokError("vector register expected");
1906 StringRef Name = Parser.getTok().getString();
1907 // If there is a kind specifier, it's separated from the register name by
1909 size_t Start = 0, Next = Name.find('.');
1910 StringRef Head = Name.slice(Start, Next);
1911 unsigned RegNum = matchRegisterNameAlias(Head, true);
1914 if (Next != StringRef::npos) {
1915 Kind = Name.slice(Next, StringRef::npos);
1916 if (!isValidVectorKind(Kind)) {
1917 TokError("invalid vector kind qualifier");
1921 Parser.Lex(); // Eat the register token.
1926 TokError("vector register expected");
1930 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1931 AArch64AsmParser::OperandMatchResultTy
1932 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1935 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1936 Error(S, "Expected cN operand where 0 <= N <= 15");
1937 return MatchOperand_ParseFail;
1940 StringRef Tok = Parser.getTok().getIdentifier();
1941 if (Tok[0] != 'c' && Tok[0] != 'C') {
1942 Error(S, "Expected cN operand where 0 <= N <= 15");
1943 return MatchOperand_ParseFail;
1947 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1948 if (BadNum || CRNum > 15) {
1949 Error(S, "Expected cN operand where 0 <= N <= 15");
1950 return MatchOperand_ParseFail;
1953 Parser.Lex(); // Eat identifier token.
1955 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1956 return MatchOperand_Success;
1959 /// tryParsePrefetch - Try to parse a prefetch operand.
1960 AArch64AsmParser::OperandMatchResultTy
1961 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1963 const AsmToken &Tok = Parser.getTok();
1964 // Either an identifier for named values or a 5-bit immediate.
1965 bool Hash = Tok.is(AsmToken::Hash);
1966 if (Hash || Tok.is(AsmToken::Integer)) {
1968 Parser.Lex(); // Eat hash token.
1969 const MCExpr *ImmVal;
1970 if (getParser().parseExpression(ImmVal))
1971 return MatchOperand_ParseFail;
1973 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1975 TokError("immediate value expected for prefetch operand");
1976 return MatchOperand_ParseFail;
1978 unsigned prfop = MCE->getValue();
1980 TokError("prefetch operand out of range, [0,31] expected");
1981 return MatchOperand_ParseFail;
1984 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1985 return MatchOperand_Success;
1988 if (Tok.isNot(AsmToken::Identifier)) {
1989 TokError("pre-fetch hint expected");
1990 return MatchOperand_ParseFail;
1994 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1996 TokError("pre-fetch hint expected");
1997 return MatchOperand_ParseFail;
2000 Parser.Lex(); // Eat identifier token.
2001 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
2002 return MatchOperand_Success;
2005 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2007 AArch64AsmParser::OperandMatchResultTy
2008 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2012 if (Parser.getTok().is(AsmToken::Hash)) {
2013 Parser.Lex(); // Eat hash token.
2016 if (parseSymbolicImmVal(Expr))
2017 return MatchOperand_ParseFail;
2019 AArch64MCExpr::VariantKind ELFRefKind;
2020 MCSymbolRefExpr::VariantKind DarwinRefKind;
2022 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2023 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2024 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2025 // No modifier was specified at all; this is the syntax for an ELF basic
2026 // ADRP relocation (unfortunately).
2028 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2029 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2030 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2032 Error(S, "gotpage label reference not allowed an addend");
2033 return MatchOperand_ParseFail;
2034 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2035 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2036 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2037 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2038 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2039 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2040 // The operand must be an @page or @gotpage qualified symbolref.
2041 Error(S, "page or gotpage label reference expected");
2042 return MatchOperand_ParseFail;
2046 // We have either a label reference possibly with addend or an immediate. The
2047 // addend is a raw value here. The linker will adjust it to only reference the
2049 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2050 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2052 return MatchOperand_Success;
2055 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2057 AArch64AsmParser::OperandMatchResultTy
2058 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2062 if (Parser.getTok().is(AsmToken::Hash)) {
2063 Parser.Lex(); // Eat hash token.
2066 if (getParser().parseExpression(Expr))
2067 return MatchOperand_ParseFail;
2069 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2070 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2072 return MatchOperand_Success;
2075 /// tryParseFPImm - A floating point immediate expression operand.
2076 AArch64AsmParser::OperandMatchResultTy
2077 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2081 if (Parser.getTok().is(AsmToken::Hash)) {
2082 Parser.Lex(); // Eat '#'
2086 // Handle negation, as that still comes through as a separate token.
2087 bool isNegative = false;
2088 if (Parser.getTok().is(AsmToken::Minus)) {
2092 const AsmToken &Tok = Parser.getTok();
2093 if (Tok.is(AsmToken::Real)) {
2094 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2095 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2096 // If we had a '-' in front, toggle the sign bit.
2097 IntVal ^= (uint64_t)isNegative << 63;
2098 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2099 Parser.Lex(); // Eat the token.
2100 // Check for out of range values. As an exception, we let Zero through,
2101 // as we handle that special case in post-processing before matching in
2102 // order to use the zero register for it.
2103 if (Val == -1 && !RealVal.isZero()) {
2104 TokError("expected compatible register or floating-point constant");
2105 return MatchOperand_ParseFail;
2107 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2108 return MatchOperand_Success;
2110 if (Tok.is(AsmToken::Integer)) {
2112 if (!isNegative && Tok.getString().startswith("0x")) {
2113 Val = Tok.getIntVal();
2114 if (Val > 255 || Val < 0) {
2115 TokError("encoded floating point value out of range");
2116 return MatchOperand_ParseFail;
2119 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2120 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2121 // If we had a '-' in front, toggle the sign bit.
2122 IntVal ^= (uint64_t)isNegative << 63;
2123 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2125 Parser.Lex(); // Eat the token.
2126 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2127 return MatchOperand_Success;
2131 return MatchOperand_NoMatch;
2133 TokError("invalid floating point immediate");
2134 return MatchOperand_ParseFail;
2137 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2138 AArch64AsmParser::OperandMatchResultTy
2139 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2142 if (Parser.getTok().is(AsmToken::Hash))
2143 Parser.Lex(); // Eat '#'
2144 else if (Parser.getTok().isNot(AsmToken::Integer))
2145 // Operand should start from # or should be integer, emit error otherwise.
2146 return MatchOperand_NoMatch;
2149 if (parseSymbolicImmVal(Imm))
2150 return MatchOperand_ParseFail;
2151 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2152 uint64_t ShiftAmount = 0;
2153 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2155 int64_t Val = MCE->getValue();
2156 if (Val > 0xfff && (Val & 0xfff) == 0) {
2157 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2161 SMLoc E = Parser.getTok().getLoc();
2162 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2164 return MatchOperand_Success;
2170 // The optional operand must be "lsl #N" where N is non-negative.
2171 if (!Parser.getTok().is(AsmToken::Identifier) ||
2172 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2173 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2174 return MatchOperand_ParseFail;
2180 if (Parser.getTok().is(AsmToken::Hash)) {
2184 if (Parser.getTok().isNot(AsmToken::Integer)) {
2185 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2186 return MatchOperand_ParseFail;
2189 int64_t ShiftAmount = Parser.getTok().getIntVal();
2191 if (ShiftAmount < 0) {
2192 Error(Parser.getTok().getLoc(), "positive shift amount required");
2193 return MatchOperand_ParseFail;
2195 Parser.Lex(); // Eat the number
2197 SMLoc E = Parser.getTok().getLoc();
2198 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2199 S, E, getContext()));
2200 return MatchOperand_Success;
2203 /// parseCondCodeString - Parse a Condition Code string.
2204 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2205 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2206 .Case("eq", AArch64CC::EQ)
2207 .Case("ne", AArch64CC::NE)
2208 .Case("cs", AArch64CC::HS)
2209 .Case("hs", AArch64CC::HS)
2210 .Case("cc", AArch64CC::LO)
2211 .Case("lo", AArch64CC::LO)
2212 .Case("mi", AArch64CC::MI)
2213 .Case("pl", AArch64CC::PL)
2214 .Case("vs", AArch64CC::VS)
2215 .Case("vc", AArch64CC::VC)
2216 .Case("hi", AArch64CC::HI)
2217 .Case("ls", AArch64CC::LS)
2218 .Case("ge", AArch64CC::GE)
2219 .Case("lt", AArch64CC::LT)
2220 .Case("gt", AArch64CC::GT)
2221 .Case("le", AArch64CC::LE)
2222 .Case("al", AArch64CC::AL)
2223 .Case("nv", AArch64CC::NV)
2224 .Default(AArch64CC::Invalid);
2228 /// parseCondCode - Parse a Condition Code operand.
2229 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2230 bool invertCondCode) {
2232 const AsmToken &Tok = Parser.getTok();
2233 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2235 StringRef Cond = Tok.getString();
2236 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2237 if (CC == AArch64CC::Invalid)
2238 return TokError("invalid condition code");
2239 Parser.Lex(); // Eat identifier token.
2241 if (invertCondCode) {
2242 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2243 return TokError("condition codes AL and NV are invalid for this instruction");
2244 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2248 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2252 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2253 /// them if present.
2254 AArch64AsmParser::OperandMatchResultTy
2255 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2256 const AsmToken &Tok = Parser.getTok();
2257 std::string LowerID = Tok.getString().lower();
2258 AArch64_AM::ShiftExtendType ShOp =
2259 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2260 .Case("lsl", AArch64_AM::LSL)
2261 .Case("lsr", AArch64_AM::LSR)
2262 .Case("asr", AArch64_AM::ASR)
2263 .Case("ror", AArch64_AM::ROR)
2264 .Case("msl", AArch64_AM::MSL)
2265 .Case("uxtb", AArch64_AM::UXTB)
2266 .Case("uxth", AArch64_AM::UXTH)
2267 .Case("uxtw", AArch64_AM::UXTW)
2268 .Case("uxtx", AArch64_AM::UXTX)
2269 .Case("sxtb", AArch64_AM::SXTB)
2270 .Case("sxth", AArch64_AM::SXTH)
2271 .Case("sxtw", AArch64_AM::SXTW)
2272 .Case("sxtx", AArch64_AM::SXTX)
2273 .Default(AArch64_AM::InvalidShiftExtend);
2275 if (ShOp == AArch64_AM::InvalidShiftExtend)
2276 return MatchOperand_NoMatch;
2278 SMLoc S = Tok.getLoc();
2281 bool Hash = getLexer().is(AsmToken::Hash);
2282 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2283 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2284 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2285 ShOp == AArch64_AM::MSL) {
2286 // We expect a number here.
2287 TokError("expected #imm after shift specifier");
2288 return MatchOperand_ParseFail;
2291 // "extend" type operatoins don't need an immediate, #0 is implicit.
2292 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2294 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2295 return MatchOperand_Success;
2299 Parser.Lex(); // Eat the '#'.
2301 // Make sure we do actually have a number
2302 if (!Parser.getTok().is(AsmToken::Integer)) {
2303 Error(Parser.getTok().getLoc(),
2304 "expected integer shift amount");
2305 return MatchOperand_ParseFail;
2308 const MCExpr *ImmVal;
2309 if (getParser().parseExpression(ImmVal))
2310 return MatchOperand_ParseFail;
2312 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2314 TokError("expected #imm after shift specifier");
2315 return MatchOperand_ParseFail;
2318 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2319 Operands.push_back(AArch64Operand::CreateShiftExtend(
2320 ShOp, MCE->getValue(), true, S, E, getContext()));
2321 return MatchOperand_Success;
2324 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2325 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2326 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2327 OperandVector &Operands) {
2328 if (Name.find('.') != StringRef::npos)
2329 return TokError("invalid operand");
2333 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2335 const AsmToken &Tok = Parser.getTok();
2336 StringRef Op = Tok.getString();
2337 SMLoc S = Tok.getLoc();
2339 const MCExpr *Expr = nullptr;
2341 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2343 Expr = MCConstantExpr::Create(op1, getContext()); \
2344 Operands.push_back( \
2345 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2346 Operands.push_back( \
2347 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2348 Operands.push_back( \
2349 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2350 Expr = MCConstantExpr::Create(op2, getContext()); \
2351 Operands.push_back( \
2352 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2355 if (Mnemonic == "ic") {
2356 if (!Op.compare_lower("ialluis")) {
2357 // SYS #0, C7, C1, #0
2358 SYS_ALIAS(0, 7, 1, 0);
2359 } else if (!Op.compare_lower("iallu")) {
2360 // SYS #0, C7, C5, #0
2361 SYS_ALIAS(0, 7, 5, 0);
2362 } else if (!Op.compare_lower("ivau")) {
2363 // SYS #3, C7, C5, #1
2364 SYS_ALIAS(3, 7, 5, 1);
2366 return TokError("invalid operand for IC instruction");
2368 } else if (Mnemonic == "dc") {
2369 if (!Op.compare_lower("zva")) {
2370 // SYS #3, C7, C4, #1
2371 SYS_ALIAS(3, 7, 4, 1);
2372 } else if (!Op.compare_lower("ivac")) {
2373 // SYS #3, C7, C6, #1
2374 SYS_ALIAS(0, 7, 6, 1);
2375 } else if (!Op.compare_lower("isw")) {
2376 // SYS #0, C7, C6, #2
2377 SYS_ALIAS(0, 7, 6, 2);
2378 } else if (!Op.compare_lower("cvac")) {
2379 // SYS #3, C7, C10, #1
2380 SYS_ALIAS(3, 7, 10, 1);
2381 } else if (!Op.compare_lower("csw")) {
2382 // SYS #0, C7, C10, #2
2383 SYS_ALIAS(0, 7, 10, 2);
2384 } else if (!Op.compare_lower("cvau")) {
2385 // SYS #3, C7, C11, #1
2386 SYS_ALIAS(3, 7, 11, 1);
2387 } else if (!Op.compare_lower("civac")) {
2388 // SYS #3, C7, C14, #1
2389 SYS_ALIAS(3, 7, 14, 1);
2390 } else if (!Op.compare_lower("cisw")) {
2391 // SYS #0, C7, C14, #2
2392 SYS_ALIAS(0, 7, 14, 2);
2394 return TokError("invalid operand for DC instruction");
2396 } else if (Mnemonic == "at") {
2397 if (!Op.compare_lower("s1e1r")) {
2398 // SYS #0, C7, C8, #0
2399 SYS_ALIAS(0, 7, 8, 0);
2400 } else if (!Op.compare_lower("s1e2r")) {
2401 // SYS #4, C7, C8, #0
2402 SYS_ALIAS(4, 7, 8, 0);
2403 } else if (!Op.compare_lower("s1e3r")) {
2404 // SYS #6, C7, C8, #0
2405 SYS_ALIAS(6, 7, 8, 0);
2406 } else if (!Op.compare_lower("s1e1w")) {
2407 // SYS #0, C7, C8, #1
2408 SYS_ALIAS(0, 7, 8, 1);
2409 } else if (!Op.compare_lower("s1e2w")) {
2410 // SYS #4, C7, C8, #1
2411 SYS_ALIAS(4, 7, 8, 1);
2412 } else if (!Op.compare_lower("s1e3w")) {
2413 // SYS #6, C7, C8, #1
2414 SYS_ALIAS(6, 7, 8, 1);
2415 } else if (!Op.compare_lower("s1e0r")) {
2416 // SYS #0, C7, C8, #3
2417 SYS_ALIAS(0, 7, 8, 2);
2418 } else if (!Op.compare_lower("s1e0w")) {
2419 // SYS #0, C7, C8, #3
2420 SYS_ALIAS(0, 7, 8, 3);
2421 } else if (!Op.compare_lower("s12e1r")) {
2422 // SYS #4, C7, C8, #4
2423 SYS_ALIAS(4, 7, 8, 4);
2424 } else if (!Op.compare_lower("s12e1w")) {
2425 // SYS #4, C7, C8, #5
2426 SYS_ALIAS(4, 7, 8, 5);
2427 } else if (!Op.compare_lower("s12e0r")) {
2428 // SYS #4, C7, C8, #6
2429 SYS_ALIAS(4, 7, 8, 6);
2430 } else if (!Op.compare_lower("s12e0w")) {
2431 // SYS #4, C7, C8, #7
2432 SYS_ALIAS(4, 7, 8, 7);
2434 return TokError("invalid operand for AT instruction");
2436 } else if (Mnemonic == "tlbi") {
2437 if (!Op.compare_lower("vmalle1is")) {
2438 // SYS #0, C8, C3, #0
2439 SYS_ALIAS(0, 8, 3, 0);
2440 } else if (!Op.compare_lower("alle2is")) {
2441 // SYS #4, C8, C3, #0
2442 SYS_ALIAS(4, 8, 3, 0);
2443 } else if (!Op.compare_lower("alle3is")) {
2444 // SYS #6, C8, C3, #0
2445 SYS_ALIAS(6, 8, 3, 0);
2446 } else if (!Op.compare_lower("vae1is")) {
2447 // SYS #0, C8, C3, #1
2448 SYS_ALIAS(0, 8, 3, 1);
2449 } else if (!Op.compare_lower("vae2is")) {
2450 // SYS #4, C8, C3, #1
2451 SYS_ALIAS(4, 8, 3, 1);
2452 } else if (!Op.compare_lower("vae3is")) {
2453 // SYS #6, C8, C3, #1
2454 SYS_ALIAS(6, 8, 3, 1);
2455 } else if (!Op.compare_lower("aside1is")) {
2456 // SYS #0, C8, C3, #2
2457 SYS_ALIAS(0, 8, 3, 2);
2458 } else if (!Op.compare_lower("vaae1is")) {
2459 // SYS #0, C8, C3, #3
2460 SYS_ALIAS(0, 8, 3, 3);
2461 } else if (!Op.compare_lower("alle1is")) {
2462 // SYS #4, C8, C3, #4
2463 SYS_ALIAS(4, 8, 3, 4);
2464 } else if (!Op.compare_lower("vale1is")) {
2465 // SYS #0, C8, C3, #5
2466 SYS_ALIAS(0, 8, 3, 5);
2467 } else if (!Op.compare_lower("vaale1is")) {
2468 // SYS #0, C8, C3, #7
2469 SYS_ALIAS(0, 8, 3, 7);
2470 } else if (!Op.compare_lower("vmalle1")) {
2471 // SYS #0, C8, C7, #0
2472 SYS_ALIAS(0, 8, 7, 0);
2473 } else if (!Op.compare_lower("alle2")) {
2474 // SYS #4, C8, C7, #0
2475 SYS_ALIAS(4, 8, 7, 0);
2476 } else if (!Op.compare_lower("vale2is")) {
2477 // SYS #4, C8, C3, #5
2478 SYS_ALIAS(4, 8, 3, 5);
2479 } else if (!Op.compare_lower("vale3is")) {
2480 // SYS #6, C8, C3, #5
2481 SYS_ALIAS(6, 8, 3, 5);
2482 } else if (!Op.compare_lower("alle3")) {
2483 // SYS #6, C8, C7, #0
2484 SYS_ALIAS(6, 8, 7, 0);
2485 } else if (!Op.compare_lower("vae1")) {
2486 // SYS #0, C8, C7, #1
2487 SYS_ALIAS(0, 8, 7, 1);
2488 } else if (!Op.compare_lower("vae2")) {
2489 // SYS #4, C8, C7, #1
2490 SYS_ALIAS(4, 8, 7, 1);
2491 } else if (!Op.compare_lower("vae3")) {
2492 // SYS #6, C8, C7, #1
2493 SYS_ALIAS(6, 8, 7, 1);
2494 } else if (!Op.compare_lower("aside1")) {
2495 // SYS #0, C8, C7, #2
2496 SYS_ALIAS(0, 8, 7, 2);
2497 } else if (!Op.compare_lower("vaae1")) {
2498 // SYS #0, C8, C7, #3
2499 SYS_ALIAS(0, 8, 7, 3);
2500 } else if (!Op.compare_lower("alle1")) {
2501 // SYS #4, C8, C7, #4
2502 SYS_ALIAS(4, 8, 7, 4);
2503 } else if (!Op.compare_lower("vale1")) {
2504 // SYS #0, C8, C7, #5
2505 SYS_ALIAS(0, 8, 7, 5);
2506 } else if (!Op.compare_lower("vale2")) {
2507 // SYS #4, C8, C7, #5
2508 SYS_ALIAS(4, 8, 7, 5);
2509 } else if (!Op.compare_lower("vale3")) {
2510 // SYS #6, C8, C7, #5
2511 SYS_ALIAS(6, 8, 7, 5);
2512 } else if (!Op.compare_lower("vaale1")) {
2513 // SYS #0, C8, C7, #7
2514 SYS_ALIAS(0, 8, 7, 7);
2515 } else if (!Op.compare_lower("ipas2e1")) {
2516 // SYS #4, C8, C4, #1
2517 SYS_ALIAS(4, 8, 4, 1);
2518 } else if (!Op.compare_lower("ipas2le1")) {
2519 // SYS #4, C8, C4, #5
2520 SYS_ALIAS(4, 8, 4, 5);
2521 } else if (!Op.compare_lower("ipas2e1is")) {
2522 // SYS #4, C8, C4, #1
2523 SYS_ALIAS(4, 8, 0, 1);
2524 } else if (!Op.compare_lower("ipas2le1is")) {
2525 // SYS #4, C8, C4, #5
2526 SYS_ALIAS(4, 8, 0, 5);
2527 } else if (!Op.compare_lower("vmalls12e1")) {
2528 // SYS #4, C8, C7, #6
2529 SYS_ALIAS(4, 8, 7, 6);
2530 } else if (!Op.compare_lower("vmalls12e1is")) {
2531 // SYS #4, C8, C3, #6
2532 SYS_ALIAS(4, 8, 3, 6);
2534 return TokError("invalid operand for TLBI instruction");
2540 Parser.Lex(); // Eat operand.
2542 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2543 bool HasRegister = false;
2545 // Check for the optional register operand.
2546 if (getLexer().is(AsmToken::Comma)) {
2547 Parser.Lex(); // Eat comma.
2549 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2550 return TokError("expected register operand");
2555 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2556 Parser.eatToEndOfStatement();
2557 return TokError("unexpected token in argument list");
2560 if (ExpectRegister && !HasRegister) {
2561 return TokError("specified " + Mnemonic + " op requires a register");
2563 else if (!ExpectRegister && HasRegister) {
2564 return TokError("specified " + Mnemonic + " op does not use a register");
2567 Parser.Lex(); // Consume the EndOfStatement
2571 AArch64AsmParser::OperandMatchResultTy
2572 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2573 const AsmToken &Tok = Parser.getTok();
2575 // Can be either a #imm style literal or an option name
2576 bool Hash = Tok.is(AsmToken::Hash);
2577 if (Hash || Tok.is(AsmToken::Integer)) {
2578 // Immediate operand.
2580 Parser.Lex(); // Eat the '#'
2581 const MCExpr *ImmVal;
2582 SMLoc ExprLoc = getLoc();
2583 if (getParser().parseExpression(ImmVal))
2584 return MatchOperand_ParseFail;
2585 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2587 Error(ExprLoc, "immediate value expected for barrier operand");
2588 return MatchOperand_ParseFail;
2590 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2591 Error(ExprLoc, "barrier operand out of range");
2592 return MatchOperand_ParseFail;
2595 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2596 return MatchOperand_Success;
2599 if (Tok.isNot(AsmToken::Identifier)) {
2600 TokError("invalid operand for instruction");
2601 return MatchOperand_ParseFail;
2605 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2607 TokError("invalid barrier option name");
2608 return MatchOperand_ParseFail;
2611 // The only valid named option for ISB is 'sy'
2612 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2613 TokError("'sy' or #imm operand expected");
2614 return MatchOperand_ParseFail;
2618 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2619 Parser.Lex(); // Consume the option
2621 return MatchOperand_Success;
2624 AArch64AsmParser::OperandMatchResultTy
2625 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2626 const AsmToken &Tok = Parser.getTok();
2628 if (Tok.isNot(AsmToken::Identifier))
2629 return MatchOperand_NoMatch;
2631 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2632 STI.getFeatureBits(), getContext()));
2633 Parser.Lex(); // Eat identifier
2635 return MatchOperand_Success;
2638 /// tryParseVectorRegister - Parse a vector register operand.
2639 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2640 if (Parser.getTok().isNot(AsmToken::Identifier))
2644 // Check for a vector register specifier first.
2646 int64_t Reg = tryMatchVectorRegister(Kind, false);
2650 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2651 // If there was an explicit qualifier, that goes on as a literal text
2655 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2657 // If there is an index specifier following the register, parse that too.
2658 if (Parser.getTok().is(AsmToken::LBrac)) {
2659 SMLoc SIdx = getLoc();
2660 Parser.Lex(); // Eat left bracket token.
2662 const MCExpr *ImmVal;
2663 if (getParser().parseExpression(ImmVal))
2665 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2667 TokError("immediate value expected for vector index");
2672 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2673 Error(E, "']' expected");
2677 Parser.Lex(); // Eat right bracket token.
2679 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2686 /// parseRegister - Parse a non-vector register operand.
2687 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2689 // Try for a vector register.
2690 if (!tryParseVectorRegister(Operands))
2693 // Try for a scalar register.
2694 int64_t Reg = tryParseRegister();
2698 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2700 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2701 // as a string token in the instruction itself.
2702 if (getLexer().getKind() == AsmToken::LBrac) {
2703 SMLoc LBracS = getLoc();
2705 const AsmToken &Tok = Parser.getTok();
2706 if (Tok.is(AsmToken::Integer)) {
2707 SMLoc IntS = getLoc();
2708 int64_t Val = Tok.getIntVal();
2711 if (getLexer().getKind() == AsmToken::RBrac) {
2712 SMLoc RBracS = getLoc();
2715 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2717 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2719 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2729 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2730 bool HasELFModifier = false;
2731 AArch64MCExpr::VariantKind RefKind;
2733 if (Parser.getTok().is(AsmToken::Colon)) {
2734 Parser.Lex(); // Eat ':"
2735 HasELFModifier = true;
2737 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2738 Error(Parser.getTok().getLoc(),
2739 "expect relocation specifier in operand after ':'");
2743 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2744 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2745 .Case("lo12", AArch64MCExpr::VK_LO12)
2746 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2747 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2748 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2749 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2750 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2751 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2752 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2753 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2754 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2755 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2756 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2757 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2758 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2759 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2760 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2761 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2762 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2763 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2764 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2765 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2766 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2767 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2768 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2769 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2770 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2771 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2772 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2773 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2774 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2775 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2776 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2777 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2778 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2779 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2780 .Default(AArch64MCExpr::VK_INVALID);
2782 if (RefKind == AArch64MCExpr::VK_INVALID) {
2783 Error(Parser.getTok().getLoc(),
2784 "expect relocation specifier in operand after ':'");
2788 Parser.Lex(); // Eat identifier
2790 if (Parser.getTok().isNot(AsmToken::Colon)) {
2791 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2794 Parser.Lex(); // Eat ':'
2797 if (getParser().parseExpression(ImmVal))
2801 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2806 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2807 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2808 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2810 Parser.Lex(); // Eat left bracket token.
2812 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2815 int64_t PrevReg = FirstReg;
2818 if (Parser.getTok().is(AsmToken::Minus)) {
2819 Parser.Lex(); // Eat the minus.
2821 SMLoc Loc = getLoc();
2823 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2826 // Any Kind suffices must match on all regs in the list.
2827 if (Kind != NextKind)
2828 return Error(Loc, "mismatched register size suffix");
2830 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2832 if (Space == 0 || Space > 3) {
2833 return Error(Loc, "invalid number of vectors");
2839 while (Parser.getTok().is(AsmToken::Comma)) {
2840 Parser.Lex(); // Eat the comma token.
2842 SMLoc Loc = getLoc();
2844 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2847 // Any Kind suffices must match on all regs in the list.
2848 if (Kind != NextKind)
2849 return Error(Loc, "mismatched register size suffix");
2851 // Registers must be incremental (with wraparound at 31)
2852 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2853 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2854 return Error(Loc, "registers must be sequential");
2861 if (Parser.getTok().isNot(AsmToken::RCurly))
2862 return Error(getLoc(), "'}' expected");
2863 Parser.Lex(); // Eat the '}' token.
2866 return Error(S, "invalid number of vectors");
2868 unsigned NumElements = 0;
2869 char ElementKind = 0;
2871 parseValidVectorKind(Kind, NumElements, ElementKind);
2873 Operands.push_back(AArch64Operand::CreateVectorList(
2874 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2876 // If there is an index specifier following the list, parse that too.
2877 if (Parser.getTok().is(AsmToken::LBrac)) {
2878 SMLoc SIdx = getLoc();
2879 Parser.Lex(); // Eat left bracket token.
2881 const MCExpr *ImmVal;
2882 if (getParser().parseExpression(ImmVal))
2884 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2886 TokError("immediate value expected for vector index");
2891 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2892 Error(E, "']' expected");
2896 Parser.Lex(); // Eat right bracket token.
2898 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2904 AArch64AsmParser::OperandMatchResultTy
2905 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2906 const AsmToken &Tok = Parser.getTok();
2907 if (!Tok.is(AsmToken::Identifier))
2908 return MatchOperand_NoMatch;
2910 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2912 MCContext &Ctx = getContext();
2913 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2914 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2915 return MatchOperand_NoMatch;
2918 Parser.Lex(); // Eat register
2920 if (Parser.getTok().isNot(AsmToken::Comma)) {
2922 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2923 return MatchOperand_Success;
2925 Parser.Lex(); // Eat comma.
2927 if (Parser.getTok().is(AsmToken::Hash))
2928 Parser.Lex(); // Eat hash
2930 if (Parser.getTok().isNot(AsmToken::Integer)) {
2931 Error(getLoc(), "index must be absent or #0");
2932 return MatchOperand_ParseFail;
2935 const MCExpr *ImmVal;
2936 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2937 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2938 Error(getLoc(), "index must be absent or #0");
2939 return MatchOperand_ParseFail;
2943 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2944 return MatchOperand_Success;
2947 /// parseOperand - Parse a arm instruction operand. For now this parses the
2948 /// operand regardless of the mnemonic.
2949 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2950 bool invertCondCode) {
2951 // Check if the current operand has a custom associated parser, if so, try to
2952 // custom parse the operand, or fallback to the general approach.
2953 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2954 if (ResTy == MatchOperand_Success)
2956 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2957 // there was a match, but an error occurred, in which case, just return that
2958 // the operand parsing failed.
2959 if (ResTy == MatchOperand_ParseFail)
2962 // Nothing custom, so do general case parsing.
2964 switch (getLexer().getKind()) {
2968 if (parseSymbolicImmVal(Expr))
2969 return Error(S, "invalid operand");
2971 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2972 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2975 case AsmToken::LBrac: {
2976 SMLoc Loc = Parser.getTok().getLoc();
2977 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2979 Parser.Lex(); // Eat '['
2981 // There's no comma after a '[', so we can parse the next operand
2983 return parseOperand(Operands, false, false);
2985 case AsmToken::LCurly:
2986 return parseVectorList(Operands);
2987 case AsmToken::Identifier: {
2988 // If we're expecting a Condition Code operand, then just parse that.
2990 return parseCondCode(Operands, invertCondCode);
2992 // If it's a register name, parse it.
2993 if (!parseRegister(Operands))
2996 // This could be an optional "shift" or "extend" operand.
2997 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2998 // We can only continue if no tokens were eaten.
2999 if (GotShift != MatchOperand_NoMatch)
3002 // This was not a register so parse other operands that start with an
3003 // identifier (like labels) as expressions and create them as immediates.
3004 const MCExpr *IdVal;
3006 if (getParser().parseExpression(IdVal))
3009 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3010 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3013 case AsmToken::Integer:
3014 case AsmToken::Real:
3015 case AsmToken::Hash: {
3016 // #42 -> immediate.
3018 if (getLexer().is(AsmToken::Hash))
3021 // Parse a negative sign
3022 bool isNegative = false;
3023 if (Parser.getTok().is(AsmToken::Minus)) {
3025 // We need to consume this token only when we have a Real, otherwise
3026 // we let parseSymbolicImmVal take care of it
3027 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3031 // The only Real that should come through here is a literal #0.0 for
3032 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3033 // so convert the value.
3034 const AsmToken &Tok = Parser.getTok();
3035 if (Tok.is(AsmToken::Real)) {
3036 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3037 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3038 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3039 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3040 Mnemonic != "fcmlt")
3041 return TokError("unexpected floating point literal");
3042 else if (IntVal != 0 || isNegative)
3043 return TokError("expected floating-point constant #0.0");
3044 Parser.Lex(); // Eat the token.
3047 AArch64Operand::CreateToken("#0", false, S, getContext()));
3049 AArch64Operand::CreateToken(".0", false, S, getContext()));
3053 const MCExpr *ImmVal;
3054 if (parseSymbolicImmVal(ImmVal))
3057 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3058 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3061 case AsmToken::Equal: {
3062 SMLoc Loc = Parser.getTok().getLoc();
3063 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3064 return Error(Loc, "unexpected token in operand");
3065 Parser.Lex(); // Eat '='
3066 const MCExpr *SubExprVal;
3067 if (getParser().parseExpression(SubExprVal))
3070 MCContext& Ctx = getContext();
3071 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3072 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3073 if (isa<MCConstantExpr>(SubExprVal) && Operands.size() >= 2 &&
3074 static_cast<AArch64Operand &>(*Operands[1]).isReg()) {
3075 bool IsXReg = AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3076 Operands[1]->getReg());
3077 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3078 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3079 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3083 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3084 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3085 Operands.push_back(AArch64Operand::CreateImm(
3086 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3088 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3089 ShiftAmt, true, S, E, Ctx));
3093 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3094 const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
3095 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3101 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3103 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3104 StringRef Name, SMLoc NameLoc,
3105 OperandVector &Operands) {
3106 Name = StringSwitch<StringRef>(Name.lower())
3107 .Case("beq", "b.eq")
3108 .Case("bne", "b.ne")
3109 .Case("bhs", "b.hs")
3110 .Case("bcs", "b.cs")
3111 .Case("blo", "b.lo")
3112 .Case("bcc", "b.cc")
3113 .Case("bmi", "b.mi")
3114 .Case("bpl", "b.pl")
3115 .Case("bvs", "b.vs")
3116 .Case("bvc", "b.vc")
3117 .Case("bhi", "b.hi")
3118 .Case("bls", "b.ls")
3119 .Case("bge", "b.ge")
3120 .Case("blt", "b.lt")
3121 .Case("bgt", "b.gt")
3122 .Case("ble", "b.le")
3123 .Case("bal", "b.al")
3124 .Case("bnv", "b.nv")
3127 // First check for the AArch64-specific .req directive.
3128 if (Parser.getTok().is(AsmToken::Identifier) &&
3129 Parser.getTok().getIdentifier() == ".req") {
3130 parseDirectiveReq(Name, NameLoc);
3131 // We always return 'error' for this, as we're done with this
3132 // statement and don't need to match the 'instruction."
3136 // Create the leading tokens for the mnemonic, split by '.' characters.
3137 size_t Start = 0, Next = Name.find('.');
3138 StringRef Head = Name.slice(Start, Next);
3140 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3141 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3142 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3143 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3144 Parser.eatToEndOfStatement();
3149 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3152 // Handle condition codes for a branch mnemonic
3153 if (Head == "b" && Next != StringRef::npos) {
3155 Next = Name.find('.', Start + 1);
3156 Head = Name.slice(Start + 1, Next);
3158 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3159 (Head.data() - Name.data()));
3160 AArch64CC::CondCode CC = parseCondCodeString(Head);
3161 if (CC == AArch64CC::Invalid)
3162 return Error(SuffixLoc, "invalid condition code");
3164 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3166 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3169 // Add the remaining tokens in the mnemonic.
3170 while (Next != StringRef::npos) {
3172 Next = Name.find('.', Start + 1);
3173 Head = Name.slice(Start, Next);
3174 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3175 (Head.data() - Name.data()) + 1);
3177 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3180 // Conditional compare instructions have a Condition Code operand, which needs
3181 // to be parsed and an immediate operand created.
3182 bool condCodeFourthOperand =
3183 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3184 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3185 Head == "csinc" || Head == "csinv" || Head == "csneg");
3187 // These instructions are aliases to some of the conditional select
3188 // instructions. However, the condition code is inverted in the aliased
3191 // FIXME: Is this the correct way to handle these? Or should the parser
3192 // generate the aliased instructions directly?
3193 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3194 bool condCodeThirdOperand =
3195 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3197 // Read the remaining operands.
3198 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3199 // Read the first operand.
3200 if (parseOperand(Operands, false, false)) {
3201 Parser.eatToEndOfStatement();
3206 while (getLexer().is(AsmToken::Comma)) {
3207 Parser.Lex(); // Eat the comma.
3209 // Parse and remember the operand.
3210 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3211 (N == 3 && condCodeThirdOperand) ||
3212 (N == 2 && condCodeSecondOperand),
3213 condCodeSecondOperand || condCodeThirdOperand)) {
3214 Parser.eatToEndOfStatement();
3218 // After successfully parsing some operands there are two special cases to
3219 // consider (i.e. notional operands not separated by commas). Both are due
3220 // to memory specifiers:
3221 // + An RBrac will end an address for load/store/prefetch
3222 // + An '!' will indicate a pre-indexed operation.
3224 // It's someone else's responsibility to make sure these tokens are sane
3225 // in the given context!
3226 if (Parser.getTok().is(AsmToken::RBrac)) {
3227 SMLoc Loc = Parser.getTok().getLoc();
3228 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3233 if (Parser.getTok().is(AsmToken::Exclaim)) {
3234 SMLoc Loc = Parser.getTok().getLoc();
3235 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3244 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3245 SMLoc Loc = Parser.getTok().getLoc();
3246 Parser.eatToEndOfStatement();
3247 return Error(Loc, "unexpected token in argument list");
3250 Parser.Lex(); // Consume the EndOfStatement
3254 // FIXME: This entire function is a giant hack to provide us with decent
3255 // operand range validation/diagnostics until TableGen/MC can be extended
3256 // to support autogeneration of this kind of validation.
3257 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3258 SmallVectorImpl<SMLoc> &Loc) {
3259 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3260 // Check for indexed addressing modes w/ the base register being the
3261 // same as a destination/source register or pair load where
3262 // the Rt == Rt2. All of those are undefined behaviour.
3263 switch (Inst.getOpcode()) {
3264 case AArch64::LDPSWpre:
3265 case AArch64::LDPWpost:
3266 case AArch64::LDPWpre:
3267 case AArch64::LDPXpost:
3268 case AArch64::LDPXpre: {
3269 unsigned Rt = Inst.getOperand(1).getReg();
3270 unsigned Rt2 = Inst.getOperand(2).getReg();
3271 unsigned Rn = Inst.getOperand(3).getReg();
3272 if (RI->isSubRegisterEq(Rn, Rt))
3273 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3274 "is also a destination");
3275 if (RI->isSubRegisterEq(Rn, Rt2))
3276 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3277 "is also a destination");
3280 case AArch64::LDPDi:
3281 case AArch64::LDPQi:
3282 case AArch64::LDPSi:
3283 case AArch64::LDPSWi:
3284 case AArch64::LDPWi:
3285 case AArch64::LDPXi: {
3286 unsigned Rt = Inst.getOperand(0).getReg();
3287 unsigned Rt2 = Inst.getOperand(1).getReg();
3289 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3292 case AArch64::LDPDpost:
3293 case AArch64::LDPDpre:
3294 case AArch64::LDPQpost:
3295 case AArch64::LDPQpre:
3296 case AArch64::LDPSpost:
3297 case AArch64::LDPSpre:
3298 case AArch64::LDPSWpost: {
3299 unsigned Rt = Inst.getOperand(1).getReg();
3300 unsigned Rt2 = Inst.getOperand(2).getReg();
3302 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3305 case AArch64::STPDpost:
3306 case AArch64::STPDpre:
3307 case AArch64::STPQpost:
3308 case AArch64::STPQpre:
3309 case AArch64::STPSpost:
3310 case AArch64::STPSpre:
3311 case AArch64::STPWpost:
3312 case AArch64::STPWpre:
3313 case AArch64::STPXpost:
3314 case AArch64::STPXpre: {
3315 unsigned Rt = Inst.getOperand(1).getReg();
3316 unsigned Rt2 = Inst.getOperand(2).getReg();
3317 unsigned Rn = Inst.getOperand(3).getReg();
3318 if (RI->isSubRegisterEq(Rn, Rt))
3319 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3320 "is also a source");
3321 if (RI->isSubRegisterEq(Rn, Rt2))
3322 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3323 "is also a source");
3326 case AArch64::LDRBBpre:
3327 case AArch64::LDRBpre:
3328 case AArch64::LDRHHpre:
3329 case AArch64::LDRHpre:
3330 case AArch64::LDRSBWpre:
3331 case AArch64::LDRSBXpre:
3332 case AArch64::LDRSHWpre:
3333 case AArch64::LDRSHXpre:
3334 case AArch64::LDRSWpre:
3335 case AArch64::LDRWpre:
3336 case AArch64::LDRXpre:
3337 case AArch64::LDRBBpost:
3338 case AArch64::LDRBpost:
3339 case AArch64::LDRHHpost:
3340 case AArch64::LDRHpost:
3341 case AArch64::LDRSBWpost:
3342 case AArch64::LDRSBXpost:
3343 case AArch64::LDRSHWpost:
3344 case AArch64::LDRSHXpost:
3345 case AArch64::LDRSWpost:
3346 case AArch64::LDRWpost:
3347 case AArch64::LDRXpost: {
3348 unsigned Rt = Inst.getOperand(1).getReg();
3349 unsigned Rn = Inst.getOperand(2).getReg();
3350 if (RI->isSubRegisterEq(Rn, Rt))
3351 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3352 "is also a source");
3355 case AArch64::STRBBpost:
3356 case AArch64::STRBpost:
3357 case AArch64::STRHHpost:
3358 case AArch64::STRHpost:
3359 case AArch64::STRWpost:
3360 case AArch64::STRXpost:
3361 case AArch64::STRBBpre:
3362 case AArch64::STRBpre:
3363 case AArch64::STRHHpre:
3364 case AArch64::STRHpre:
3365 case AArch64::STRWpre:
3366 case AArch64::STRXpre: {
3367 unsigned Rt = Inst.getOperand(1).getReg();
3368 unsigned Rn = Inst.getOperand(2).getReg();
3369 if (RI->isSubRegisterEq(Rn, Rt))
3370 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3371 "is also a source");
3376 // Now check immediate ranges. Separate from the above as there is overlap
3377 // in the instructions being checked and this keeps the nested conditionals
3379 switch (Inst.getOpcode()) {
3380 case AArch64::ADDSWri:
3381 case AArch64::ADDSXri:
3382 case AArch64::ADDWri:
3383 case AArch64::ADDXri:
3384 case AArch64::SUBSWri:
3385 case AArch64::SUBSXri:
3386 case AArch64::SUBWri:
3387 case AArch64::SUBXri: {
3388 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3389 // some slight duplication here.
3390 if (Inst.getOperand(2).isExpr()) {
3391 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3392 AArch64MCExpr::VariantKind ELFRefKind;
3393 MCSymbolRefExpr::VariantKind DarwinRefKind;
3395 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3396 return Error(Loc[2], "invalid immediate expression");
3399 // Only allow these with ADDXri.
3400 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3401 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3402 Inst.getOpcode() == AArch64::ADDXri)
3405 // Only allow these with ADDXri/ADDWri
3406 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3407 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3408 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3409 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3410 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3411 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3412 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3413 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3414 (Inst.getOpcode() == AArch64::ADDXri ||
3415 Inst.getOpcode() == AArch64::ADDWri))
3418 // Don't allow expressions in the immediate field otherwise
3419 return Error(Loc[2], "invalid immediate expression");
3428 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3430 case Match_MissingFeature:
3432 "instruction requires a CPU feature not currently enabled");
3433 case Match_InvalidOperand:
3434 return Error(Loc, "invalid operand for instruction");
3435 case Match_InvalidSuffix:
3436 return Error(Loc, "invalid type suffix for instruction");
3437 case Match_InvalidCondCode:
3438 return Error(Loc, "expected AArch64 condition code");
3439 case Match_AddSubRegExtendSmall:
3441 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3442 case Match_AddSubRegExtendLarge:
3444 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3445 case Match_AddSubSecondSource:
3447 "expected compatible register, symbol or integer in range [0, 4095]");
3448 case Match_LogicalSecondSource:
3449 return Error(Loc, "expected compatible register or logical immediate");
3450 case Match_InvalidMovImm32Shift:
3451 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3452 case Match_InvalidMovImm64Shift:
3453 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3454 case Match_AddSubRegShift32:
3456 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3457 case Match_AddSubRegShift64:
3459 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3460 case Match_InvalidFPImm:
3462 "expected compatible register or floating-point constant");
3463 case Match_InvalidMemoryIndexedSImm9:
3464 return Error(Loc, "index must be an integer in range [-256, 255].");
3465 case Match_InvalidMemoryIndexed4SImm7:
3466 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3467 case Match_InvalidMemoryIndexed8SImm7:
3468 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3469 case Match_InvalidMemoryIndexed16SImm7:
3470 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3471 case Match_InvalidMemoryWExtend8:
3473 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3474 case Match_InvalidMemoryWExtend16:
3476 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3477 case Match_InvalidMemoryWExtend32:
3479 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3480 case Match_InvalidMemoryWExtend64:
3482 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3483 case Match_InvalidMemoryWExtend128:
3485 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3486 case Match_InvalidMemoryXExtend8:
3488 "expected 'lsl' or 'sxtx' with optional shift of #0");
3489 case Match_InvalidMemoryXExtend16:
3491 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3492 case Match_InvalidMemoryXExtend32:
3494 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3495 case Match_InvalidMemoryXExtend64:
3497 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3498 case Match_InvalidMemoryXExtend128:
3500 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3501 case Match_InvalidMemoryIndexed1:
3502 return Error(Loc, "index must be an integer in range [0, 4095].");
3503 case Match_InvalidMemoryIndexed2:
3504 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3505 case Match_InvalidMemoryIndexed4:
3506 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3507 case Match_InvalidMemoryIndexed8:
3508 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3509 case Match_InvalidMemoryIndexed16:
3510 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3511 case Match_InvalidImm0_7:
3512 return Error(Loc, "immediate must be an integer in range [0, 7].");
3513 case Match_InvalidImm0_15:
3514 return Error(Loc, "immediate must be an integer in range [0, 15].");
3515 case Match_InvalidImm0_31:
3516 return Error(Loc, "immediate must be an integer in range [0, 31].");
3517 case Match_InvalidImm0_63:
3518 return Error(Loc, "immediate must be an integer in range [0, 63].");
3519 case Match_InvalidImm0_127:
3520 return Error(Loc, "immediate must be an integer in range [0, 127].");
3521 case Match_InvalidImm0_65535:
3522 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3523 case Match_InvalidImm1_8:
3524 return Error(Loc, "immediate must be an integer in range [1, 8].");
3525 case Match_InvalidImm1_16:
3526 return Error(Loc, "immediate must be an integer in range [1, 16].");
3527 case Match_InvalidImm1_32:
3528 return Error(Loc, "immediate must be an integer in range [1, 32].");
3529 case Match_InvalidImm1_64:
3530 return Error(Loc, "immediate must be an integer in range [1, 64].");
3531 case Match_InvalidIndex1:
3532 return Error(Loc, "expected lane specifier '[1]'");
3533 case Match_InvalidIndexB:
3534 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3535 case Match_InvalidIndexH:
3536 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3537 case Match_InvalidIndexS:
3538 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3539 case Match_InvalidIndexD:
3540 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3541 case Match_InvalidLabel:
3542 return Error(Loc, "expected label or encodable integer pc offset");
3544 return Error(Loc, "expected readable system register");
3546 return Error(Loc, "expected writable system register or pstate");
3547 case Match_MnemonicFail:
3548 return Error(Loc, "unrecognized instruction mnemonic");
3550 llvm_unreachable("unexpected error code!");
3554 static const char *getSubtargetFeatureName(unsigned Val);
3556 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3557 OperandVector &Operands,
3559 unsigned &ErrorInfo,
3560 bool MatchingInlineAsm) {
3561 assert(!Operands.empty() && "Unexpect empty operand list!");
3562 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3563 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3565 StringRef Tok = Op.getToken();
3566 unsigned NumOperands = Operands.size();
3568 if (NumOperands == 4 && Tok == "lsl") {
3569 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3570 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3571 if (Op2.isReg() && Op3.isImm()) {
3572 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3574 uint64_t Op3Val = Op3CE->getValue();
3575 uint64_t NewOp3Val = 0;
3576 uint64_t NewOp4Val = 0;
3577 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3579 NewOp3Val = (32 - Op3Val) & 0x1f;
3580 NewOp4Val = 31 - Op3Val;
3582 NewOp3Val = (64 - Op3Val) & 0x3f;
3583 NewOp4Val = 63 - Op3Val;
3586 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3587 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3589 Operands[0] = AArch64Operand::CreateToken(
3590 "ubfm", false, Op.getStartLoc(), getContext());
3591 Operands.push_back(AArch64Operand::CreateImm(
3592 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3593 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3594 Op3.getEndLoc(), getContext());
3597 } else if (NumOperands == 5) {
3598 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3599 // UBFIZ -> UBFM aliases.
3600 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3601 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3602 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3603 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3605 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3606 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3607 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3609 if (Op3CE && Op4CE) {
3610 uint64_t Op3Val = Op3CE->getValue();
3611 uint64_t Op4Val = Op4CE->getValue();
3613 uint64_t RegWidth = 0;
3614 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3620 if (Op3Val >= RegWidth)
3621 return Error(Op3.getStartLoc(),
3622 "expected integer in range [0, 31]");
3623 if (Op4Val < 1 || Op4Val > RegWidth)
3624 return Error(Op4.getStartLoc(),
3625 "expected integer in range [1, 32]");
3627 uint64_t NewOp3Val = 0;
3628 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3630 NewOp3Val = (32 - Op3Val) & 0x1f;
3632 NewOp3Val = (64 - Op3Val) & 0x3f;
3634 uint64_t NewOp4Val = Op4Val - 1;
3636 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3637 return Error(Op4.getStartLoc(),
3638 "requested insert overflows register");
3640 const MCExpr *NewOp3 =
3641 MCConstantExpr::Create(NewOp3Val, getContext());
3642 const MCExpr *NewOp4 =
3643 MCConstantExpr::Create(NewOp4Val, getContext());
3644 Operands[3] = AArch64Operand::CreateImm(
3645 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3646 Operands[4] = AArch64Operand::CreateImm(
3647 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3649 Operands[0] = AArch64Operand::CreateToken(
3650 "bfm", false, Op.getStartLoc(), getContext());
3651 else if (Tok == "sbfiz")
3652 Operands[0] = AArch64Operand::CreateToken(
3653 "sbfm", false, Op.getStartLoc(), getContext());
3654 else if (Tok == "ubfiz")
3655 Operands[0] = AArch64Operand::CreateToken(
3656 "ubfm", false, Op.getStartLoc(), getContext());
3658 llvm_unreachable("No valid mnemonic for alias?");
3662 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3663 // UBFX -> UBFM aliases.
3664 } else if (NumOperands == 5 &&
3665 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3666 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3667 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3668 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3670 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3671 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3672 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3674 if (Op3CE && Op4CE) {
3675 uint64_t Op3Val = Op3CE->getValue();
3676 uint64_t Op4Val = Op4CE->getValue();
3678 uint64_t RegWidth = 0;
3679 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3685 if (Op3Val >= RegWidth)
3686 return Error(Op3.getStartLoc(),
3687 "expected integer in range [0, 31]");
3688 if (Op4Val < 1 || Op4Val > RegWidth)
3689 return Error(Op4.getStartLoc(),
3690 "expected integer in range [1, 32]");
3692 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3694 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3695 return Error(Op4.getStartLoc(),
3696 "requested extract overflows register");
3698 const MCExpr *NewOp4 =
3699 MCConstantExpr::Create(NewOp4Val, getContext());
3700 Operands[4] = AArch64Operand::CreateImm(
3701 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3703 Operands[0] = AArch64Operand::CreateToken(
3704 "bfm", false, Op.getStartLoc(), getContext());
3705 else if (Tok == "sbfx")
3706 Operands[0] = AArch64Operand::CreateToken(
3707 "sbfm", false, Op.getStartLoc(), getContext());
3708 else if (Tok == "ubfx")
3709 Operands[0] = AArch64Operand::CreateToken(
3710 "ubfm", false, Op.getStartLoc(), getContext());
3712 llvm_unreachable("No valid mnemonic for alias?");
3717 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3718 // InstAlias can't quite handle this since the reg classes aren't
3720 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3721 // The source register can be Wn here, but the matcher expects a
3722 // GPR64. Twiddle it here if necessary.
3723 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3725 unsigned Reg = getXRegFromWReg(Op.getReg());
3726 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3727 Op.getEndLoc(), getContext());
3730 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3731 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3732 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3734 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3736 // The source register can be Wn here, but the matcher expects a
3737 // GPR64. Twiddle it here if necessary.
3738 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3740 unsigned Reg = getXRegFromWReg(Op.getReg());
3741 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3742 Op.getEndLoc(), getContext());
3746 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3747 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3748 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3750 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3752 // The source register can be Wn here, but the matcher expects a
3753 // GPR32. Twiddle it here if necessary.
3754 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3756 unsigned Reg = getWRegFromXReg(Op.getReg());
3757 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3758 Op.getEndLoc(), getContext());
3763 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3764 if (NumOperands == 3 && Tok == "fmov") {
3765 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3766 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3767 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3769 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3773 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3774 Op.getEndLoc(), getContext());
3779 // First try to match against the secondary set of tables containing the
3780 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3781 unsigned MatchResult =
3782 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3784 // If that fails, try against the alternate table containing long-form NEON:
3785 // "fadd v0.2s, v1.2s, v2.2s"
3786 if (MatchResult != Match_Success)
3788 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3790 switch (MatchResult) {
3791 case Match_Success: {
3792 // Perform range checking and other semantic validations
3793 SmallVector<SMLoc, 8> OperandLocs;
3794 NumOperands = Operands.size();
3795 for (unsigned i = 1; i < NumOperands; ++i)
3796 OperandLocs.push_back(Operands[i]->getStartLoc());
3797 if (validateInstruction(Inst, OperandLocs))
3801 Out.EmitInstruction(Inst, STI);
3804 case Match_MissingFeature: {
3805 assert(ErrorInfo && "Unknown missing feature!");
3806 // Special case the error message for the very common case where only
3807 // a single subtarget feature is missing (neon, e.g.).
3808 std::string Msg = "instruction requires:";
3810 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3811 if (ErrorInfo & Mask) {
3813 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3817 return Error(IDLoc, Msg);
3819 case Match_MnemonicFail:
3820 return showMatchError(IDLoc, MatchResult);
3821 case Match_InvalidOperand: {
3822 SMLoc ErrorLoc = IDLoc;
3823 if (ErrorInfo != ~0U) {
3824 if (ErrorInfo >= Operands.size())
3825 return Error(IDLoc, "too few operands for instruction");
3827 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3828 if (ErrorLoc == SMLoc())
3831 // If the match failed on a suffix token operand, tweak the diagnostic
3833 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3834 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3835 MatchResult = Match_InvalidSuffix;
3837 return showMatchError(ErrorLoc, MatchResult);
3839 case Match_InvalidMemoryIndexed1:
3840 case Match_InvalidMemoryIndexed2:
3841 case Match_InvalidMemoryIndexed4:
3842 case Match_InvalidMemoryIndexed8:
3843 case Match_InvalidMemoryIndexed16:
3844 case Match_InvalidCondCode:
3845 case Match_AddSubRegExtendSmall:
3846 case Match_AddSubRegExtendLarge:
3847 case Match_AddSubSecondSource:
3848 case Match_LogicalSecondSource:
3849 case Match_AddSubRegShift32:
3850 case Match_AddSubRegShift64:
3851 case Match_InvalidMovImm32Shift:
3852 case Match_InvalidMovImm64Shift:
3853 case Match_InvalidFPImm:
3854 case Match_InvalidMemoryWExtend8:
3855 case Match_InvalidMemoryWExtend16:
3856 case Match_InvalidMemoryWExtend32:
3857 case Match_InvalidMemoryWExtend64:
3858 case Match_InvalidMemoryWExtend128:
3859 case Match_InvalidMemoryXExtend8:
3860 case Match_InvalidMemoryXExtend16:
3861 case Match_InvalidMemoryXExtend32:
3862 case Match_InvalidMemoryXExtend64:
3863 case Match_InvalidMemoryXExtend128:
3864 case Match_InvalidMemoryIndexed4SImm7:
3865 case Match_InvalidMemoryIndexed8SImm7:
3866 case Match_InvalidMemoryIndexed16SImm7:
3867 case Match_InvalidMemoryIndexedSImm9:
3868 case Match_InvalidImm0_7:
3869 case Match_InvalidImm0_15:
3870 case Match_InvalidImm0_31:
3871 case Match_InvalidImm0_63:
3872 case Match_InvalidImm0_127:
3873 case Match_InvalidImm0_65535:
3874 case Match_InvalidImm1_8:
3875 case Match_InvalidImm1_16:
3876 case Match_InvalidImm1_32:
3877 case Match_InvalidImm1_64:
3878 case Match_InvalidIndex1:
3879 case Match_InvalidIndexB:
3880 case Match_InvalidIndexH:
3881 case Match_InvalidIndexS:
3882 case Match_InvalidIndexD:
3883 case Match_InvalidLabel:
3886 if (ErrorInfo >= Operands.size())
3887 return Error(IDLoc, "too few operands for instruction");
3888 // Any time we get here, there's nothing fancy to do. Just get the
3889 // operand SMLoc and display the diagnostic.
3890 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3891 if (ErrorLoc == SMLoc())
3893 return showMatchError(ErrorLoc, MatchResult);
3897 llvm_unreachable("Implement any new match types added!");
3901 /// ParseDirective parses the arm specific directives
3902 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3903 StringRef IDVal = DirectiveID.getIdentifier();
3904 SMLoc Loc = DirectiveID.getLoc();
3905 if (IDVal == ".hword")
3906 return parseDirectiveWord(2, Loc);
3907 if (IDVal == ".word")
3908 return parseDirectiveWord(4, Loc);
3909 if (IDVal == ".xword")
3910 return parseDirectiveWord(8, Loc);
3911 if (IDVal == ".tlsdesccall")
3912 return parseDirectiveTLSDescCall(Loc);
3913 if (IDVal == ".ltorg" || IDVal == ".pool")
3914 return parseDirectiveLtorg(Loc);
3915 if (IDVal == ".unreq")
3916 return parseDirectiveUnreq(DirectiveID.getLoc());
3918 return parseDirectiveLOH(IDVal, Loc);
3921 /// parseDirectiveWord
3922 /// ::= .word [ expression (, expression)* ]
3923 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3924 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3926 const MCExpr *Value;
3927 if (getParser().parseExpression(Value))
3930 getParser().getStreamer().EmitValue(Value, Size);
3932 if (getLexer().is(AsmToken::EndOfStatement))
3935 // FIXME: Improve diagnostic.
3936 if (getLexer().isNot(AsmToken::Comma))
3937 return Error(L, "unexpected token in directive");
3946 // parseDirectiveTLSDescCall:
3947 // ::= .tlsdesccall symbol
3948 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3950 if (getParser().parseIdentifier(Name))
3951 return Error(L, "expected symbol after directive");
3953 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3954 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3955 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3958 Inst.setOpcode(AArch64::TLSDESCCALL);
3959 Inst.addOperand(MCOperand::CreateExpr(Expr));
3961 getParser().getStreamer().EmitInstruction(Inst, STI);
3965 /// ::= .loh <lohName | lohId> label1, ..., labelN
3966 /// The number of arguments depends on the loh identifier.
3967 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3968 if (IDVal != MCLOHDirectiveName())
3971 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3972 if (getParser().getTok().isNot(AsmToken::Integer))
3973 return TokError("expected an identifier or a number in directive");
3974 // We successfully get a numeric value for the identifier.
3975 // Check if it is valid.
3976 int64_t Id = getParser().getTok().getIntVal();
3977 Kind = (MCLOHType)Id;
3978 // Check that Id does not overflow MCLOHType.
3979 if (!isValidMCLOHType(Kind) || Id != Kind)
3980 return TokError("invalid numeric identifier in directive");
3982 StringRef Name = getTok().getIdentifier();
3983 // We successfully parse an identifier.
3984 // Check if it is a recognized one.
3985 int Id = MCLOHNameToId(Name);
3988 return TokError("invalid identifier in directive");
3989 Kind = (MCLOHType)Id;
3991 // Consume the identifier.
3993 // Get the number of arguments of this LOH.
3994 int NbArgs = MCLOHIdToNbArgs(Kind);
3996 assert(NbArgs != -1 && "Invalid number of arguments");
3998 SmallVector<MCSymbol *, 3> Args;
3999 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4001 if (getParser().parseIdentifier(Name))
4002 return TokError("expected identifier in directive");
4003 Args.push_back(getContext().GetOrCreateSymbol(Name));
4005 if (Idx + 1 == NbArgs)
4007 if (getLexer().isNot(AsmToken::Comma))
4008 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4011 if (getLexer().isNot(AsmToken::EndOfStatement))
4012 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4014 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4018 /// parseDirectiveLtorg
4019 /// ::= .ltorg | .pool
4020 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4021 getTargetStreamer().emitCurrentConstantPool();
4025 /// parseDirectiveReq
4026 /// ::= name .req registername
4027 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4028 Parser.Lex(); // Eat the '.req' token.
4029 SMLoc SRegLoc = getLoc();
4030 unsigned RegNum = tryParseRegister();
4031 bool IsVector = false;
4033 if (RegNum == static_cast<unsigned>(-1)) {
4035 RegNum = tryMatchVectorRegister(Kind, false);
4036 if (!Kind.empty()) {
4037 Error(SRegLoc, "vector register without type specifier expected");
4043 if (RegNum == static_cast<unsigned>(-1)) {
4044 Parser.eatToEndOfStatement();
4045 Error(SRegLoc, "register name or alias expected");
4049 // Shouldn't be anything else.
4050 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4051 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4052 Parser.eatToEndOfStatement();
4056 Parser.Lex(); // Consume the EndOfStatement
4058 auto pair = std::make_pair(IsVector, RegNum);
4059 if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
4060 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4065 /// parseDirectiveUneq
4066 /// ::= .unreq registername
4067 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4068 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4069 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4070 Parser.eatToEndOfStatement();
4073 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4074 Parser.Lex(); // Eat the identifier.
4079 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4080 AArch64MCExpr::VariantKind &ELFRefKind,
4081 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4083 ELFRefKind = AArch64MCExpr::VK_INVALID;
4084 DarwinRefKind = MCSymbolRefExpr::VK_None;
4087 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4088 ELFRefKind = AE->getKind();
4089 Expr = AE->getSubExpr();
4092 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4094 // It's a simple symbol reference with no addend.
4095 DarwinRefKind = SE->getKind();
4099 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4103 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4106 DarwinRefKind = SE->getKind();
4108 if (BE->getOpcode() != MCBinaryExpr::Add &&
4109 BE->getOpcode() != MCBinaryExpr::Sub)
4112 // See if the addend is is a constant, otherwise there's more going
4113 // on here than we can deal with.
4114 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4118 Addend = AddendExpr->getValue();
4119 if (BE->getOpcode() == MCBinaryExpr::Sub)
4122 // It's some symbol reference + a constant addend, but really
4123 // shouldn't use both Darwin and ELF syntax.
4124 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4125 DarwinRefKind == MCSymbolRefExpr::VK_None;
4128 /// Force static initialization.
4129 extern "C" void LLVMInitializeAArch64AsmParser() {
4130 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4131 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4133 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
4134 RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
4137 #define GET_REGISTER_MATCHER
4138 #define GET_SUBTARGET_FEATURE_NAME
4139 #define GET_MATCHER_IMPLEMENTATION
4140 #include "AArch64GenAsmMatcher.inc"
4142 // Define this matcher function after the auto-generated include so we
4143 // have the match class enum definitions.
4144 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4146 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4147 // If the kind is a token for a literal immediate, check if our asm
4148 // operand matches. This is for InstAliases which have a fixed-value
4149 // immediate in the syntax.
4150 int64_t ExpectedVal;
4153 return Match_InvalidOperand;
4195 return Match_InvalidOperand;
4196 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4198 return Match_InvalidOperand;
4199 if (CE->getValue() == ExpectedVal)
4200 return Match_Success;
4201 return Match_InvalidOperand;