1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
class AArch64AsmParser : public MCTargetAsmParser {
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<bool, unsigned> > RegisterReqs;

  // Fetch the target streamer; the constructor below guarantees one exists.
  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Location of the token currently under the lexer's cursor.
  SMLoc getLoc() const { return Parser.getTok().getLoc(); }

  // Hand-written parsers for constructs the generated matcher cannot handle.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,

  // Diagnostics are forwarded to the generic MCAsmParser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  // Target-specific assembler directive handlers (.word, .tlsdesccall,
  // .loh, .ltorg, .req, .unreq).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);
  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  // Custom operand parsers invoked by the generated matcher.
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

  // Target-specific match-result codes, appended after the generic ones.
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII,
                   const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);
    // Ensure a target streamer exists so getTargetStreamer() is always safe.
    if (Parser.getStreamer().getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(Parser.getStreamer());

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

  // MCTargetAsmParser interface.
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  // Decompose a symbol expression into its ELF/Darwin modifier and addend.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
} // end anonymous namespace
/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
class AArch64Operand : public MCParsedAsmOperand {
  // Source range of this operand, used for diagnostics.
  SMLoc StartLoc, EndLoc;

  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.

  struct VectorListOp {
    unsigned NumElements;
    unsigned ElementKind;

  struct VectorIndexOp {

  struct ShiftedImmOp {
    unsigned ShiftAmount;

    AArch64CC::CondCode Code;

    unsigned Val; // Encoded 8-bit representation.

    unsigned Val; // Not the enum since not all values have names.

    uint64_t FeatureBits; // We need to pass through information about which
                          // core we are compiling for so that the SysReg
                          // Mappers can appropriately conditionalize.

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    bool HasExplicitAmount;

  // Per-kind payload storage; which member is meaningful is selected by Kind.
  struct VectorListOp VectorList;
  struct VectorIndexOp VectorIndex;
  struct ShiftedImmOp ShiftedImm;
  struct CondCodeOp CondCode;
  struct FPImmOp FPImm;
  struct BarrierOp Barrier;
  struct SysRegOp SysReg;
  struct SysCRImmOp SysCRImm;
  struct PrefetchOp Prefetch;
  struct ShiftExtendOp ShiftExtend;

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.

  AArch64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}

  // Copy constructor: duplicates the location info and the kind-specific
  // payload of the source operand.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    StartLoc = o.StartLoc;
    ShiftedImm = o.ShiftedImm;
    CondCode = o.CondCode;
    VectorList = o.VectorList;
    VectorIndex = o.VectorIndex;
    SysCRImm = o.SysCRImm;
    Prefetch = o.Prefetch;
    ShiftExtend = o.ShiftExtend;

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }
  // Typed accessors. Each asserts that this operand's Kind matches before
  // reading the corresponding payload member.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);

  uint64_t getSysRegFeatureBits() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return SysReg.FeatureBits;

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;

  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  // Immediate-range predicates queried by the generated matcher. Each one
  // accepts only immediates that fold to an MCConstantExpr, then checks the
  // value against the encodable range for that operand class.

  // Signed 9-bit immediate: [-256, 255].
  bool isSImm9() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val < 256);

  // Signed 7-bit immediate scaled by 4: [-256, 252], multiple of 4.
  bool isSImm7s4() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);

  // Signed 7-bit immediate scaled by 8: [-512, 504], multiple of 8.
  bool isSImm7s8() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);

  // Signed 7-bit immediate scaled by 16: [-1024, 1008], multiple of 16.
  bool isSImm7s16() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);

  // Is Expr a symbolic reference usable as a scaled unsigned 12-bit offset
  // (e.g. :lo12:sym or @pageoff)?
  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.

  // Unsigned 12-bit offset, scaled by the template parameter. Non-constant
  // expressions are delegated to isSymbolicUImm12Offset above.
  template <int Scale> bool isUImm12Offset() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      return isSymbolicUImm12Offset(getImm(), Scale);

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;

  bool isImm0_7() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 8);

  bool isImm1_8() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 9);

  bool isImm0_15() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 16);

  bool isImm1_16() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 17);

  bool isImm0_31() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 32);

  bool isImm1_31() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 32);

  bool isImm1_32() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 33);

  bool isImm0_63() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);

  bool isImm1_63() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 64);

  bool isImm1_64() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 65);

  bool isImm0_127() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 128);

  bool isImm0_255() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 256);

  bool isImm0_65535() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 65536);

  bool isImm32_63() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= 32 && Val < 64);

  // Valid "bitmask immediate" for 32-bit logical instructions (AND/ORR/EOR).
  bool isLogicalImm32() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 32);

  // Valid "bitmask immediate" for 64-bit logical instructions.
  bool isLogicalImm64() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  // Valid immediate operand for ADD/SUB: either a plain 12-bit value, a
  // shifted immediate ('lsl #0' or 'lsl #12'), or a supported symbol modifier.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)

    // Symbolic expressions: accept the page-offset / low-12-bit family of
    // modifiers, which the fixup machinery can encode into ADD/SUB.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;

  bool isCondCode() const { return Kind == k_CondCode; }

  // AdvSIMD modified-immediate "type 10" (64-bit per-byte mask form).
  bool isSIMDImmType10() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());

  // 26-bit signed, word-aligned branch displacement (B/BL).
  bool isBranchTarget26() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));

  // 19-bit signed, word-aligned PC-relative label (CBZ/CBNZ, LDR literal).
  bool isPCRelLabel19() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));

  // 14-bit signed, word-aligned branch displacement (TBZ/TBNZ).
  bool isBranchTarget14() const {
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    int64_t Val = MCE->getValue();
    return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
  // Shared helper for the isMov[ZK]SymbolG* predicates below: true if the
  // immediate is a symbol reference with one of the allowed ELF modifiers
  // (Darwin modifiers are rejected for MOVZ/MOVK).
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {

    if (DarwinRefKind != MCSymbolRefExpr::VK_None)

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])

  // MOVZ modifiers, one predicate per 16-bit chunk (:abs_g3: .. :abs_g0:,
  // plus the TLS variants the code below lists).
  bool isMovZSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);

  bool isMovZSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
        AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
    return isMovWSymbol(Variants);

  bool isMovZSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    return isMovWSymbol(Variants);

  bool isMovZSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
        AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
    return isMovWSymbol(Variants);

  // MOVK modifiers: only the non-checking (_NC) variants are legal, since
  // MOVK must not overwrite neighbouring chunks.
  bool isMovKSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);

  bool isMovKSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2_NC};
    return isMovWSymbol(Variants);

  bool isMovKSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
        AArch64MCExpr::VK_DTPREL_G1_NC
    return isMovWSymbol(Variants);

  bool isMovKSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
        AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
    return isMovWSymbol(Variants);

  // Can "MOV Rd, #imm" be encoded as MOVZ with this Shift for RegWidth?
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // For 32-bit targets only the low word matters.
    Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)

    return (Value & ~(0xffffULL << Shift)) == 0;

  // Can "MOV Rd, #imm" be encoded as MOVN with this Shift for RegWidth?
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)

    Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  // System-register name recognized for MRS (read) on the current core.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;

  // System-register name recognized for MSR (write) on the current core.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;

  // PSTATE field name accepted by "MSR <pstatefield>, #imm".
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;

  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  // Vector register restricted to V0-V15 (FPR128_lo).
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(

  // A 64-bit GPR written where a 32-bit one is expected; remapped in
  // addGPR32as64Operands below.
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);

  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;

  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
    if (VectorList.Count != NumRegs)
    if (VectorList.ElementKind != ElementKind)
    return VectorList.NumElements == NumElements;

  // Vector-lane index predicates; bound is the lane count for that element
  // size in a 128-bit register.
  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;

  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;

  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;

  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;

  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;

  bool isToken() const override { return Kind == k_Token; }
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }

  bool isShifter() const {
    if (!isShiftExtend())

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);

  bool isExtend() const {
    if (!isShiftExtend())

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;

  bool isExtend64() const {
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;

  bool isExtendLSL64() const {
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;

  // Extend legal as the index-scaling operand of a 64-bit-indexed load/store:
  // LSL/SXTX with amount either log2(access bytes) or 0.
  template<int Width> bool isMemXExtend() const {
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);

  // Same as above, for a 32-bit index register: UXTW/SXTW only.
  template<int Width> bool isMemWExtend() const {
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);

  template <unsigned width>
  bool isArithmeticShifter() const {
    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;

  template <unsigned width>
  bool isLogicalShifter() const {
    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;

  bool isMovImm32Shifter() const {
    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (NOTE(review): the original comment here listed 0/16/32/48 — it was
    // swapped with the 64-bit comment below; the code checks only 0 and 16.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);

  bool isMovImm64Shifter() const {
    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);

  bool isLogicalVecShifter() const {
    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);

  bool isMoveVecShifter() const {
    if (!isShiftExtend())

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    // Constant case: must be page-aligned and within the signed 21-bit
    // page-count range of ADRP (+/- 4GB in 4KB pages).
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    // Constant case: signed 21-bit byte displacement for ADR.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
  // Emit Expr into Inst: constants become immediate operands, anything else
  // stays a (relocatable) expression operand.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
      Inst.addOperand(MCOperand::CreateExpr(Expr));

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));

  // Translate a 64-bit GPR operand to the 32-bit GPR with the same encoding
  // (e.g. X3 -> W3); used where the instruction really takes a W register.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));

  // Map the parsed Q register down to its D (64-bit) alias.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));

  // Emit a D-register list (1-4 regs) as the matching register tuple.
  template <unsigned NumRegs>
  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
                                    AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];
        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));

  // Emit a Q-register list (1-4 regs) as the matching register tuple.
  template <unsigned NumRegs>
  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
                                    AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];
        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));

  // Vector-lane indices are emitted as plain immediates.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    addExpr(Inst, getImm());

  // Two MC operands: the (possibly shifted) value and the shift amount.
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));

  // Constant ADRP targets are encoded in pages (byte offset >> 12).
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);

  // Constant uimm12 offsets are emitted pre-divided by the access scale.
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));

  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

  // The scaled simm7 forms divide by their scale before encoding.
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
1279 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1282 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1285 assert(MCE && "Invalid constant immediate operand!");
1286 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1292 assert(MCE && "Invalid constant immediate operand!");
1293 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1296 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1299 assert(MCE && "Invalid constant immediate operand!");
1300 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1303 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1306 assert(MCE && "Invalid constant immediate operand!");
1307 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1310 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1313 assert(MCE && "Invalid constant immediate operand!");
1314 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1317 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1318 assert(N == 1 && "Invalid number of operands!");
1319 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1320 assert(MCE && "Invalid constant immediate operand!");
1321 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1324 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1325 assert(N == 1 && "Invalid number of operands!");
1326 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1327 assert(MCE && "Invalid constant immediate operand!");
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1331 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1334 assert(MCE && "Invalid constant immediate operand!");
1335 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1338 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1340 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1341 assert(MCE && "Invalid constant immediate operand!");
1342 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1345 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1348 assert(MCE && "Invalid constant immediate operand!");
1349 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1352 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1355 assert(MCE && "Invalid constant immediate operand!");
1356 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1359 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1360 assert(N == 1 && "Invalid number of operands!");
1361 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1362 assert(MCE && "Invalid logical immediate operand!");
1363 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1364 Inst.addOperand(MCOperand::CreateImm(encoding));
1367 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1368 assert(N == 1 && "Invalid number of operands!");
1369 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1370 assert(MCE && "Invalid logical immediate operand!");
1371 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1372 Inst.addOperand(MCOperand::CreateImm(encoding));
1375 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1376 assert(N == 1 && "Invalid number of operands!");
1377 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1378 assert(MCE && "Invalid immediate operand!");
1379 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1380 Inst.addOperand(MCOperand::CreateImm(encoding));
1383 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1384 // Branch operands don't encode the low bits, so shift them off
1385 // here. If it's a label, however, just put it on directly as there's
1386 // not enough information now to do anything.
1387 assert(N == 1 && "Invalid number of operands!");
1388 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1390 addExpr(Inst, getImm());
1393 assert(MCE && "Invalid constant immediate operand!");
1394 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1397 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1398 // Branch operands don't encode the low bits, so shift them off
1399 // here. If it's a label, however, just put it on directly as there's
1400 // not enough information now to do anything.
1401 assert(N == 1 && "Invalid number of operands!");
1402 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1404 addExpr(Inst, getImm());
1407 assert(MCE && "Invalid constant immediate operand!");
1408 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1411 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1412 // Branch operands don't encode the low bits, so shift them off
1413 // here. If it's a label, however, just put it on directly as there's
1414 // not enough information now to do anything.
1415 assert(N == 1 && "Invalid number of operands!");
1416 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1418 addExpr(Inst, getImm());
1421 assert(MCE && "Invalid constant immediate operand!");
1422 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1425 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1426 assert(N == 1 && "Invalid number of operands!");
1427 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1430 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1431 assert(N == 1 && "Invalid number of operands!");
1432 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1435 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1436 assert(N == 1 && "Invalid number of operands!");
1439 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1440 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1442 Inst.addOperand(MCOperand::CreateImm(Bits));
1445 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1446 assert(N == 1 && "Invalid number of operands!");
1449 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1450 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1452 Inst.addOperand(MCOperand::CreateImm(Bits));
1455 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1456 assert(N == 1 && "Invalid number of operands!");
1460 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1462 Inst.addOperand(MCOperand::CreateImm(Bits));
1465 void addSysCROperands(MCInst &Inst, unsigned N) const {
1466 assert(N == 1 && "Invalid number of operands!");
1467 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1470 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1471 assert(N == 1 && "Invalid number of operands!");
1472 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1475 void addShifterOperands(MCInst &Inst, unsigned N) const {
1476 assert(N == 1 && "Invalid number of operands!");
1478 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1479 Inst.addOperand(MCOperand::CreateImm(Imm));
1482 void addExtendOperands(MCInst &Inst, unsigned N) const {
1483 assert(N == 1 && "Invalid number of operands!");
1484 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1485 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1486 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1487 Inst.addOperand(MCOperand::CreateImm(Imm));
1490 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1491 assert(N == 1 && "Invalid number of operands!");
1492 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1493 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1494 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1495 Inst.addOperand(MCOperand::CreateImm(Imm));
1498 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1499 assert(N == 2 && "Invalid number of operands!");
1500 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1501 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1502 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1503 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1506 // For 8-bit load/store instructions with a register offset, both the
1507 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1508 // they're disambiguated by whether the shift was explicit or implicit rather
1510 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1511 assert(N == 2 && "Invalid number of operands!");
1512 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1513 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1514 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1515 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1519 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1520 assert(N == 1 && "Invalid number of operands!");
1522 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1523 uint64_t Value = CE->getValue();
1524 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1528 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1529 assert(N == 1 && "Invalid number of operands!");
1531 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1532 uint64_t Value = CE->getValue();
1533 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
// Debug dump of this operand; definition appears out-of-line below.
1536 void print(raw_ostream &OS) const override;
1538 static std::unique_ptr<AArch64Operand>
1539 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1540 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1541 Op->Tok.Data = Str.data();
1542 Op->Tok.Length = Str.size();
1543 Op->Tok.IsSuffix = IsSuffix;
1549 static std::unique_ptr<AArch64Operand>
1550 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1551 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1552 Op->Reg.RegNum = RegNum;
1553 Op->Reg.isVector = isVector;
1559 static std::unique_ptr<AArch64Operand>
1560 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1561 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1562 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1563 Op->VectorList.RegNum = RegNum;
1564 Op->VectorList.Count = Count;
1565 Op->VectorList.NumElements = NumElements;
1566 Op->VectorList.ElementKind = ElementKind;
1572 static std::unique_ptr<AArch64Operand>
1573 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1574 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1575 Op->VectorIndex.Val = Idx;
1581 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1582 SMLoc E, MCContext &Ctx) {
1583 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1590 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1591 unsigned ShiftAmount,
1594 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1595 Op->ShiftedImm .Val = Val;
1596 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1602 static std::unique_ptr<AArch64Operand>
1603 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1604 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1605 Op->CondCode.Code = Code;
1611 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1613 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1614 Op->FPImm.Val = Val;
1620 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1622 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1623 Op->Barrier.Val = Val;
1629 static std::unique_ptr<AArch64Operand>
1630 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1631 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1632 Op->SysReg.Data = Str.data();
1633 Op->SysReg.Length = Str.size();
1634 Op->SysReg.FeatureBits = FeatureBits;
1640 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1641 SMLoc E, MCContext &Ctx) {
1642 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1643 Op->SysCRImm.Val = Val;
1649 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1651 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1652 Op->Prefetch.Val = Val;
1658 static std::unique_ptr<AArch64Operand>
1659 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1660 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1661 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1662 Op->ShiftExtend.Type = ShOp;
1663 Op->ShiftExtend.Amount = Val;
1664 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1671 } // end anonymous namespace.
1673 void AArch64Operand::print(raw_ostream &OS) const {
1676 OS << "<fpimm " << getFPImm() << "("
1677 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1681 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1683 OS << "<barrier " << Name << ">";
1685 OS << "<barrier invalid #" << getBarrier() << ">";
1689 getImm()->print(OS);
1691 case k_ShiftedImm: {
1692 unsigned Shift = getShiftedImmShift();
1693 OS << "<shiftedimm ";
1694 getShiftedImmVal()->print(OS);
1695 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1699 OS << "<condcode " << getCondCode() << ">";
1702 OS << "<register " << getReg() << ">";
1704 case k_VectorList: {
1705 OS << "<vectorlist ";
1706 unsigned Reg = getVectorListStart();
1707 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1708 OS << Reg + i << " ";
1713 OS << "<vectorindex " << getVectorIndex() << ">";
1716 OS << "<sysreg: " << getSysReg() << '>';
1719 OS << "'" << getToken() << "'";
1722 OS << "c" << getSysCR();
1726 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1728 OS << "<prfop " << Name << ">";
1730 OS << "<prfop invalid #" << getPrefetch() << ">";
1733 case k_ShiftExtend: {
1734 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1735 << getShiftExtendAmount();
1736 if (!hasShiftExtendAmount())
1744 /// @name Auto-generated Match Functions
1747 static unsigned MatchRegisterName(StringRef Name);
1751 static unsigned matchVectorRegName(StringRef Name) {
1752 return StringSwitch<unsigned>(Name)
1753 .Case("v0", AArch64::Q0)
1754 .Case("v1", AArch64::Q1)
1755 .Case("v2", AArch64::Q2)
1756 .Case("v3", AArch64::Q3)
1757 .Case("v4", AArch64::Q4)
1758 .Case("v5", AArch64::Q5)
1759 .Case("v6", AArch64::Q6)
1760 .Case("v7", AArch64::Q7)
1761 .Case("v8", AArch64::Q8)
1762 .Case("v9", AArch64::Q9)
1763 .Case("v10", AArch64::Q10)
1764 .Case("v11", AArch64::Q11)
1765 .Case("v12", AArch64::Q12)
1766 .Case("v13", AArch64::Q13)
1767 .Case("v14", AArch64::Q14)
1768 .Case("v15", AArch64::Q15)
1769 .Case("v16", AArch64::Q16)
1770 .Case("v17", AArch64::Q17)
1771 .Case("v18", AArch64::Q18)
1772 .Case("v19", AArch64::Q19)
1773 .Case("v20", AArch64::Q20)
1774 .Case("v21", AArch64::Q21)
1775 .Case("v22", AArch64::Q22)
1776 .Case("v23", AArch64::Q23)
1777 .Case("v24", AArch64::Q24)
1778 .Case("v25", AArch64::Q25)
1779 .Case("v26", AArch64::Q26)
1780 .Case("v27", AArch64::Q27)
1781 .Case("v28", AArch64::Q28)
1782 .Case("v29", AArch64::Q29)
1783 .Case("v30", AArch64::Q30)
1784 .Case("v31", AArch64::Q31)
1788 static bool isValidVectorKind(StringRef Name) {
1789 return StringSwitch<bool>(Name.lower())
1799 // Accept the width neutral ones, too, for verbose syntax. If those
1800 // aren't used in the right places, the token operand won't match so
1801 // all will work out.
1809 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1810 char &ElementKind) {
1811 assert(isValidVectorKind(Name));
1813 ElementKind = Name.lower()[Name.size() - 1];
1816 if (Name.size() == 2)
1819 // Parse the lane count
1820 Name = Name.drop_front();
1821 while (isdigit(Name.front())) {
1822 NumElements = 10 * NumElements + (Name.front() - '0');
1823 Name = Name.drop_front();
1827 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1829 StartLoc = getLoc();
1830 RegNo = tryParseRegister();
1831 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1832 return (RegNo == (unsigned)-1);
1835 // Matches a register name or register alias previously defined by '.req'
1836 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1838 unsigned RegNum = isVector ? matchVectorRegName(Name)
1839 : MatchRegisterName(Name);
1842 // Check for aliases registered via .req. Canonicalize to lower case.
1843 // That's more consistent since register names are case insensitive, and
1844 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1845 auto Entry = RegisterReqs.find(Name.lower());
1846 if (Entry == RegisterReqs.end())
1848 // set RegNum if the match is the right kind of register
1849 if (isVector == Entry->getValue().first)
1850 RegNum = Entry->getValue().second;
1855 /// tryParseRegister - Try to parse a register name. The token must be an
1856 /// Identifier when called, and if it is a register name the token is eaten and
1857 /// the register is added to the operand list.
1858 int AArch64AsmParser::tryParseRegister() {
1859 const AsmToken &Tok = Parser.getTok();
1860 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1862 std::string lowerCase = Tok.getString().lower();
1863 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1864 // Also handle a few aliases of registers.
1866 RegNum = StringSwitch<unsigned>(lowerCase)
1867 .Case("fp", AArch64::FP)
1868 .Case("lr", AArch64::LR)
1869 .Case("x31", AArch64::XZR)
1870 .Case("w31", AArch64::WZR)
1876 Parser.Lex(); // Eat identifier token.
1880 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1881 /// kind specifier. If it is a register specifier, eat the token and return it.
1882 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1883 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1884 TokError("vector register expected");
1888 StringRef Name = Parser.getTok().getString();
1889 // If there is a kind specifier, it's separated from the register name by
1891 size_t Start = 0, Next = Name.find('.');
1892 StringRef Head = Name.slice(Start, Next);
1893 unsigned RegNum = matchRegisterNameAlias(Head, true);
1896 if (Next != StringRef::npos) {
1897 Kind = Name.slice(Next, StringRef::npos);
1898 if (!isValidVectorKind(Kind)) {
1899 TokError("invalid vector kind qualifier");
1903 Parser.Lex(); // Eat the register token.
1908 TokError("vector register expected");
1912 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1913 AArch64AsmParser::OperandMatchResultTy
1914 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1917 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1918 Error(S, "Expected cN operand where 0 <= N <= 15");
1919 return MatchOperand_ParseFail;
1922 StringRef Tok = Parser.getTok().getIdentifier();
1923 if (Tok[0] != 'c' && Tok[0] != 'C') {
1924 Error(S, "Expected cN operand where 0 <= N <= 15");
1925 return MatchOperand_ParseFail;
1929 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1930 if (BadNum || CRNum > 15) {
1931 Error(S, "Expected cN operand where 0 <= N <= 15");
1932 return MatchOperand_ParseFail;
1935 Parser.Lex(); // Eat identifier token.
1937 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1938 return MatchOperand_Success;
1941 /// tryParsePrefetch - Try to parse a prefetch operand.
1942 AArch64AsmParser::OperandMatchResultTy
1943 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1945 const AsmToken &Tok = Parser.getTok();
1946 // Either an identifier for named values or a 5-bit immediate.
1947 bool Hash = Tok.is(AsmToken::Hash);
1948 if (Hash || Tok.is(AsmToken::Integer)) {
1950 Parser.Lex(); // Eat hash token.
1951 const MCExpr *ImmVal;
1952 if (getParser().parseExpression(ImmVal))
1953 return MatchOperand_ParseFail;
1955 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1957 TokError("immediate value expected for prefetch operand");
1958 return MatchOperand_ParseFail;
1960 unsigned prfop = MCE->getValue();
1962 TokError("prefetch operand out of range, [0,31] expected");
1963 return MatchOperand_ParseFail;
1966 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1967 return MatchOperand_Success;
1970 if (Tok.isNot(AsmToken::Identifier)) {
1971 TokError("pre-fetch hint expected");
1972 return MatchOperand_ParseFail;
1976 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1978 TokError("pre-fetch hint expected");
1979 return MatchOperand_ParseFail;
1982 Parser.Lex(); // Eat identifier token.
1983 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1984 return MatchOperand_Success;
1987 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
1989 AArch64AsmParser::OperandMatchResultTy
1990 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1994 if (Parser.getTok().is(AsmToken::Hash)) {
1995 Parser.Lex(); // Eat hash token.
1998 if (parseSymbolicImmVal(Expr))
1999 return MatchOperand_ParseFail;
2001 AArch64MCExpr::VariantKind ELFRefKind;
2002 MCSymbolRefExpr::VariantKind DarwinRefKind;
2004 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2005 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2006 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2007 // No modifier was specified at all; this is the syntax for an ELF basic
2008 // ADRP relocation (unfortunately).
2010 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2011 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2012 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2014 Error(S, "gotpage label reference not allowed an addend");
2015 return MatchOperand_ParseFail;
2016 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2017 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2018 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2019 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2020 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2021 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2022 // The operand must be an @page or @gotpage qualified symbolref.
2023 Error(S, "page or gotpage label reference expected");
2024 return MatchOperand_ParseFail;
2028 // We have either a label reference possibly with addend or an immediate. The
2029 // addend is a raw value here. The linker will adjust it to only reference the
2031 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2032 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2034 return MatchOperand_Success;
2037 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2039 AArch64AsmParser::OperandMatchResultTy
2040 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2044 if (Parser.getTok().is(AsmToken::Hash)) {
2045 Parser.Lex(); // Eat hash token.
2048 if (getParser().parseExpression(Expr))
2049 return MatchOperand_ParseFail;
2051 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2052 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2054 return MatchOperand_Success;
2057 /// tryParseFPImm - A floating point immediate expression operand.
2058 AArch64AsmParser::OperandMatchResultTy
2059 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2063 if (Parser.getTok().is(AsmToken::Hash)) {
2064 Parser.Lex(); // Eat '#'
2068 // Handle negation, as that still comes through as a separate token.
2069 bool isNegative = false;
2070 if (Parser.getTok().is(AsmToken::Minus)) {
2074 const AsmToken &Tok = Parser.getTok();
2075 if (Tok.is(AsmToken::Real)) {
2076 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2077 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2078 // If we had a '-' in front, toggle the sign bit.
2079 IntVal ^= (uint64_t)isNegative << 63;
2080 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2081 Parser.Lex(); // Eat the token.
2082 // Check for out of range values. As an exception, we let Zero through,
2083 // as we handle that special case in post-processing before matching in
2084 // order to use the zero register for it.
2085 if (Val == -1 && !RealVal.isZero()) {
2086 TokError("expected compatible register or floating-point constant");
2087 return MatchOperand_ParseFail;
2089 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2090 return MatchOperand_Success;
2092 if (Tok.is(AsmToken::Integer)) {
2094 if (!isNegative && Tok.getString().startswith("0x")) {
2095 Val = Tok.getIntVal();
2096 if (Val > 255 || Val < 0) {
2097 TokError("encoded floating point value out of range");
2098 return MatchOperand_ParseFail;
2101 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2102 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2103 // If we had a '-' in front, toggle the sign bit.
2104 IntVal ^= (uint64_t)isNegative << 63;
2105 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2107 Parser.Lex(); // Eat the token.
2108 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2109 return MatchOperand_Success;
2113 return MatchOperand_NoMatch;
2115 TokError("invalid floating point immediate");
2116 return MatchOperand_ParseFail;
2119 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2120 AArch64AsmParser::OperandMatchResultTy
2121 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2124 if (Parser.getTok().is(AsmToken::Hash))
2125 Parser.Lex(); // Eat '#'
2126 else if (Parser.getTok().isNot(AsmToken::Integer))
2127 // Operand should start from # or should be integer, emit error otherwise.
2128 return MatchOperand_NoMatch;
2131 if (parseSymbolicImmVal(Imm))
2132 return MatchOperand_ParseFail;
2133 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2134 uint64_t ShiftAmount = 0;
2135 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2137 int64_t Val = MCE->getValue();
2138 if (Val > 0xfff && (Val & 0xfff) == 0) {
2139 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2143 SMLoc E = Parser.getTok().getLoc();
2144 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2146 return MatchOperand_Success;
2152 // The optional operand must be "lsl #N" where N is non-negative.
2153 if (!Parser.getTok().is(AsmToken::Identifier) ||
2154 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2155 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2156 return MatchOperand_ParseFail;
2162 if (Parser.getTok().is(AsmToken::Hash)) {
2166 if (Parser.getTok().isNot(AsmToken::Integer)) {
2167 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2168 return MatchOperand_ParseFail;
2171 int64_t ShiftAmount = Parser.getTok().getIntVal();
2173 if (ShiftAmount < 0) {
2174 Error(Parser.getTok().getLoc(), "positive shift amount required");
2175 return MatchOperand_ParseFail;
2177 Parser.Lex(); // Eat the number
2179 SMLoc E = Parser.getTok().getLoc();
2180 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2181 S, E, getContext()));
2182 return MatchOperand_Success;
2185 /// parseCondCodeString - Parse a Condition Code string.
2186 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2187 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2188 .Case("eq", AArch64CC::EQ)
2189 .Case("ne", AArch64CC::NE)
2190 .Case("cs", AArch64CC::HS)
2191 .Case("hs", AArch64CC::HS)
2192 .Case("cc", AArch64CC::LO)
2193 .Case("lo", AArch64CC::LO)
2194 .Case("mi", AArch64CC::MI)
2195 .Case("pl", AArch64CC::PL)
2196 .Case("vs", AArch64CC::VS)
2197 .Case("vc", AArch64CC::VC)
2198 .Case("hi", AArch64CC::HI)
2199 .Case("ls", AArch64CC::LS)
2200 .Case("ge", AArch64CC::GE)
2201 .Case("lt", AArch64CC::LT)
2202 .Case("gt", AArch64CC::GT)
2203 .Case("le", AArch64CC::LE)
2204 .Case("al", AArch64CC::AL)
2205 .Case("nv", AArch64CC::NV)
2206 .Default(AArch64CC::Invalid);
2210 /// parseCondCode - Parse a Condition Code operand.
2211 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2212 bool invertCondCode) {
2214 const AsmToken &Tok = Parser.getTok();
2215 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2217 StringRef Cond = Tok.getString();
2218 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2219 if (CC == AArch64CC::Invalid)
2220 return TokError("invalid condition code");
2221 Parser.Lex(); // Eat identifier token.
2223 if (invertCondCode) {
2224 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2225 return TokError("condition codes AL and NV are invalid for this instruction");
2226 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2230 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2234 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2235 /// argument. Parse them if present.
// Returns MatchOperand_NoMatch (no tokens eaten) if the identifier is not a
// shift/extend keyword, so the caller can try other interpretations.
2236 AArch64AsmParser::OperandMatchResultTy
2237 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2238 const AsmToken &Tok = Parser.getTok();
2239 std::string LowerID = Tok.getString().lower();
2240 AArch64_AM::ShiftExtendType ShOp =
2241 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2242 .Case("lsl", AArch64_AM::LSL)
2243 .Case("lsr", AArch64_AM::LSR)
2244 .Case("asr", AArch64_AM::ASR)
2245 .Case("ror", AArch64_AM::ROR)
2246 .Case("msl", AArch64_AM::MSL)
2247 .Case("uxtb", AArch64_AM::UXTB)
2248 .Case("uxth", AArch64_AM::UXTH)
2249 .Case("uxtw", AArch64_AM::UXTW)
2250 .Case("uxtx", AArch64_AM::UXTX)
2251 .Case("sxtb", AArch64_AM::SXTB)
2252 .Case("sxth", AArch64_AM::SXTH)
2253 .Case("sxtw", AArch64_AM::SXTW)
2254 .Case("sxtx", AArch64_AM::SXTX)
2255 .Default(AArch64_AM::InvalidShiftExtend)
2257 if (ShOp == AArch64_AM::InvalidShiftExtend)
2258 return MatchOperand_NoMatch;
2260 SMLoc S = Tok.getLoc();
// Shift types (lsl/lsr/asr/ror/msl) require an immediate; extend types may
// omit it, in which case #0 is implied.
2263 bool Hash = getLexer().is(AsmToken::Hash);
2264 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2265 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2266 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2267 ShOp == AArch64_AM::MSL) {
2268 // We expect a number here.
2269 TokError("expected #imm after shift specifier");
2270 return MatchOperand_ParseFail;
2273 // "extend" type operations don't need an immediate, #0 is implicit.
2274 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2276 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2277 return MatchOperand_Success;
2281 Parser.Lex(); // Eat the '#'.
2283 // Make sure we do actually have a number
2284 if (!Parser.getTok().is(AsmToken::Integer)) {
2285 Error(Parser.getTok().getLoc(),
2286 "expected integer shift amount");
2287 return MatchOperand_ParseFail;
2290 const MCExpr *ImmVal;
2291 if (getParser().parseExpression(ImmVal))
2292 return MatchOperand_ParseFail;
2294 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
// NOTE(review): line 2295 is elided here (presumably `if (!MCE) {`
// guarding the error below) -- confirm against the full source.
2296 TokError("expected #imm after shift specifier");
2297 return MatchOperand_ParseFail;
2300 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2301 Operands.push_back(AArch64Operand::CreateShiftExtend(
2302 ShOp, MCE->getValue(), true, S, E, getContext()));
2303 return MatchOperand_Success;
2306 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2307 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
// The mnemonic has already been recognized by the caller; this routine maps
// the textual operation name (case-insensitive) to the SYS encoding
// (op1, Cn, Cm, op2) and emits "sys" operands in its place. NOTE(review):
// gaps in the embedded line numbering indicate elided source lines
// throughout (e.g. closing braces and some push_back openers).
2308 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2309 OperandVector &Operands) {
2310 if (Name.find('.') != StringRef::npos)
2311 return TokError("invalid operand");
2315 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2317 const AsmToken &Tok = Parser.getTok();
2318 StringRef Op = Tok.getString();
2319 SMLoc S = Tok.getLoc();
2321 const MCExpr *Expr = nullptr;
// Helper macro: append the four SYS operands #op1, Cn, Cm, #op2.
2323 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2325 Expr = MCConstantExpr::Create(op1, getContext()); \
2326 Operands.push_back( \
2327 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2328 Operands.push_back( \
2329 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2330 Operands.push_back( \
2331 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2332 Expr = MCConstantExpr::Create(op2, getContext()); \
2333 Operands.push_back( \
2334 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2337 if (Mnemonic == "ic") {
2338 if (!Op.compare_lower("ialluis")) {
2339 // SYS #0, C7, C1, #0
2340 SYS_ALIAS(0, 7, 1, 0);
2341 } else if (!Op.compare_lower("iallu")) {
2342 // SYS #0, C7, C5, #0
2343 SYS_ALIAS(0, 7, 5, 0);
2344 } else if (!Op.compare_lower("ivau")) {
2345 // SYS #3, C7, C5, #1
2346 SYS_ALIAS(3, 7, 5, 1);
2348 return TokError("invalid operand for IC instruction");
2350 } else if (Mnemonic == "dc") {
2351 if (!Op.compare_lower("zva")) {
2352 // SYS #3, C7, C4, #1
2353 SYS_ALIAS(3, 7, 4, 1);
2354 } else if (!Op.compare_lower("ivac")) {
2355 // SYS #0, C7, C6, #1
2356 SYS_ALIAS(0, 7, 6, 1);
2357 } else if (!Op.compare_lower("isw")) {
2358 // SYS #0, C7, C6, #2
2359 SYS_ALIAS(0, 7, 6, 2);
2360 } else if (!Op.compare_lower("cvac")) {
2361 // SYS #3, C7, C10, #1
2362 SYS_ALIAS(3, 7, 10, 1);
2363 } else if (!Op.compare_lower("csw")) {
2364 // SYS #0, C7, C10, #2
2365 SYS_ALIAS(0, 7, 10, 2);
2366 } else if (!Op.compare_lower("cvau")) {
2367 // SYS #3, C7, C11, #1
2368 SYS_ALIAS(3, 7, 11, 1);
2369 } else if (!Op.compare_lower("civac")) {
2370 // SYS #3, C7, C14, #1
2371 SYS_ALIAS(3, 7, 14, 1);
2372 } else if (!Op.compare_lower("cisw")) {
2373 // SYS #0, C7, C14, #2
2374 SYS_ALIAS(0, 7, 14, 2);
2376 return TokError("invalid operand for DC instruction");
2378 } else if (Mnemonic == "at") {
2379 if (!Op.compare_lower("s1e1r")) {
2380 // SYS #0, C7, C8, #0
2381 SYS_ALIAS(0, 7, 8, 0);
2382 } else if (!Op.compare_lower("s1e2r")) {
2383 // SYS #4, C7, C8, #0
2384 SYS_ALIAS(4, 7, 8, 0);
2385 } else if (!Op.compare_lower("s1e3r")) {
2386 // SYS #6, C7, C8, #0
2387 SYS_ALIAS(6, 7, 8, 0);
2388 } else if (!Op.compare_lower("s1e1w")) {
2389 // SYS #0, C7, C8, #1
2390 SYS_ALIAS(0, 7, 8, 1);
2391 } else if (!Op.compare_lower("s1e2w")) {
2392 // SYS #4, C7, C8, #1
2393 SYS_ALIAS(4, 7, 8, 1);
2394 } else if (!Op.compare_lower("s1e3w")) {
2395 // SYS #6, C7, C8, #1
2396 SYS_ALIAS(6, 7, 8, 1);
2397 } else if (!Op.compare_lower("s1e0r")) {
2398 // SYS #0, C7, C8, #2
2399 SYS_ALIAS(0, 7, 8, 2);
2400 } else if (!Op.compare_lower("s1e0w")) {
2401 // SYS #0, C7, C8, #3
2402 SYS_ALIAS(0, 7, 8, 3);
2403 } else if (!Op.compare_lower("s12e1r")) {
2404 // SYS #4, C7, C8, #4
2405 SYS_ALIAS(4, 7, 8, 4);
2406 } else if (!Op.compare_lower("s12e1w")) {
2407 // SYS #4, C7, C8, #5
2408 SYS_ALIAS(4, 7, 8, 5);
2409 } else if (!Op.compare_lower("s12e0r")) {
2410 // SYS #4, C7, C8, #6
2411 SYS_ALIAS(4, 7, 8, 6);
2412 } else if (!Op.compare_lower("s12e0w")) {
2413 // SYS #4, C7, C8, #7
2414 SYS_ALIAS(4, 7, 8, 7);
2416 return TokError("invalid operand for AT instruction");
2418 } else if (Mnemonic == "tlbi") {
2419 if (!Op.compare_lower("vmalle1is")) {
2420 // SYS #0, C8, C3, #0
2421 SYS_ALIAS(0, 8, 3, 0);
2422 } else if (!Op.compare_lower("alle2is")) {
2423 // SYS #4, C8, C3, #0
2424 SYS_ALIAS(4, 8, 3, 0);
2425 } else if (!Op.compare_lower("alle3is")) {
2426 // SYS #6, C8, C3, #0
2427 SYS_ALIAS(6, 8, 3, 0);
2428 } else if (!Op.compare_lower("vae1is")) {
2429 // SYS #0, C8, C3, #1
2430 SYS_ALIAS(0, 8, 3, 1);
2431 } else if (!Op.compare_lower("vae2is")) {
2432 // SYS #4, C8, C3, #1
2433 SYS_ALIAS(4, 8, 3, 1);
2434 } else if (!Op.compare_lower("vae3is")) {
2435 // SYS #6, C8, C3, #1
2436 SYS_ALIAS(6, 8, 3, 1);
2437 } else if (!Op.compare_lower("aside1is")) {
2438 // SYS #0, C8, C3, #2
2439 SYS_ALIAS(0, 8, 3, 2);
2440 } else if (!Op.compare_lower("vaae1is")) {
2441 // SYS #0, C8, C3, #3
2442 SYS_ALIAS(0, 8, 3, 3);
2443 } else if (!Op.compare_lower("alle1is")) {
2444 // SYS #4, C8, C3, #4
2445 SYS_ALIAS(4, 8, 3, 4);
2446 } else if (!Op.compare_lower("vale1is")) {
2447 // SYS #0, C8, C3, #5
2448 SYS_ALIAS(0, 8, 3, 5);
2449 } else if (!Op.compare_lower("vaale1is")) {
2450 // SYS #0, C8, C3, #7
2451 SYS_ALIAS(0, 8, 3, 7);
2452 } else if (!Op.compare_lower("vmalle1")) {
2453 // SYS #0, C8, C7, #0
2454 SYS_ALIAS(0, 8, 7, 0);
2455 } else if (!Op.compare_lower("alle2")) {
2456 // SYS #4, C8, C7, #0
2457 SYS_ALIAS(4, 8, 7, 0);
2458 } else if (!Op.compare_lower("vale2is")) {
2459 // SYS #4, C8, C3, #5
2460 SYS_ALIAS(4, 8, 3, 5);
2461 } else if (!Op.compare_lower("vale3is")) {
2462 // SYS #6, C8, C3, #5
2463 SYS_ALIAS(6, 8, 3, 5);
2464 } else if (!Op.compare_lower("alle3")) {
2465 // SYS #6, C8, C7, #0
2466 SYS_ALIAS(6, 8, 7, 0);
2467 } else if (!Op.compare_lower("vae1")) {
2468 // SYS #0, C8, C7, #1
2469 SYS_ALIAS(0, 8, 7, 1);
2470 } else if (!Op.compare_lower("vae2")) {
2471 // SYS #4, C8, C7, #1
2472 SYS_ALIAS(4, 8, 7, 1);
2473 } else if (!Op.compare_lower("vae3")) {
2474 // SYS #6, C8, C7, #1
2475 SYS_ALIAS(6, 8, 7, 1);
2476 } else if (!Op.compare_lower("aside1")) {
2477 // SYS #0, C8, C7, #2
2478 SYS_ALIAS(0, 8, 7, 2);
2479 } else if (!Op.compare_lower("vaae1")) {
2480 // SYS #0, C8, C7, #3
2481 SYS_ALIAS(0, 8, 7, 3);
2482 } else if (!Op.compare_lower("alle1")) {
2483 // SYS #4, C8, C7, #4
2484 SYS_ALIAS(4, 8, 7, 4);
2485 } else if (!Op.compare_lower("vale1")) {
2486 // SYS #0, C8, C7, #5
2487 SYS_ALIAS(0, 8, 7, 5);
2488 } else if (!Op.compare_lower("vale2")) {
2489 // SYS #4, C8, C7, #5
2490 SYS_ALIAS(4, 8, 7, 5);
2491 } else if (!Op.compare_lower("vale3")) {
2492 // SYS #6, C8, C7, #5
2493 SYS_ALIAS(6, 8, 7, 5);
2494 } else if (!Op.compare_lower("vaale1")) {
2495 // SYS #0, C8, C7, #7
2496 SYS_ALIAS(0, 8, 7, 7);
2497 } else if (!Op.compare_lower("ipas2e1")) {
2498 // SYS #4, C8, C4, #1
2499 SYS_ALIAS(4, 8, 4, 1);
2500 } else if (!Op.compare_lower("ipas2le1")) {
2501 // SYS #4, C8, C4, #5
2502 SYS_ALIAS(4, 8, 4, 5);
2503 } else if (!Op.compare_lower("ipas2e1is")) {
2504 // SYS #4, C8, C0, #1
2505 SYS_ALIAS(4, 8, 0, 1);
2506 } else if (!Op.compare_lower("ipas2le1is")) {
2507 // SYS #4, C8, C0, #5
2508 SYS_ALIAS(4, 8, 0, 5);
2509 } else if (!Op.compare_lower("vmalls12e1")) {
2510 // SYS #4, C8, C7, #6
2511 SYS_ALIAS(4, 8, 7, 6);
2512 } else if (!Op.compare_lower("vmalls12e1is")) {
2513 // SYS #4, C8, C3, #6
2514 SYS_ALIAS(4, 8, 3, 6);
2516 return TokError("invalid operand for TLBI instruction");
2522 Parser.Lex(); // Eat operand.
// Heuristic: "all"-style operations (e.g. alle1, vmalle1is) operate on every
// entry and take no register; every other operation requires one.
2524 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2525 bool HasRegister = false;
2527 // Check for the optional register operand.
2528 if (getLexer().is(AsmToken::Comma)) {
2529 Parser.Lex(); // Eat comma.
2531 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2532 return TokError("expected register operand");
// NOTE(review): the line setting HasRegister = true after a successful
// register parse is elided in this listing -- confirm against the full
// source.
2537 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2538 Parser.eatToEndOfStatement();
2539 return TokError("unexpected token in argument list");
2542 if (ExpectRegister && !HasRegister) {
2543 return TokError("specified " + Mnemonic + " op requires a register");
2545 else if (!ExpectRegister && HasRegister) {
2546 return TokError("specified " + Mnemonic + " op does not use a register");
2549 Parser.Lex(); // Consume the EndOfStatement
// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier: either
// an immediate in the range [0, 15] (optionally '#'-prefixed) or a named
// barrier option; ISB additionally restricts named options to 'sy'.
2553 AArch64AsmParser::OperandMatchResultTy
2554 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2555 const AsmToken &Tok = Parser.getTok();
2557 // Can be either a #imm style literal or an option name
2558 bool Hash = Tok.is(AsmToken::Hash);
2559 if (Hash || Tok.is(AsmToken::Integer)) {
2560 // Immediate operand.
2562 Parser.Lex(); // Eat the '#'
2563 const MCExpr *ImmVal;
2564 SMLoc ExprLoc = getLoc();
2565 if (getParser().parseExpression(ImmVal))
2566 return MatchOperand_ParseFail;
2567 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
// NOTE(review): line 2568 is elided here (presumably `if (!MCE) {`) --
// confirm against the full source.
2569 Error(ExprLoc, "immediate value expected for barrier operand");
2570 return MatchOperand_ParseFail;
2572 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2573 Error(ExprLoc, "barrier operand out of range");
2574 return MatchOperand_ParseFail;
2577 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2578 return MatchOperand_Success;
2581 if (Tok.isNot(AsmToken::Identifier)) {
2582 TokError("invalid operand for instruction");
2583 return MatchOperand_ParseFail;
2587 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2589 TokError("invalid barrier option name");
2590 return MatchOperand_ParseFail;
2593 // The only valid named option for ISB is 'sy'
2594 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2595 TokError("'sy' or #imm operand expected");
2596 return MatchOperand_ParseFail;
2600 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2601 Parser.Lex(); // Consume the option
2603 return MatchOperand_Success;
// tryParseSysReg - Parse a system register operand (MRS/MSR). Any identifier
// is accepted here and recorded verbatim along with the subtarget feature
// bits; validation of the register name happens later.
2606 AArch64AsmParser::OperandMatchResultTy
2607 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2608 const AsmToken &Tok = Parser.getTok();
2610 if (Tok.isNot(AsmToken::Identifier))
2611 return MatchOperand_NoMatch;
2613 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2614 STI.getFeatureBits(), getContext()));
2615 Parser.Lex(); // Eat identifier
2617 return MatchOperand_Success;
2620 /// tryParseVectorRegister - Parse a vector register operand.
// Matches the register (and optional ".8b"-style kind qualifier), then an
// optional "[imm]" lane index. NOTE(review): several lines are elided in
// this listing (the non-identifier early return, Kind/S declarations,
// push_back openers and error-path returns) -- confirm against the full
// source.
2621 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2622 if (Parser.getTok().isNot(AsmToken::Identifier))
2626 // Check for a vector register specifier first.
2628 int64_t Reg = tryMatchVectorRegister(Kind, false);
2632 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2633 // If there was an explicit qualifier, that goes on as a literal text
2637 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2639 // If there is an index specifier following the register, parse that too.
2640 if (Parser.getTok().is(AsmToken::LBrac)) {
2641 SMLoc SIdx = getLoc();
2642 Parser.Lex(); // Eat left bracket token.
2644 const MCExpr *ImmVal;
2645 if (getParser().parseExpression(ImmVal))
2647 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2649 TokError("immediate value expected for vector index");
2654 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2655 Error(E, "']' expected");
2659 Parser.Lex(); // Eat right bracket token.
2661 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2668 /// parseRegister - Parse a non-vector register operand.
// Falls back to the vector-register parser first; on failure tries a scalar
// register. Returns false on success (LLVM parser convention elsewhere in
// this file).
2669 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2671 // Try for a vector register.
2672 if (!tryParseVectorRegister(Operands))
2675 // Try for a scalar register.
2676 int64_t Reg = tryParseRegister();
2680 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2682 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2683 // as a string token in the instruction itself.
2684 if (getLexer().getKind() == AsmToken::LBrac) {
2685 SMLoc LBracS = getLoc();
2687 const AsmToken &Tok = Parser.getTok();
2688 if (Tok.is(AsmToken::Integer)) {
2689 SMLoc IntS = getLoc();
2690 int64_t Val = Tok.getIntVal();
// NOTE(review): the check that Val == 1 (before emitting the "1" token
// below) appears to be elided in this listing -- confirm against the
// full source.
2693 if (getLexer().getKind() == AsmToken::RBrac) {
2694 SMLoc RBracS = getLoc();
2697 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2699 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2701 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
// parseSymbolicImmVal - Parse an immediate expression with an optional
// leading ELF relocation specifier of the form ":spec:expr" (e.g.
// ":lo12:sym"). When a specifier is present, the parsed expression is
// wrapped in an AArch64MCExpr carrying the variant kind.
2711 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2712 bool HasELFModifier = false;
2713 AArch64MCExpr::VariantKind RefKind;
2715 if (Parser.getTok().is(AsmToken::Colon)) {
2716 Parser.Lex(); // Eat ':'
2717 HasELFModifier = true;
2719 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2720 Error(Parser.getTok().getLoc(),
2721 "expect relocation specifier in operand after ':'");
2725 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2726 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2727 .Case("lo12", AArch64MCExpr::VK_LO12)
2728 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2729 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2730 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2731 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2732 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2733 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2734 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2735 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2736 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2737 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2738 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2739 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2740 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2741 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2742 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2743 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2744 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2745 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2746 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2747 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2748 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2749 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2750 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2751 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2752 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2753 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2754 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2755 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2756 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2757 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2758 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2759 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2760 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2761 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2762 .Default(AArch64MCExpr::VK_INVALID);
2764 if (RefKind == AArch64MCExpr::VK_INVALID) {
2765 Error(Parser.getTok().getLoc(),
2766 "expect relocation specifier in operand after ':'");
2770 Parser.Lex(); // Eat identifier
2772 if (Parser.getTok().isNot(AsmToken::Colon)) {
2773 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2776 Parser.Lex(); // Eat ':'
2779 if (getParser().parseExpression(ImmVal))
2783 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2788 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Accepts "{v0.8b - v3.8b}" range syntax and "{v0.8b, v1.8b, ...}"
// comma-separated syntax, followed by an optional "[imm]" lane index.
// Register numbers wrap at 31 (i.e. v31 -> v0 is sequential).
2789 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2790 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2792 Parser.Lex(); // Eat left bracket token.
2794 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2797 int64_t PrevReg = FirstReg;
2800 if (Parser.getTok().is(AsmToken::Minus)) {
2801 Parser.Lex(); // Eat the minus.
2803 SMLoc Loc = getLoc();
2805 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2808 // Any Kind suffixes must match on all regs in the list.
2809 if (Kind != NextKind)
2810 return Error(Loc, "mismatched register size suffix");
2812 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2814 if (Space == 0 || Space > 3) {
2815 return Error(Loc, "invalid number of vectors");
2821 while (Parser.getTok().is(AsmToken::Comma)) {
2822 Parser.Lex(); // Eat the comma token.
2824 SMLoc Loc = getLoc();
2826 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2829 // Any Kind suffixes must match on all regs in the list.
2830 if (Kind != NextKind)
2831 return Error(Loc, "mismatched register size suffix");
2833 // Registers must be incremental (with wraparound at 31)
2834 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2835 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2836 return Error(Loc, "registers must be sequential");
2843 if (Parser.getTok().isNot(AsmToken::RCurly))
2844 return Error(getLoc(), "'}' expected");
2845 Parser.Lex(); // Eat the '}' token.
2848 return Error(S, "invalid number of vectors");
2850 unsigned NumElements = 0;
2851 char ElementKind = 0;
// Decompose the ".8b"-style Kind suffix into element count and kind letter.
2853 parseValidVectorKind(Kind, NumElements, ElementKind);
2855 Operands.push_back(AArch64Operand::CreateVectorList(
2856 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2858 // If there is an index specifier following the list, parse that too.
2859 if (Parser.getTok().is(AsmToken::LBrac)) {
2860 SMLoc SIdx = getLoc();
2861 Parser.Lex(); // Eat left bracket token.
2863 const MCExpr *ImmVal;
2864 if (getParser().parseExpression(ImmVal))
2866 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2868 TokError("immediate value expected for vector index");
2873 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2874 Error(E, "']' expected");
2878 Parser.Lex(); // Eat right bracket token.
2880 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed by
// ", #0" (only a literal zero index is accepted). Used for operands such as
// the base register of exclusive-load/store forms that allow "xN, #0".
2886 AArch64AsmParser::OperandMatchResultTy
2887 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2888 const AsmToken &Tok = Parser.getTok();
2889 if (!Tok.is(AsmToken::Identifier))
2890 return MatchOperand_NoMatch;
2892 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2894 MCContext &Ctx = getContext();
2895 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2896 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2897 return MatchOperand_NoMatch;
2900 Parser.Lex(); // Eat register
2902 if (Parser.getTok().isNot(AsmToken::Comma)) {
2904 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2905 return MatchOperand_Success;
2907 Parser.Lex(); // Eat comma.
2909 if (Parser.getTok().is(AsmToken::Hash))
2910 Parser.Lex(); // Eat hash
2912 if (Parser.getTok().isNot(AsmToken::Integer)) {
2913 Error(getLoc(), "index must be absent or #0");
2914 return MatchOperand_ParseFail;
2917 const MCExpr *ImmVal;
2918 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2919 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2920 Error(getLoc(), "index must be absent or #0");
2921 return MatchOperand_ParseFail;
2925 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2926 return MatchOperand_Success;
2929 /// parseOperand - Parse an arm instruction operand. For now this parses the
2930 /// operand regardless of the mnemonic.
// Dispatches on the first token kind: '[' memory bracket, '{' vector list,
// identifiers (cond codes / registers / labels), immediates (including the
// special fcmp #0.0 literal), and the ldr-pseudo '=' expression.
2931 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2932 bool invertCondCode) {
2933 // Check if the current operand has a custom associated parser, if so, try to
2934 // custom parse the operand, or fallback to the general approach.
2935 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2936 if (ResTy == MatchOperand_Success)
2938 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2939 // there was a match, but an error occurred, in which case, just return that
2940 // the operand parsing failed.
2941 if (ResTy == MatchOperand_ParseFail)
2944 // Nothing custom, so do general case parsing.
2946 switch (getLexer().getKind()) {
2950 if (parseSymbolicImmVal(Expr))
2951 return Error(S, "invalid operand");
2953 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2954 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2957 case AsmToken::LBrac: {
2958 SMLoc Loc = Parser.getTok().getLoc();
2959 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2961 Parser.Lex(); // Eat '['
2963 // There's no comma after a '[', so we can parse the next operand
2965 return parseOperand(Operands, false, false);
2967 case AsmToken::LCurly:
2968 return parseVectorList(Operands);
2969 case AsmToken::Identifier: {
2970 // If we're expecting a Condition Code operand, then just parse that.
2972 return parseCondCode(Operands, invertCondCode);
2974 // If it's a register name, parse it.
2975 if (!parseRegister(Operands))
2978 // This could be an optional "shift" or "extend" operand.
2979 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2980 // We can only continue if no tokens were eaten.
2981 if (GotShift != MatchOperand_NoMatch)
2984 // This was not a register so parse other operands that start with an
2985 // identifier (like labels) as expressions and create them as immediates.
2986 const MCExpr *IdVal;
2988 if (getParser().parseExpression(IdVal))
2991 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2992 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
2995 case AsmToken::Integer:
2996 case AsmToken::Real:
2997 case AsmToken::Hash: {
2998 // #42 -> immediate.
3000 if (getLexer().is(AsmToken::Hash))
3003 // Parse a negative sign
3004 bool isNegative = false;
3005 if (Parser.getTok().is(AsmToken::Minus)) {
3007 // We need to consume this token only when we have a Real, otherwise
3008 // we let parseSymbolicImmVal take care of it
3009 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3013 // The only Real that should come through here is a literal #0.0 for
3014 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3015 // so convert the value.
3016 const AsmToken &Tok = Parser.getTok();
3017 if (Tok.is(AsmToken::Real)) {
3018 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3019 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3020 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3021 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3022 Mnemonic != "fcmlt")
3023 return TokError("unexpected floating point literal");
3024 else if (IntVal != 0 || isNegative)
3025 return TokError("expected floating-point constant #0.0");
3026 Parser.Lex(); // Eat the token.
3029 AArch64Operand::CreateToken("#0", false, S, getContext()));
3031 AArch64Operand::CreateToken(".0", false, S, getContext()));
3035 const MCExpr *ImmVal;
3036 if (parseSymbolicImmVal(ImmVal))
3039 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3040 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3043 case AsmToken::Equal: {
3044 SMLoc Loc = Parser.getTok().getLoc();
3045 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3046 return Error(Loc, "unexpected token in operand");
3047 Parser.Lex(); // Eat '='
3048 const MCExpr *SubExprVal;
3049 if (getParser().parseExpression(SubExprVal))
3052 MCContext& Ctx = getContext();
3053 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3054 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3055 if (isa<MCConstantExpr>(SubExprVal) && Operands.size() >= 2 &&
3056 static_cast<AArch64Operand &>(*Operands[1]).isReg()) {
3057 bool IsXReg = AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3058 Operands[1]->getReg());
3059 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Shift the constant right in 16-bit chunks to find a movz encoding:
// X regs allow shifts of 0/16/32/48, W regs only 0/16.
3060 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3061 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
// NOTE(review): the loop body (lines 3062-3064, presumably Imm >>= 16 and
// ShiftAmt += 16) is elided in this listing -- confirm against the full
// source.
3065 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3066 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3067 Operands.push_back(AArch64Operand::CreateImm(
3068 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3070 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3071 ShiftAmt, true, S, E, Ctx));
3075 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3076 const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
3077 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3083 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
// operands. Normalizes legacy "bcc"-style branch mnemonics to "b.cc",
// handles the ".req" directive, routes IC/DC/AT/TLBI to parseSysAlias, and
// splits the mnemonic on '.' into separate tokens before reading operands.
3085 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3086 StringRef Name, SMLoc NameLoc,
3087 OperandVector &Operands) {
3088 Name = StringSwitch<StringRef>(Name.lower())
3089 .Case("beq", "b.eq")
3090 .Case("bne", "b.ne")
3091 .Case("bhs", "b.hs")
3092 .Case("bcs", "b.cs")
3093 .Case("blo", "b.lo")
3094 .Case("bcc", "b.cc")
3095 .Case("bmi", "b.mi")
3096 .Case("bpl", "b.pl")
3097 .Case("bvs", "b.vs")
3098 .Case("bvc", "b.vc")
3099 .Case("bhi", "b.hi")
3100 .Case("bls", "b.ls")
3101 .Case("bge", "b.ge")
3102 .Case("blt", "b.lt")
3103 .Case("bgt", "b.gt")
3104 .Case("ble", "b.le")
3105 .Case("bal", "b.al")
3106 .Case("bnv", "b.nv")
3109 // First check for the AArch64-specific .req directive.
3110 if (Parser.getTok().is(AsmToken::Identifier) &&
3111 Parser.getTok().getIdentifier() == ".req") {
3112 parseDirectiveReq(Name, NameLoc);
3113 // We always return 'error' for this, as we're done with this
3114 // statement and don't need to match the 'instruction'.
3118 // Create the leading tokens for the mnemonic, split by '.' characters.
3119 size_t Start = 0, Next = Name.find('.');
3120 StringRef Head = Name.slice(Start, Next);
3122 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3123 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3124 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3125 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3126 Parser.eatToEndOfStatement();
3131 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3134 // Handle condition codes for a branch mnemonic
3135 if (Head == "b" && Next != StringRef::npos) {
3137 Next = Name.find('.', Start + 1);
3138 Head = Name.slice(Start + 1, Next);
3140 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3141 (Head.data() - Name.data()));
3142 AArch64CC::CondCode CC = parseCondCodeString(Head);
3143 if (CC == AArch64CC::Invalid)
3144 return Error(SuffixLoc, "invalid condition code");
3146 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3148 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3151 // Add the remaining tokens in the mnemonic.
3152 while (Next != StringRef::npos) {
3154 Next = Name.find('.', Start + 1);
3155 Head = Name.slice(Start, Next);
3156 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3157 (Head.data() - Name.data()) + 1);
3159 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3162 // Conditional compare instructions have a Condition Code operand, which needs
3163 // to be parsed and an immediate operand created.
3164 bool condCodeFourthOperand =
3165 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3166 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3167 Head == "csinc" || Head == "csinv" || Head == "csneg");
3169 // These instructions are aliases to some of the conditional select
3170 // instructions. However, the condition code is inverted in the aliased
3173 // FIXME: Is this the correct way to handle these? Or should the parser
3174 // generate the aliased instructions directly?
3175 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3176 bool condCodeThirdOperand =
3177 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3179 // Read the remaining operands.
3180 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3181 // Read the first operand.
3182 if (parseOperand(Operands, false, false)) {
3183 Parser.eatToEndOfStatement();
3188 while (getLexer().is(AsmToken::Comma)) {
3189 Parser.Lex(); // Eat the comma.
3191 // Parse and remember the operand.
// N is the 1-based operand position; the cond-code flags above say which
// position (2nd/3rd/4th) holds the condition code for this mnemonic.
3192 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3193 (N == 3 && condCodeThirdOperand) ||
3194 (N == 2 && condCodeSecondOperand),
3195 condCodeSecondOperand || condCodeThirdOperand)) {
3196 Parser.eatToEndOfStatement();
3200 // After successfully parsing some operands there are two special cases to
3201 // consider (i.e. notional operands not separated by commas). Both are due
3202 // to memory specifiers:
3203 // + An RBrac will end an address for load/store/prefetch
3204 // + An '!' will indicate a pre-indexed operation.
3206 // It's someone else's responsibility to make sure these tokens are sane
3207 // in the given context!
3208 if (Parser.getTok().is(AsmToken::RBrac)) {
3209 SMLoc Loc = Parser.getTok().getLoc();
3210 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3215 if (Parser.getTok().is(AsmToken::Exclaim)) {
3216 SMLoc Loc = Parser.getTok().getLoc();
3217 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3226 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3227 SMLoc Loc = Parser.getTok().getLoc();
3228 Parser.eatToEndOfStatement();
3229 return Error(Loc, "unexpected token in argument list");
3232 Parser.Lex(); // Consume the EndOfStatement
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
/// Semantic checks that the auto-generated matcher cannot express:
///  - writeback (pre/post-indexed) loads/stores whose base register aliases
///    a transfer register (architecturally unpredictable),
///  - load pairs with Rt == Rt2,
///  - symbolic add/sub immediates that are only legal on certain opcodes.
/// \param Inst matched instruction to validate.
/// \param Loc  start locations of the parsed operands (mnemonic excluded),
///             used to place each diagnostic on the offending operand.
/// \returns the result of Error() (i.e. a reported diagnostic) when a check
///          rejects the instruction.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  // Writeback LDP: data registers are operands 1/2, base register is
  // operand 3 (operand 0 is presumably the writeback result — TODO confirm
  // against the instruction definitions).
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    // isSubRegisterEq also matches exact equality, not just sub-registers.
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
  // Immediate-offset (non-writeback) load pairs: destinations are
  // operands 0/1; only the Rt == Rt2 hazard applies.
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
  // Remaining writeback load pairs (FP registers and LDPSWpost): data
  // registers at operands 1/2, same Rt == Rt2 hazard.
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
  // Writeback STP: the base register must not alias either source.
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
  // Writeback single-register loads: base (operand 2) must not alias the
  // destination (operand 1).
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
  // Writeback single-register stores: same aliasing hazard as LDR above.
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      // Decompose the immediate into relocation specifier + symbol + addend.
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        return Error(Loc[2], "invalid immediate expression");
      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
      // Only allow these with ADDXri/ADDWri
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
           Inst.getOpcode() == AArch64::ADDWri))
      // Don't allow expressions in the immediate field otherwise
      return Error(Loc[2], "invalid immediate expression");
/// Translate a matcher failure code (Match_* enum value from the tablegen'd
/// matcher) into a user-facing diagnostic anchored at \p Loc.
/// Every case emits a fixed message via Error() and returns its result.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  case Match_MissingFeature:
    "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  // Diagnostics for the various operand-class predicate failures; the
  // messages spell out the accepted syntax/range for each class.
  case Match_AddSubRegExtendSmall:
    "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    "expected compatible register or floating-point constant");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryWExtend8:
    "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidIndex1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexB:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexH:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexS:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexD:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  // System-register operand diagnostics (read vs. write contexts).
    return Error(Loc, "expected readable system register");
    return Error(Loc, "expected writable system register or pstate");
  case Match_MnemonicFail:
    return Error(Loc, "unrecognized instruction mnemonic");
  llvm_unreachable("unexpected error code!");
// Forward declaration; the definition comes from the tablegen'd
// AArch64GenAsmMatcher.inc included below (GET_SUBTARGET_FEATURE_NAME).
static const char *getSubtargetFeatureName(unsigned Val);
/// Top-level per-instruction hook: rewrite alias mnemonics that the tablegen
/// matcher cannot express (lsl->ubfm, bfi/sbfiz/ubfiz->*bfm,
/// bfxil/sbfx/ubfx->*bfm, sxt*/uxt* register-width fixups, fmov #0.0), run
/// the generated matcher — short-form NEON table first, then the long-form
/// table — validate the result, and either emit the MCInst or report a
/// diagnostic for the failure code.
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               unsigned &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();
  // "lsl Rd, Rn, #imm" is really UBFM; rewrite the mnemonic and compute the
  // two UBFM immediates from the shift amount in place.
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        // Immediate encoding differs for 32-bit vs 64-bit destinations.
        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
        const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
        Operands[0] = AArch64Operand::CreateToken(
            "ubfm", false, Op.getStartLoc(), getContext());
        Operands.push_back(AArch64Operand::CreateImm(
            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                                Op3.getEndLoc(), getContext());
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
        // Only rewrite when both lsb and width are constant expressions.
        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();
          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
          // Range-check lsb (Op3) and width (Op4) against the register width
          // before computing the BFM-style immediates.
          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");
          uint64_t NewOp3Val = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
            NewOp3Val = (32 - Op3Val) & 0x1f;
            NewOp3Val = (64 - Op3Val) & 0x3f;
          uint64_t NewOp4Val = Op4Val - 1;
          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");
          const MCExpr *NewOp3 =
              MCConstantExpr::Create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          // Replace the alias mnemonic with the underlying *BFM mnemonic.
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
            llvm_unreachable("No valid mnemonic for alias?");
    // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
    // UBFX -> UBFM aliases.
    } else if (NumOperands == 5 &&
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();
          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");
          // For extract-style aliases the new msb is lsb + width - 1.
          uint64_t NewOp4Val = Op3Val + Op4Val - 1;
          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(Op4.getStartLoc(),
                         "requested extract overflows register");
          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          // Replace the alias mnemonic with the underlying *BFM mnemonic.
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfx")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfx")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
            llvm_unreachable("No valid mnemonic for alias?");
  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  // InstAlias can't quite handle this since the reg classes aren't
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      unsigned Reg = getXRegFromWReg(Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
        unsigned Reg = getXRegFromWReg(Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        unsigned Reg = getWRegFromXReg(Op.getReg());
        Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
  if (NumOperands == 3 && Tok == "fmov") {
    AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
    // getFPImm() == (unsigned)-1 marks the #0.0 sentinel here — the
    // immediate is replaced by the appropriate zero register.
    if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
        AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
      Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success)
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, OperandLocs))
    Out.EmitInstruction(Inst, STI);
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
    std::string Msg = "instruction requires:";
    // ErrorInfo is a bit mask of missing features; walk it bit by bit and
    // append each missing feature's name to the message.
    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
      if (ErrorInfo & Mask) {
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
    return Error(IDLoc, Msg);
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ~0U means the matcher could not attribute the failure to an operand;
    // fall back to the instruction location.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");
      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
    // If the match failed on a suffix token operand, tweak the diagnostic
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;
    return showMatchError(ErrorLoc, MatchResult);
  // All remaining operand-class failures share one handler: locate the
  // offending operand and delegate the wording to showMatchError().
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidIndex1:
  case Match_InvalidIndexB:
  case Match_InvalidIndexH:
  case Match_InvalidIndexS:
  case Match_InvalidIndexD:
  case Match_InvalidLabel:
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction");
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
    return showMatchError(ErrorLoc, MatchResult);
  llvm_unreachable("Implement any new match types added!");
/// ParseDirective parses the AArch64-specific assembler directives,
/// dispatching each recognized directive name to its dedicated handler.
/// Unrecognized names fall through to parseDirectiveLOH, which itself
/// rejects anything that is not the LOH directive.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  SMLoc Loc = DirectiveID.getLoc();
  // .hword/.word/.xword emit 2-, 4- and 8-byte data values respectively.
  if (IDVal == ".hword")
    return parseDirectiveWord(2, Loc);
  if (IDVal == ".word")
    return parseDirectiveWord(4, Loc);
  if (IDVal == ".xword")
    return parseDirectiveWord(8, Loc);
  if (IDVal == ".tlsdesccall")
    return parseDirectiveTLSDescCall(Loc);
  if (IDVal == ".ltorg" || IDVal == ".pool")
    return parseDirectiveLtorg(Loc);
  if (IDVal == ".unreq")
    return parseDirectiveUnreq(DirectiveID.getLoc());
  return parseDirectiveLOH(IDVal, Loc);
/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a data value of \p Size bytes.
/// \param Size byte width of each emitted value (2, 4 or 8 — see the
///             .hword/.word/.xword dispatch in ParseDirective).
/// \param L    directive location, used for diagnostics.
bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    const MCExpr *Value;
    if (getParser().parseExpression(Value))
    getParser().getStreamer().EmitValue(Value, Size);
    // A statement end terminates the list; otherwise a comma must follow.
    if (getLexer().is(AsmToken::EndOfStatement))
    // FIXME: Improve diagnostic.
    if (getLexer().isNot(AsmToken::Comma))
      return Error(L, "unexpected token in directive");
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
// Emits a TLSDESCCALL pseudo-instruction referencing the named symbol,
// wrapped in a VK_TLSDESC target expression.
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");
  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
  Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));
  getParser().getStreamer().EmitInstruction(Inst, STI);
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
/// Parses a Linker Optimization Hint directive: the LOH kind may be given
/// either by name or by numeric id, followed by the exact number of
/// comma-separated label arguments that kind requires.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  if (IDVal != MCLOHDirectiveName())
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    Kind = (MCLOHType)Id;
    // Check that Id does not overflow MCLOHType.
    if (!isValidMCLOHType(Kind) || Id != Kind)
      return TokError("invalid numeric identifier in directive");
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  // Consume the identifier.
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);
  assert(NbArgs != -1 && "Invalid number of arguments");
  SmallVector<MCSymbol *, 3> Args;
  // Collect exactly NbArgs comma-separated label symbols.
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().GetOrCreateSymbol(Name));
    if (Idx + 1 == NbArgs)
    if (getLexer().isNot(AsmToken::Comma))
      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
/// Flushes the target streamer's pending constant pool at this point.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
/// parseDirectiveReq
///  ::= name .req registername
/// Records \p Name as an alias for a scalar or vector register in
/// RegisterReqs; redefining an alias to a different register only warns.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  unsigned RegNum = tryParseRegister();
  bool IsVector = false;
  // A scalar register did not match — retry as a vector register.
  if (RegNum == static_cast<unsigned>(-1)) {
    RegNum = tryMatchVectorRegister(Kind, false);
    // A trailing type specifier (e.g. ".8b") is not allowed in .req.
    if (!Kind.empty()) {
      Error(SRegLoc, "vector register without type specifier expected");
  if (RegNum == static_cast<unsigned>(-1)) {
    Parser.eatToEndOfStatement();
    Error(SRegLoc, "register name or alias expected");
  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
    Parser.eatToEndOfStatement();
  Parser.Lex(); // Consume the EndOfStatement
  auto pair = std::make_pair(IsVector, RegNum);
  // First definition wins; a mismatching redefinition is only a warning.
  if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
/// parseDirectiveUnreq
///  ::= .unreq registername
/// Removes a register alias previously created with .req; unknown names are
/// silently ignored (erase on a missing key is a no-op).
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
    Parser.eatToEndOfStatement();
  // Aliases are stored lower-cased, so normalize before erasing.
  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
  Parser.Lex(); // Eat the identifier.
/// Decompose \p Expr into an optional AArch64 relocation specifier
/// (\p ELFRefKind), an optional Darwin symbol-ref variant
/// (\p DarwinRefKind), and a constant addend. Accepts a bare symbol ref or
/// symbol +/- constant; anything more complex is rejected.
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  // Peel off an AArch64-specific wrapper (e.g. :lo12:) if present.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
  // Otherwise only "symbol +/- constant" binary expressions are accepted.
  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  DarwinRefKind = SE->getKind();
  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)
  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
  Addend = AddendExpr->getValue();
  if (BE->getOpcode() == MCBinaryExpr::Sub)
  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
/// Force static initialization.
/// Registers this asm parser with TargetRegistry for both endiannesses of
/// the AArch64 target and for the legacy ARM64 target names.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
  RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
  RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
4119 #define GET_REGISTER_MATCHER
4120 #define GET_SUBTARGET_FEATURE_NAME
4121 #define GET_MATCHER_IMPLEMENTATION
4122 #include "AArch64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
/// Target hook that lets token operands match fixed-value immediate operand
/// classes used by InstAliases. ExpectedVal is derived from the match-class
/// kind; a constant immediate equal to it matches, anything else fails.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  int64_t ExpectedVal;
    return Match_InvalidOperand;
    return Match_InvalidOperand;
  // Only constant immediate expressions can match a fixed-value class.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;