1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
// AArch64AsmParser - target asm parser: parses AArch64 assembly text into
// MCInst instructions. NOTE(review): this listing is missing interior lines
// (the embedded original line numbers are non-contiguous), so several method
// bodies and closing braces are not visible here.
40 class AArch64AsmParser : public MCTargetAsmParser {
42 StringRef Mnemonic; ///< Instruction mnemonic.
46 // Map of register aliases registered via the .req directive.
// Maps alias name -> (is-vector flag, register number).
47 StringMap<std::pair<bool, unsigned> > RegisterReqs;
// Fetch the target streamer; the constructor below guarantees one exists.
49 AArch64TargetStreamer &getTargetStreamer() {
50 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
51 return static_cast<AArch64TargetStreamer &>(TS);
54 MCAsmParser &getParser() const { return Parser; }
55 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
// Location of the token currently under the lexer cursor.
57 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// Operand/alias parsing helpers. The tryParse*/tryMatch* forms return a
// sentinel (e.g. -1) or status rather than reporting a hard error.
59 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
60 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
61 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
62 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
63 int tryParseRegister();
64 int tryMatchVectorRegister(StringRef &Kind, bool expected);
65 bool parseRegister(OperandVector &Operands);
66 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
67 bool parseVectorList(OperandVector &Operands);
68 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostics are forwarded to the generic MCAsmParser.
71 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
72 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
73 bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Target-specific directive handlers (.word, .tlsdesccall, .loh, .ltorg,
// .req/.unreq).
75 bool parseDirectiveWord(unsigned Size, SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
// Matcher tables/prototypes generated by TableGen from AArch64 .td files.
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers referenced from the generated matcher.
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match result codes, extending the generic set.
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
117 const MCInstrInfo &MII,
118 const MCTargetOptions &Options)
119 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
120 MCAsmParserExtension::Initialize(_Parser);
// Lazily create a default target streamer if the streamer has none; the
// streamer takes ownership, so the bare `new` is intentional here.
121 if (Parser.getStreamer().getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(Parser.getStreamer());
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface.
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
// Classify a symbolic expression into its ELF/Darwin relocation variant and
// constant addend; used by the operand predicates below.
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
146 class AArch64Operand : public MCParsedAsmOperand {
// Source range covered by this operand in the input assembly.
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Per-kind payload structs; exactly one is active, selected by Kind.
// (Several struct bodies are elided from this listing.)
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
184 struct VectorIndexOp {
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
212 uint64_t FeatureBits; // We need to pass through information about which
213 // core we are compiling for so that the SysReg
214 // Mappers can appropriately conditionalize.
225 struct ShiftExtendOp {
226 AArch64_AM::ShiftExtendType Type;
228 bool HasExplicitAmount;
// The union-style storage for the per-kind payloads.
238 struct VectorListOp VectorList;
239 struct VectorIndexOp VectorIndex;
241 struct ShiftedImmOp ShiftedImm;
242 struct CondCodeOp CondCode;
243 struct FPImmOp FPImm;
244 struct BarrierOp Barrier;
245 struct SysRegOp SysReg;
246 struct SysCRImmOp SysCRImm;
247 struct PrefetchOp Prefetch;
248 struct ShiftExtendOp ShiftExtend;
251 // Keep the MCContext around as the MCExprs may need to be manipulated during
252 // the add<>Operands() calls.
256 AArch64Operand(KindTy K, MCContext &_Ctx)
257 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor: copies the payload member matching o's Kind (the
// switch/dispatch lines are elided in this listing).
259 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
261 StartLoc = o.StartLoc;
271 ShiftedImm = o.ShiftedImm;
274 CondCode = o.CondCode;
286 VectorList = o.VectorList;
289 VectorIndex = o.VectorIndex;
295 SysCRImm = o.SysCRImm;
298 Prefetch = o.Prefetch;
301 ShiftExtend = o.ShiftExtend;
306 /// getStartLoc - Get the location of the first token of this operand.
307 SMLoc getStartLoc() const override { return StartLoc; }
308 /// getEndLoc - Get the location of the last token of this operand.
309 SMLoc getEndLoc() const override { return EndLoc; }
// Typed payload accessors. Each asserts that the operand's Kind matches the
// payload being read; calling the wrong accessor is a programming error.
311 StringRef getToken() const {
312 assert(Kind == k_Token && "Invalid access!");
313 return StringRef(Tok.Data, Tok.Length);
316 bool isTokenSuffix() const {
317 assert(Kind == k_Token && "Invalid access!");
321 const MCExpr *getImm() const {
322 assert(Kind == k_Immediate && "Invalid access!");
326 const MCExpr *getShiftedImmVal() const {
327 assert(Kind == k_ShiftedImm && "Invalid access!");
328 return ShiftedImm.Val;
331 unsigned getShiftedImmShift() const {
332 assert(Kind == k_ShiftedImm && "Invalid access!");
333 return ShiftedImm.ShiftAmount;
336 AArch64CC::CondCode getCondCode() const {
337 assert(Kind == k_CondCode && "Invalid access!");
338 return CondCode.Code;
341 unsigned getFPImm() const {
342 assert(Kind == k_FPImm && "Invalid access!");
346 unsigned getBarrier() const {
347 assert(Kind == k_Barrier && "Invalid access!");
351 unsigned getReg() const override {
352 assert(Kind == k_Register && "Invalid access!");
// First register and register count of a parsed vector list (e.g. {v0-v3}).
356 unsigned getVectorListStart() const {
357 assert(Kind == k_VectorList && "Invalid access!");
358 return VectorList.RegNum;
361 unsigned getVectorListCount() const {
362 assert(Kind == k_VectorList && "Invalid access!");
363 return VectorList.Count;
366 unsigned getVectorIndex() const {
367 assert(Kind == k_VectorIndex && "Invalid access!");
368 return VectorIndex.Val;
371 StringRef getSysReg() const {
372 assert(Kind == k_SysReg && "Invalid access!");
373 return StringRef(SysReg.Data, SysReg.Length);
// Feature bits of the target core, used to conditionalize sysreg name lookup.
376 uint64_t getSysRegFeatureBits() const {
377 assert(Kind == k_SysReg && "Invalid access!");
378 return SysReg.FeatureBits;
381 unsigned getSysCR() const {
382 assert(Kind == k_SysCR && "Invalid access!");
386 unsigned getPrefetch() const {
387 assert(Kind == k_Prefetch && "Invalid access!");
391 AArch64_AM::ShiftExtendType getShiftExtendType() const {
392 assert(Kind == k_ShiftExtend && "Invalid access!");
393 return ShiftExtend.Type;
396 unsigned getShiftExtendAmount() const {
397 assert(Kind == k_ShiftExtend && "Invalid access!");
398 return ShiftExtend.Amount;
// Whether the shift/extend amount was written explicitly in the source.
401 bool hasShiftExtendAmount() const {
402 assert(Kind == k_ShiftExtend && "Invalid access!");
403 return ShiftExtend.HasExplicitAmount;
// Immediate-class predicates used by the generated matcher. Each checks that
// the operand is a constant immediate in the encodable range for one operand
// class. (The `if (!isImm()) return false;` / `if (!MCE) return false;`
// guard lines are elided from this listing.)
406 bool isImm() const override { return Kind == k_Immediate; }
407 bool isMem() const override { return false; }
// Signed 9-bit immediate: [-256, 255]. Used by LDUR/STUR-style addressing.
408 bool isSImm9() const {
411 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
414 int64_t Val = MCE->getValue();
415 return (Val >= -256 && Val < 256);
// Signed 7-bit immediate scaled by 4/8/16 (LDP/STP offsets).
417 bool isSImm7s4() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
426 bool isSImm7s8() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
435 bool isSImm7s16() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Is Expr a :lo12:-style symbolic reference usable as a scaled uimm12 offset?
445 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
446 AArch64MCExpr::VariantKind ELFRefKind;
447 MCSymbolRefExpr::VariantKind DarwinRefKind;
449 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
451 // If we don't understand the expression, assume the best and
452 // let the fixup and relocation code deal with it.
456 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
457 ELFRefKind == AArch64MCExpr::VK_LO12 ||
458 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
459 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
460 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
461 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
462 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
463 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
464 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
465 // Note that we don't range-check the addend. It's adjusted modulo page
466 // size when converted, so there is no "out of range" condition when using
468 return Addend >= 0 && (Addend % Scale) == 0;
469 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
470 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
471 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset scaled by the access size (LDR/STR immediate form).
478 template <int Scale> bool isUImm12Offset() const {
482 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
484 return isSymbolicUImm12Offset(getImm(), Scale);
486 int64_t Val = MCE->getValue();
487 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Fixed-range integer immediates; the name encodes the inclusive range.
490 bool isImm0_7() const {
493 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
496 int64_t Val = MCE->getValue();
497 return (Val >= 0 && Val < 8);
499 bool isImm1_8() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val > 0 && Val < 9);
508 bool isImm0_15() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val >= 0 && Val < 16);
517 bool isImm1_16() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val > 0 && Val < 17);
526 bool isImm0_31() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val >= 0 && Val < 32);
535 bool isImm1_31() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val >= 1 && Val < 32);
544 bool isImm1_32() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 1 && Val < 33);
553 bool isImm0_63() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 0 && Val < 64);
562 bool isImm1_63() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 1 && Val < 64);
571 bool isImm1_64() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 1 && Val < 65);
580 bool isImm0_127() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 0 && Val < 128);
589 bool isImm0_255() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 0 && Val < 256);
598 bool isImm0_65535() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 65536);
607 bool isImm32_63() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 32 && Val < 64);
// Logical-instruction immediates (AND/ORR/EOR bitmask encodings).
616 bool isLogicalImm32() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
// Reject values whose upper 32 bits are neither all-zero nor all-one
// (i.e. not a sign-/zero-extended 32-bit value).
623 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
626 return AArch64_AM::isLogicalImmediate(Val, 32);
628 bool isLogicalImm64() const {
631 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
634 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
636 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// ADD/SUB immediate: uimm12, optionally shifted left by 12, or a suitable
// :lo12:-style symbolic reference.
637 bool isAddSubImm() const {
638 if (!isShiftedImm() && !isImm())
643 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
644 if (isShiftedImm()) {
645 unsigned Shift = ShiftedImm.ShiftAmount;
646 Expr = ShiftedImm.Val;
647 if (Shift != 0 && Shift != 12)
653 AArch64MCExpr::VariantKind ELFRefKind;
654 MCSymbolRefExpr::VariantKind DarwinRefKind;
656 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
657 DarwinRefKind, Addend)) {
658 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
659 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
660 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
661 || ELFRefKind == AArch64MCExpr::VK_LO12
662 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
663 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
664 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
665 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
666 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
667 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
668 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
671 // Otherwise it should be a real immediate in range:
672 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
673 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
675 bool isCondCode() const { return Kind == k_CondCode; }
// FMOV-style 8-bit encoded floating-point immediate (modified-imm type 10).
676 bool isSIMDImmType10() const {
679 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
682 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets; ranges are in bytes, 4-byte aligned
// (alignment checks are on lines elided from this listing).
684 bool isBranchTarget26() const {
687 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
690 int64_t Val = MCE->getValue();
693 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
695 bool isPCRelLabel19() const {
698 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
701 int64_t Val = MCE->getValue();
704 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
706 bool isBranchTarget14() const {
709 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
712 int64_t Val = MCE->getValue();
715 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// True if the immediate is a symbolic reference whose ELF modifier is one of
// AllowedModifiers; Darwin modifiers are never accepted for MOVW/MOVK.
719 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
723 AArch64MCExpr::VariantKind ELFRefKind;
724 MCSymbolRefExpr::VariantKind DarwinRefKind;
726 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
727 DarwinRefKind, Addend)) {
730 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
733 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
734 if (ELFRefKind == AllowedModifiers[i])
// Per-halfword MOVZ/MOVK symbol-modifier predicates (:abs_gN:, :tprel_gN:,
// etc.), one per 16-bit chunk G0..G3.
741 bool isMovZSymbolG3() const {
742 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
743 return isMovWSymbol(Variants);
746 bool isMovZSymbolG2() const {
747 static AArch64MCExpr::VariantKind Variants[] = {
748 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
749 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
750 return isMovWSymbol(Variants);
753 bool isMovZSymbolG1() const {
754 static AArch64MCExpr::VariantKind Variants[] = {
755 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
756 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
757 AArch64MCExpr::VK_DTPREL_G1,
759 return isMovWSymbol(Variants);
762 bool isMovZSymbolG0() const {
763 static AArch64MCExpr::VariantKind Variants[] = {
764 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
765 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
766 return isMovWSymbol(Variants);
769 bool isMovKSymbolG3() const {
770 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
771 return isMovWSymbol(Variants);
774 bool isMovKSymbolG2() const {
775 static AArch64MCExpr::VariantKind Variants[] = {
776 AArch64MCExpr::VK_ABS_G2_NC};
777 return isMovWSymbol(Variants);
780 bool isMovKSymbolG1() const {
781 static AArch64MCExpr::VariantKind Variants[] = {
782 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
783 AArch64MCExpr::VK_DTPREL_G1_NC
785 return isMovWSymbol(Variants);
788 bool isMovKSymbolG0() const {
789 static AArch64MCExpr::VariantKind Variants[] = {
790 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
791 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
793 return isMovWSymbol(Variants);
// Can this constant be encoded as "MOV dst, #imm" via MOVZ with the given
// shift? RegWidth 32 masks the value to its low 32 bits first.
796 template<int RegWidth, int Shift>
797 bool isMOVZMovAlias() const {
798 if (!isImm()) return false;
800 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
801 if (!CE) return false;
802 uint64_t Value = CE->getValue();
805 Value &= 0xffffffffULL;
807 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
808 if (Value == 0 && Shift != 0)
811 return (Value & ~(0xffffULL << Shift)) == 0;
// Same, but via MOVN (the pre-negation/inversion lines are elided here).
814 template<int RegWidth, int Shift>
815 bool isMOVNMovAlias() const {
816 if (!isImm()) return false;
818 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
819 if (!CE) return false;
820 uint64_t Value = CE->getValue();
822 // MOVZ takes precedence over MOVN.
823 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
824 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
829 Value &= 0xffffffffULL;
831 return (Value & ~(0xffffULL << Shift)) == 0;
834 bool isFPImm() const { return Kind == k_FPImm; }
835 bool isBarrier() const { return Kind == k_Barrier; }
836 bool isSysReg() const { return Kind == k_SysReg; }
// System-register predicates: look the name up in the feature-aware mapper
// and accept only names known for the current core.
837 bool isMRSSystemRegister() const {
838 if (!isSysReg()) return false;
840 bool IsKnownRegister;
841 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
842 Mapper.fromString(getSysReg(), IsKnownRegister);
844 return IsKnownRegister;
846 bool isMSRSystemRegister() const {
847 if (!isSysReg()) return false;
849 bool IsKnownRegister;
850 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
851 Mapper.fromString(getSysReg(), IsKnownRegister);
853 return IsKnownRegister;
855 bool isSystemPStateField() const {
856 if (!isSysReg()) return false;
858 bool IsKnownRegister;
859 AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
861 return IsKnownRegister;
// Register-class predicates.
863 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
864 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
// Vector register restricted to the low half of the FPR128 file (V0-V15).
865 bool isVectorRegLo() const {
866 return Kind == k_Register && Reg.isVector &&
867 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
870 bool isGPR32as64() const {
871 return Kind == k_Register && !Reg.isVector &&
872 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
875 bool isGPR64sp0() const {
876 return Kind == k_Register && !Reg.isVector &&
877 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
880 /// Is this a vector list with the type implicit (presumably attached to the
881 /// instruction itself)?
882 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
883 return Kind == k_VectorList && VectorList.Count == NumRegs &&
884 !VectorList.ElementKind;
// Vector list with an explicit type suffix, e.g. { v0.4s, v1.4s }.
887 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
888 bool isTypedVectorList() const {
889 if (Kind != k_VectorList)
891 if (VectorList.Count != NumRegs)
893 if (VectorList.ElementKind != ElementKind)
895 return VectorList.NumElements == NumElements;
// Vector lane-index predicates; the suffix letter gives the element size and
// hence the maximum lane number.
898 bool isVectorIndex1() const {
899 return Kind == k_VectorIndex && VectorIndex.Val == 1;
901 bool isVectorIndexB() const {
902 return Kind == k_VectorIndex && VectorIndex.Val < 16;
904 bool isVectorIndexH() const {
905 return Kind == k_VectorIndex && VectorIndex.Val < 8;
907 bool isVectorIndexS() const {
908 return Kind == k_VectorIndex && VectorIndex.Val < 4;
910 bool isVectorIndexD() const {
911 return Kind == k_VectorIndex && VectorIndex.Val < 2;
913 bool isToken() const override { return Kind == k_Token; }
914 bool isTokenEqual(StringRef Str) const {
915 return Kind == k_Token && getToken() == Str;
917 bool isSysCR() const { return Kind == k_SysCR; }
918 bool isPrefetch() const { return Kind == k_Prefetch; }
919 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// Shift/extend operand-class predicates.
920 bool isShifter() const {
921 if (!isShiftExtend())
924 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
925 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
926 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
927 ST == AArch64_AM::MSL);
929 bool isExtend() const {
930 if (!isShiftExtend())
933 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
934 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
935 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
936 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
937 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
938 ET == AArch64_AM::LSL) &&
939 getShiftExtendAmount() <= 4;
942 bool isExtend64() const {
945 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
946 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
947 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
949 bool isExtendLSL64() const {
952 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
953 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
954 ET == AArch64_AM::LSL) &&
955 getShiftExtendAmount() <= 4;
// Register-offset memory extends: amount must be 0 or log2 of the access
// size in bytes (Width is in bits).
958 template<int Width> bool isMemXExtend() const {
961 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
962 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
963 (getShiftExtendAmount() == Log2_32(Width / 8) ||
964 getShiftExtendAmount() == 0);
967 template<int Width> bool isMemWExtend() const {
970 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
971 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
972 (getShiftExtendAmount() == Log2_32(Width / 8) ||
973 getShiftExtendAmount() == 0);
976 template <unsigned width>
977 bool isArithmeticShifter() const {
981 // An arithmetic shifter is LSL, LSR, or ASR.
982 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
983 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
984 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
987 template <unsigned width>
988 bool isLogicalShifter() const {
992 // A logical shifter is LSL, LSR, ASR or ROR.
993 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
994 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
995 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
996 getShiftExtendAmount() < width;
999 bool isMovImm32Shifter() const {
// NOTE(review): the original comment here described the 64-bit form; fixed
// to match the code below, which accepts only LSL #0 and LSL #16.
1003 // A 32-bit MOVi shifter is LSL of 0 or 16.
1004 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1005 if (ST != AArch64_AM::LSL)
1007 uint64_t Val = getShiftExtendAmount();
1008 return (Val == 0 || Val == 16);
1011 bool isMovImm64Shifter() const {
// NOTE(review): comment was swapped with the 32-bit form; fixed to match the
// code below, which accepts LSL #0/#16/#32/#48.
1015 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1016 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1017 if (ST != AArch64_AM::LSL)
1019 uint64_t Val = getShiftExtendAmount();
1020 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1023 bool isLogicalVecShifter() const {
1027 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1028 unsigned Shift = getShiftExtendAmount();
1029 return getShiftExtendType() == AArch64_AM::LSL &&
1030 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1033 bool isLogicalVecHalfWordShifter() const {
1034 if (!isLogicalVecShifter())
1037 // A logical vector half-word shifter is a left shift by 0 or 8.
1038 unsigned Shift = getShiftExtendAmount();
1039 return getShiftExtendType() == AArch64_AM::LSL &&
1040 (Shift == 0 || Shift == 8);
1043 bool isMoveVecShifter() const {
1044 if (!isShiftExtend())
1047 // A move vector shifter (MSL) is a left shift by 8 or 16.
1048 unsigned Shift = getShiftExtendAmount();
1049 return getShiftExtendType() == AArch64_AM::MSL &&
1050 (Shift == 8 || Shift == 16);
1053 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1054 // to LDUR/STUR when the offset is not legal for the former but is for
1055 // the latter. As such, in addition to checking for being a legal unscaled
1056 // address, also check that it is not a legal scaled address. This avoids
1057 // ambiguity in the matcher.
1059 bool isSImm9OffsetFB() const {
1060 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: page-aligned, within +/-4GB (21-bit page count).
1063 bool isAdrpLabel() const {
1064 // Validation was handled during parsing, so we just sanity check that
1065 // something didn't go haywire.
1069 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1070 int64_t Val = CE->getValue();
1071 int64_t Min = - (4096 * (1LL << (21 - 1)));
1072 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1073 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: signed 21-bit byte offset.
1079 bool isAdrLabel() const {
1080 // Validation was handled during parsing, so we just sanity check that
1081 // something didn't go haywire.
1085 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1086 int64_t Val = CE->getValue();
1087 int64_t Min = - (1LL << (21 - 1));
1088 int64_t Max = ((1LL << (21 - 1)) - 1);
1089 return Val >= Min && Val <= Max;
1095 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1096 // Add as immediates when possible. Null MCExpr = 0.
1098 Inst.addOperand(MCOperand::CreateImm(0));
1099 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1100 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1102 Inst.addOperand(MCOperand::CreateExpr(Expr));
1105 void addRegOperands(MCInst &Inst, unsigned N) const {
1106 assert(N == 1 && "Invalid number of operands!");
1107 Inst.addOperand(MCOperand::CreateReg(getReg()));
1110 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1111 assert(N == 1 && "Invalid number of operands!");
1113 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1115 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1116 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1117 RI->getEncodingValue(getReg()));
1119 Inst.addOperand(MCOperand::CreateReg(Reg));
1122 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1123 assert(N == 1 && "Invalid number of operands!");
1125 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1126 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1129 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1130 assert(N == 1 && "Invalid number of operands!");
1132 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1133 Inst.addOperand(MCOperand::CreateReg(getReg()));
1136 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1137 assert(N == 1 && "Invalid number of operands!");
1138 Inst.addOperand(MCOperand::CreateReg(getReg()));
1141 template <unsigned NumRegs>
1142 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1143 assert(N == 1 && "Invalid number of operands!");
1144 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1145 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1146 unsigned FirstReg = FirstRegs[NumRegs - 1];
1149 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1152 template <unsigned NumRegs>
1153 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1154 assert(N == 1 && "Invalid number of operands!");
1155 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1156 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1157 unsigned FirstReg = FirstRegs[NumRegs - 1];
1160 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1163 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1164 assert(N == 1 && "Invalid number of operands!");
1165 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1168 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1169 assert(N == 1 && "Invalid number of operands!");
1170 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1173 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1174 assert(N == 1 && "Invalid number of operands!");
1175 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1178 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1179 assert(N == 1 && "Invalid number of operands!");
1180 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1183 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1184 assert(N == 1 && "Invalid number of operands!");
1185 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1188 void addImmOperands(MCInst &Inst, unsigned N) const {
1189 assert(N == 1 && "Invalid number of operands!");
1190 // If this is a pageoff symrefexpr with an addend, adjust the addend
1191 // to be only the page-offset portion. Otherwise, just add the expr
1193 addExpr(Inst, getImm());
1196 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1197 assert(N == 2 && "Invalid number of operands!");
1198 if (isShiftedImm()) {
1199 addExpr(Inst, getShiftedImmVal());
1200 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1202 addExpr(Inst, getImm());
1203 Inst.addOperand(MCOperand::CreateImm(0));
1207 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1208 assert(N == 1 && "Invalid number of operands!");
1209 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1212 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1213 assert(N == 1 && "Invalid number of operands!");
1214 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1216 addExpr(Inst, getImm());
1218 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1221 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1222 addImmOperands(Inst, N);
1226 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1227 assert(N == 1 && "Invalid number of operands!");
1228 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1231 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1234 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
// Signed-immediate adders. The SImm7sN variants encode a 7-bit value scaled
// by N, so the constant is divided by the scale (4/8/16) before emission.
// The immediate must already be a constant expression (asserted).
1237 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1238 assert(N == 1 && "Invalid number of operands!");
1239 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1240 assert(MCE && "Invalid constant immediate operand!");
1241 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1244 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1245 assert(N == 1 && "Invalid number of operands!");
1246 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1247 assert(MCE && "Invalid constant immediate operand!");
1248 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1251 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1252 assert(N == 1 && "Invalid number of operands!");
1253 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1254 assert(MCE && "Invalid constant immediate operand!");
1255 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1258 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1261 assert(MCE && "Invalid constant immediate operand!");
1262 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// Ranged unsigned-immediate adders (Imm0_7 through Imm32_63). All of them
// emit the constant value unchanged; range validation is NOT done here —
// presumably the corresponding is* predicates (not visible in this chunk)
// have already accepted the value during operand matching.
1265 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1266 assert(N == 1 && "Invalid number of operands!");
1267 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1268 assert(MCE && "Invalid constant immediate operand!");
1269 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1272 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1275 assert(MCE && "Invalid constant immediate operand!");
1276 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1279 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1280 assert(N == 1 && "Invalid number of operands!");
1281 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1282 assert(MCE && "Invalid constant immediate operand!");
1283 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1286 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1287 assert(N == 1 && "Invalid number of operands!");
1288 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1289 assert(MCE && "Invalid constant immediate operand!");
1290 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1293 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1294 assert(N == 1 && "Invalid number of operands!");
1295 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1296 assert(MCE && "Invalid constant immediate operand!");
1297 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1300 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1301 assert(N == 1 && "Invalid number of operands!");
1302 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1303 assert(MCE && "Invalid constant immediate operand!");
1304 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1307 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1310 assert(MCE && "Invalid constant immediate operand!");
1311 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1314 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1315 assert(N == 1 && "Invalid number of operands!");
1316 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1317 assert(MCE && "Invalid constant immediate operand!");
1318 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1321 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1324 assert(MCE && "Invalid constant immediate operand!");
1325 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1328 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1329 assert(N == 1 && "Invalid number of operands!");
1330 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1331 assert(MCE && "Invalid constant immediate operand!");
1332 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1335 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1336 assert(N == 1 && "Invalid number of operands!");
1337 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1338 assert(MCE && "Invalid constant immediate operand!");
1339 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1342 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1343 assert(N == 1 && "Invalid number of operands!");
1344 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1345 assert(MCE && "Invalid constant immediate operand!");
1346 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1349 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1352 assert(MCE && "Invalid constant immediate operand!");
1353 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1356 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1359 assert(MCE && "Invalid constant immediate operand!");
1360 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Logical immediates are stored in the instruction in the N:immr:imms
// encoded form, so the raw constant is run through encodeLogicalImmediate
// before emission. The 32-bit variant masks the value to its low 32 bits.
1363 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1364 assert(N == 1 && "Invalid number of operands!");
1365 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1366 assert(MCE && "Invalid logical immediate operand!");
1368 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1369 Inst.addOperand(MCOperand::CreateImm(encoding));
1372 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1375 assert(MCE && "Invalid logical immediate operand!");
1376 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1377 Inst.addOperand(MCOperand::CreateImm(encoding));
// AdvSIMD modified-immediate (type 10): the constant is translated to its
// 8-bit encoded form before being attached to the instruction.
1380 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1383 assert(MCE && "Invalid immediate operand!");
1384 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1385 Inst.addOperand(MCOperand::CreateImm(encoding));
// PC-relative branch-target adders. A constant offset is emitted with its
// two low bits shifted off (branch offsets are word-scaled); a symbolic
// label is emitted unmodified for later relocation/fixup.
1388 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1389 // Branch operands don't encode the low bits, so shift them off
1390 // here. If it's a label, however, just put it on directly as there's
1391 // not enough information now to do anything.
1392 assert(N == 1 && "Invalid number of operands!");
1393 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1395 addExpr(Inst, getImm());
1398 assert(MCE && "Invalid constant immediate operand!");
1399 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1402 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1403 // Branch operands don't encode the low bits, so shift them off
1404 // here. If it's a label, however, just put it on directly as there's
1405 // not enough information now to do anything.
1406 assert(N == 1 && "Invalid number of operands!");
1407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1409 addExpr(Inst, getImm());
1412 assert(MCE && "Invalid constant immediate operand!");
1413 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1416 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1417 // Branch operands don't encode the low bits, so shift them off
1418 // here. If it's a label, however, just put it on directly as there's
1419 // not enough information now to do anything.
1420 assert(N == 1 && "Invalid number of operands!");
1421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1423 addExpr(Inst, getImm());
1426 assert(MCE && "Invalid constant immediate operand!");
1427 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Emit the 8-bit encoded floating-point immediate / the barrier option
// value as a plain immediate operand.
1430 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1431 assert(N == 1 && "Invalid number of operands!");
1432 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1435 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1436 assert(N == 1 && "Invalid number of operands!");
1437 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
// System-register / PState operand adders: map the register/field name to
// its encoded bits via the appropriate mapper, then emit as an immediate.
// NOTE(review): the declaration of `Valid` (and presumably an assert on it)
// is elided from this listing — confirm against the full source.
1440 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1441 assert(N == 1 && "Invalid number of operands!");
1444 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1445 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1447 Inst.addOperand(MCOperand::CreateImm(Bits));
1450 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1451 assert(N == 1 && "Invalid number of operands!");
1454 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1455 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1457 Inst.addOperand(MCOperand::CreateImm(Bits));
1460 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1461 assert(N == 1 && "Invalid number of operands!");
1465 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1467 Inst.addOperand(MCOperand::CreateImm(Bits));
// Emit the system-instruction CRn/CRm field, and the prefetch operation
// number, as plain immediates.
1470 void addSysCROperands(MCInst &Inst, unsigned N) const {
1471 assert(N == 1 && "Invalid number of operands!");
1472 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1475 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1476 assert(N == 1 && "Invalid number of operands!");
1477 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Shift/extend adders: pack the shift-extend type and amount into the
// single-immediate encodings used by the instruction forms. The Extend
// variants canonicalize a bare LSL to the UXTW (32-bit) / UXTX (64-bit)
// arithmetic-extend form, since LSL is an alias in that context.
// NOTE(review): the `unsigned Imm =` declaration line in addShifterOperands
// is elided from this listing.
1480 void addShifterOperands(MCInst &Inst, unsigned N) const {
1481 assert(N == 1 && "Invalid number of operands!");
1483 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1484 Inst.addOperand(MCOperand::CreateImm(Imm));
1487 void addExtendOperands(MCInst &Inst, unsigned N) const {
1488 assert(N == 1 && "Invalid number of operands!");
1489 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1490 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1491 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1492 Inst.addOperand(MCOperand::CreateImm(Imm));
1495 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1496 assert(N == 1 && "Invalid number of operands!");
1497 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1498 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1499 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1500 Inst.addOperand(MCOperand::CreateImm(Imm));
// Memory-operand extend adders: emit two immediates, a "signed extend" flag
// (SXTW/SXTX) and a "do shift" flag. The 8-bit variant keys the shift flag
// off whether an explicit amount was written rather than its value, because
// for 8-bit accesses both DoShift and NoShift encode a shift of 0.
1503 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1504 assert(N == 2 && "Invalid number of operands!");
1505 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1506 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1507 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1508 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1511 // For 8-bit load/store instructions with a register offset, both the
1512 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1513 // they're disambiguated by whether the shift was explicit or implicit rather
1515 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1516 assert(N == 2 && "Invalid number of operands!");
1517 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1518 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1519 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1520 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
// MOV-alias adders: extract the 16-bit chunk of the constant selected by
// Shift. MOVN inverts the value first (~Value), matching its "move wide
// with NOT" semantics.
// NOTE(review): the declaration of `Shift` is elided from this listing —
// presumably a template/parameter of these methods; confirm in full source.
1524 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1525 assert(N == 1 && "Invalid number of operands!");
1527 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1528 uint64_t Value = CE->getValue();
1529 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1533 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1534 assert(N == 1 && "Invalid number of operands!");
1536 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1537 uint64_t Value = CE->getValue();
1538 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
// Debug/diagnostic dump of this operand; defined out-of-line below.
1541 void print(raw_ostream &OS) const override;
// Static factory functions. Each one allocates an AArch64Operand of the
// matching kind via make_unique, fills in the kind-specific union fields,
// and (in the elided trailing lines of each factory) sets the source
// locations and returns the operand.
1543 static std::unique_ptr<AArch64Operand>
1544 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1545 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1546 Op->Tok.Data = Str.data();
1547 Op->Tok.Length = Str.size();
1548 Op->Tok.IsSuffix = IsSuffix;
1554 static std::unique_ptr<AArch64Operand>
1555 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1556 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1557 Op->Reg.RegNum = RegNum;
1558 Op->Reg.isVector = isVector;
1564 static std::unique_ptr<AArch64Operand>
1565 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1566 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1567 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1568 Op->VectorList.RegNum = RegNum;
1569 Op->VectorList.Count = Count;
1570 Op->VectorList.NumElements = NumElements;
1571 Op->VectorList.ElementKind = ElementKind;
1577 static std::unique_ptr<AArch64Operand>
1578 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1579 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1580 Op->VectorIndex.Val = Idx;
1586 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1587 SMLoc E, MCContext &Ctx) {
1588 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1595 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1596 unsigned ShiftAmount,
1599 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1600 Op->ShiftedImm .Val = Val;
1601 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1607 static std::unique_ptr<AArch64Operand>
1608 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1609 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1610 Op->CondCode.Code = Code;
1616 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1618 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1619 Op->FPImm.Val = Val;
1625 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1627 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1628 Op->Barrier.Val = Val;
1634 static std::unique_ptr<AArch64Operand>
1635 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1636 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1637 Op->SysReg.Data = Str.data();
1638 Op->SysReg.Length = Str.size();
1639 Op->SysReg.FeatureBits = FeatureBits;
1645 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1646 SMLoc E, MCContext &Ctx) {
1647 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1648 Op->SysCRImm.Val = Val;
1654 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1656 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1657 Op->Prefetch.Val = Val;
1663 static std::unique_ptr<AArch64Operand>
1664 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1665 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1667 Op->ShiftExtend.Type = ShOp;
1668 Op->ShiftExtend.Amount = Val;
1669 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1676 } // end anonymous namespace.
// Human-readable dump of an operand, one format per operand kind.
// NOTE(review): the switch statement and several case labels are elided
// from this listing; the visible fragments are the per-kind bodies.
1678 void AArch64Operand::print(raw_ostream &OS) const {
1681 OS << "<fpimm " << getFPImm() << "("
1682 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
// Barrier: print the symbolic name if the mapper recognizes the value.
1686 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1688 OS << "<barrier " << Name << ">";
1690 OS << "<barrier invalid #" << getBarrier() << ">";
1694 getImm()->print(OS);
1696 case k_ShiftedImm: {
1697 unsigned Shift = getShiftedImmShift();
1698 OS << "<shiftedimm ";
1699 getShiftedImmVal()->print(OS);
1700 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1704 OS << "<condcode " << getCondCode() << ">";
1707 OS << "<register " << getReg() << ">";
1709 case k_VectorList: {
1710 OS << "<vectorlist ";
1711 unsigned Reg = getVectorListStart();
1712 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1713 OS << Reg + i << " ";
1718 OS << "<vectorindex " << getVectorIndex() << ">";
1721 OS << "<sysreg: " << getSysReg() << '>';
1724 OS << "'" << getToken() << "'";
1727 OS << "c" << getSysCR();
// Prefetch: symbolic name when recognized, otherwise the raw number.
1731 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1733 OS << "<prfop " << Name << ">";
1735 OS << "<prfop invalid #" << getPrefetch() << ">";
1738 case k_ShiftExtend: {
1739 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1740 << getShiftExtendAmount();
1741 if (!hasShiftExtendAmount())
1749 /// @name Auto-generated Match Functions
// Generated by TableGen from the register definitions.
1752 static unsigned MatchRegisterName(StringRef Name);
// Map a "vN" vector-register name to the corresponding Q register.
// Vector registers are canonicalized to their full 128-bit Q form here;
// the element arrangement is carried separately as a kind suffix.
1756 static unsigned matchVectorRegName(StringRef Name) {
1757 return StringSwitch<unsigned>(Name)
1758 .Case("v0", AArch64::Q0)
1759 .Case("v1", AArch64::Q1)
1760 .Case("v2", AArch64::Q2)
1761 .Case("v3", AArch64::Q3)
1762 .Case("v4", AArch64::Q4)
1763 .Case("v5", AArch64::Q5)
1764 .Case("v6", AArch64::Q6)
1765 .Case("v7", AArch64::Q7)
1766 .Case("v8", AArch64::Q8)
1767 .Case("v9", AArch64::Q9)
1768 .Case("v10", AArch64::Q10)
1769 .Case("v11", AArch64::Q11)
1770 .Case("v12", AArch64::Q12)
1771 .Case("v13", AArch64::Q13)
1772 .Case("v14", AArch64::Q14)
1773 .Case("v15", AArch64::Q15)
1774 .Case("v16", AArch64::Q16)
1775 .Case("v17", AArch64::Q17)
1776 .Case("v18", AArch64::Q18)
1777 .Case("v19", AArch64::Q19)
1778 .Case("v20", AArch64::Q20)
1779 .Case("v21", AArch64::Q21)
1780 .Case("v22", AArch64::Q22)
1781 .Case("v23", AArch64::Q23)
1782 .Case("v24", AArch64::Q24)
1783 .Case("v25", AArch64::Q25)
1784 .Case("v26", AArch64::Q26)
1785 .Case("v27", AArch64::Q27)
1786 .Case("v28", AArch64::Q28)
1787 .Case("v29", AArch64::Q29)
1788 .Case("v30", AArch64::Q30)
1789 .Case("v31", AArch64::Q31)
// Validate a vector arrangement suffix (e.g. ".8b", ".4s"); comparison is
// case-insensitive via Name.lower().
// NOTE(review): the .Case entries of this StringSwitch are elided from this
// listing.
1793 static bool isValidVectorKind(StringRef Name) {
1794 return StringSwitch<bool>(Name.lower())
1804 // Accept the width neutral ones, too, for verbose syntax. If those
1805 // aren't used in the right places, the token operand won't match so
1806 // all will work out.
// Decompose an already-validated vector kind suffix into its element kind
// (the final letter, lower-cased) and lane count (decimal digits after the
// leading '.'). A two-character suffix (e.g. ".b") has no lane count.
1814 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1815 char &ElementKind) {
1816 assert(isValidVectorKind(Name));
1818 ElementKind = Name.lower()[Name.size() - 1];
1821 if (Name.size() == 2)
1824 // Parse the lane count
1825 Name = Name.drop_front();
1826 while (isdigit(Name.front())) {
1827 NumElements = 10 * NumElements + (Name.front() - '0');
1828 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true (failure) when tryParseRegister yields the -1 sentinel.
1832 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1834 StartLoc = getLoc();
1835 RegNo = tryParseRegister();
1836 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1837 return (RegNo == (unsigned)-1);
1840 // Matches a register name or register alias previously defined by '.req'
// First tries the real register tables (vector vs. scalar), then falls back
// to the .req alias map; an alias only applies if its vector-ness matches.
1841 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1843 unsigned RegNum = isVector ? matchVectorRegName(Name)
1844 : MatchRegisterName(Name);
1847 // Check for aliases registered via .req. Canonicalize to lower case.
1848 // That's more consistent since register names are case insensitive, and
1849 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1850 auto Entry = RegisterReqs.find(Name.lower())
1851 if (Entry == RegisterReqs.end())
1853 // set RegNum if the match is the right kind of register
1854 if (isVector == Entry->getValue().first)
1855 RegNum = Entry->getValue().second;
1860 /// tryParseRegister - Try to parse a register name. The token must be an
1861 /// Identifier when called, and if it is a register name the token is eaten and
1862 /// the register is added to the operand list.
1863 int AArch64AsmParser::tryParseRegister() {
1864 const AsmToken &Tok = Parser.getTok();
1865 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Register names are case-insensitive; match on the lower-cased spelling.
1867 std::string lowerCase = Tok.getString().lower();
1868 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1869 // Also handle a few aliases of registers.
// fp/lr name x29/x30; "x31"/"w31" are accepted as the zero registers.
1871 RegNum = StringSwitch<unsigned>(lowerCase)
1872 .Case("fp", AArch64::FP)
1873 .Case("lr", AArch64::LR)
1874 .Case("x31", AArch64::XZR)
1875 .Case("w31", AArch64::WZR)
1881 Parser.Lex(); // Eat identifier token.
1885 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1886 /// kind specifier. If it is a register specifier, eat the token and return it.
1887 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1888 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1889 TokError("vector register expected");
1893 StringRef Name = Parser.getTok().getString();
1894 // If there is a kind specifier, it's separated from the register name by
// ... a '.' — split into register head and arrangement suffix (Kind).
1896 size_t Start = 0, Next = Name.find('.');
1897 StringRef Head = Name.slice(Start, Next);
1898 unsigned RegNum = matchRegisterNameAlias(Head, true);
// A suffix, if present, includes the leading '.' and must be a valid kind.
1901 if (Next != StringRef::npos) {
1902 Kind = Name.slice(Next, StringRef::npos);
1903 if (!isValidVectorKind(Kind)) {
1904 TokError("invalid vector kind qualifier");
1908 Parser.Lex(); // Eat the register token.
1913 TokError("vector register expected");
1917 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Accepts "cN"/"CN" with 0 <= N <= 15 and pushes a SysCR operand.
1918 AArch64AsmParser::OperandMatchResultTy
1919 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1922 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1923 Error(S, "Expected cN operand where 0 <= N <= 15");
1924 return MatchOperand_ParseFail;
1927 StringRef Tok = Parser.getTok().getIdentifier();
1928 if (Tok[0] != 'c' && Tok[0] != 'C') {
1929 Error(S, "Expected cN operand where 0 <= N <= 15");
1930 return MatchOperand_ParseFail;
// Parse the decimal number after the 'c'/'C' prefix.
1934 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1935 if (BadNum || CRNum > 15) {
1936 Error(S, "Expected cN operand where 0 <= N <= 15");
1937 return MatchOperand_ParseFail;
1940 Parser.Lex(); // Eat identifier token.
1942 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1943 return MatchOperand_Success;
1946 /// tryParsePrefetch - Try to parse a prefetch operand.
// Accepts either a (possibly '#'-prefixed) 5-bit integer or a named PRFM
// hint mapped via PRFMMapper.
1947 AArch64AsmParser::OperandMatchResultTy
1948 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1950 const AsmToken &Tok = Parser.getTok();
1951 // Either an identifier for named values or a 5-bit immediate.
1952 bool Hash = Tok.is(AsmToken::Hash);
1953 if (Hash || Tok.is(AsmToken::Integer)) {
1955 Parser.Lex(); // Eat hash token.
1956 const MCExpr *ImmVal;
1957 if (getParser().parseExpression(ImmVal))
1958 return MatchOperand_ParseFail;
// The immediate form must fold to a constant.
1960 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1962 TokError("immediate value expected for prefetch operand");
1963 return MatchOperand_ParseFail;
1965 unsigned prfop = MCE->getValue();
1967 TokError("prefetch operand out of range, [0,31] expected");
1968 return MatchOperand_ParseFail;
1971 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1972 return MatchOperand_Success;
// Otherwise a symbolic hint name (e.g. pldl1keep) is required.
1975 if (Tok.isNot(AsmToken::Identifier)) {
1976 TokError("pre-fetch hint expected");
1977 return MatchOperand_ParseFail;
1981 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1983 TokError("pre-fetch hint expected");
1984 return MatchOperand_ParseFail;
1987 Parser.Lex(); // Eat identifier token.
1988 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1989 return MatchOperand_Success;
1992 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// Parses a symbolic immediate and checks the modifier is one ADRP accepts
// (@page/@gotpage/@tlvppage on Darwin; :got:/:gottprel:/:tlsdesc: page
// variants on ELF); a bare symbol defaults to the ELF ABS_PAGE form.
1994 AArch64AsmParser::OperandMatchResultTy
1995 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1999 if (Parser.getTok().is(AsmToken::Hash)) {
2000 Parser.Lex(); // Eat hash token.
2003 if (parseSymbolicImmVal(Expr))
2004 return MatchOperand_ParseFail;
2006 AArch64MCExpr::VariantKind ELFRefKind;
2007 MCSymbolRefExpr::VariantKind DarwinRefKind;
2009 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2010 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2011 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2012 // No modifier was specified at all; this is the syntax for an ELF basic
2013 // ADRP relocation (unfortunately).
2015 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2016 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2017 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2019 Error(S, "gotpage label reference not allowed an addend");
2020 return MatchOperand_ParseFail;
2021 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2022 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2023 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2024 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2025 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2026 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE {
2027 // The operand must be an @page or @gotpage qualified symbolref.
2028 Error(S, "page or gotpage label reference expected");
2029 return MatchOperand_ParseFail;
2033 // We have either a label reference possibly with addend or an immediate. The
2034 // addend is a raw value here. The linker will adjust it to only reference the
2036 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2037 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2039 return MatchOperand_Success;
2042 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// ADR targets are byte-granular; any expression is accepted as-is (no
// modifier classification, unlike the ADRP path).
2044 AArch64AsmParser::OperandMatchResultTy
2045 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2049 if (Parser.getTok().is(AsmToken::Hash)) {
2050 Parser.Lex(); // Eat hash token.
2053 if (getParser().parseExpression(Expr))
2054 return MatchOperand_ParseFail;
2056 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2057 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2059 return MatchOperand_Success;
2062 /// tryParseFPImm - A floating point immediate expression operand.
// Accepts an optional '#' and '-' sign, then either a real literal
// (converted to the 8-bit FP immediate encoding) or an integer: a hex
// integer is treated as an already-encoded value in [0,255], any other
// integer is re-parsed as a floating-point value.
2063 AArch64AsmParser::OperandMatchResultTy
2064 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2068 if (Parser.getTok().is(AsmToken::Hash)) {
2069 Parser.Lex(); // Eat '#'
2073 // Handle negation, as that still comes through as a separate token.
2074 bool isNegative = false;
2075 if (Parser.getTok().is(AsmToken::Minus)) {
2079 const AsmToken &Tok = Parser.getTok();
2080 if (Tok.is(AsmToken::Real)) {
2081 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2082 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2083 // If we had a '-' in front, toggle the sign bit.
2084 IntVal ^= (uint64_t)isNegative << 63;
// getFP64Imm returns -1 when the value has no 8-bit FP encoding.
2085 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2086 Parser.Lex(); // Eat the token.
2087 // Check for out of range values. As an exception, we let Zero through,
2088 // as we handle that special case in post-processing before matching in
2089 // order to use the zero register for it.
2090 if (Val == -1 && !RealVal.isZero()) {
2091 TokError("expected compatible register or floating-point constant");
2092 return MatchOperand_ParseFail;
2094 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2095 return MatchOperand_Success;
2097 if (Tok.is(AsmToken::Integer)) {
// Non-negative hex integers are the raw 8-bit encoding directly.
2099 if (!isNegative && Tok.getString().startswith("0x")) {
2100 Val = Tok.getIntVal();
2101 if (Val > 255 || Val < 0) {
2102 TokError("encoded floating point value out of range");
2103 return MatchOperand_ParseFail;
// Otherwise interpret the integer text as a floating-point value.
2106 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2107 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2108 // If we had a '-' in front, toggle the sign bit.
2109 IntVal ^= (uint64_t)isNegative << 63;
2110 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2112 Parser.Lex(); // Eat the token.
2113 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2114 return MatchOperand_Success;
2118 return MatchOperand_NoMatch;
2120 TokError("invalid floating point immediate");
2121 return MatchOperand_ParseFail;
2124 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
// Parses "#imm" optionally followed by ", lsl #N". Without the shift
// clause, a constant of the form X000 (hex) with X <= 0xfff is folded to
// (X, lsl #12) automatically.
2125 AArch64AsmParser::OperandMatchResultTy
2126 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2129 if (Parser.getTok().is(AsmToken::Hash))
2130 Parser.Lex(); // Eat '#'
2131 else if (Parser.getTok().isNot(AsmToken::Integer))
2132 // Operand should start from # or should be integer, emit error otherwise.
2133 return MatchOperand_NoMatch;
2136 if (parseSymbolicImmVal(Imm))
2137 return MatchOperand_ParseFail;
2138 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2139 uint64_t ShiftAmount = 0;
2140 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
// Fold a value with 12 low zero bits into the shifted-by-12 form.
2142 int64_t Val = MCE->getValue();
2143 if (Val > 0xfff && (Val & 0xfff) == 0) {
2144 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2148 SMLoc E = Parser.getTok().getLoc();
2149 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2151 return MatchOperand_Success;
2157 // The optional operand must be "lsl #N" where N is non-negative.
2158 if (!Parser.getTok().is(AsmToken::Identifier) ||
2159 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2160 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2161 return MatchOperand_ParseFail;
// The '#' before the shift amount is optional.
2167 if (Parser.getTok().is(AsmToken::Hash)) {
2171 if (Parser.getTok().isNot(AsmToken::Integer)) {
2172 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2173 return MatchOperand_ParseFail;
2176 int64_t ShiftAmount = Parser.getTok().getIntVal();
2178 if (ShiftAmount < 0) {
2179 Error(Parser.getTok().getLoc(), "positive shift amount required");
2180 return MatchOperand_ParseFail;
2182 Parser.Lex(); // Eat the number
2184 SMLoc E = Parser.getTok().getLoc();
2185 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2186 S, E, getContext()));
2187 return MatchOperand_Success;
2190 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive lookup; cs/cc are the legacy spellings of hs/lo.
// Unrecognized strings map to AArch64CC::Invalid.
2191 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2192 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2193 .Case("eq", AArch64CC::EQ)
2194 .Case("ne", AArch64CC::NE)
2195 .Case("cs", AArch64CC::HS)
2196 .Case("hs", AArch64CC::HS)
2197 .Case("cc", AArch64CC::LO)
2198 .Case("lo", AArch64CC::LO)
2199 .Case("mi", AArch64CC::MI)
2200 .Case("pl", AArch64CC::PL)
2201 .Case("vs", AArch64CC::VS)
2202 .Case("vc", AArch64CC::VC)
2203 .Case("hi", AArch64CC::HI)
2204 .Case("ls", AArch64CC::LS)
2205 .Case("ge", AArch64CC::GE)
2206 .Case("lt", AArch64CC::LT)
2207 .Case("gt", AArch64CC::GT)
2208 .Case("le", AArch64CC::LE)
2209 .Case("al", AArch64CC::AL)
2210 .Case("nv", AArch64CC::NV)
2211 .Default(AArch64CC::Invalid);
2215 /// parseCondCode - Parse a Condition Code operand.
// With invertCondCode set (used by alias forms), the parsed code is
// inverted; AL/NV cannot be inverted and are rejected.
2216 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2217 bool invertCondCode) {
2219 const AsmToken &Tok = Parser.getTok();
2220 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2222 StringRef Cond = Tok.getString();
2223 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2224 if (CC == AArch64CC::Invalid)
2225 return TokError("invalid condition code");
2226 Parser.Lex(); // Eat identifier token.
2228 if (invertCondCode) {
2229 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2230 return TokError("condition codes AL and NV are invalid for this instruction")
2231 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2235 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2239 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2240 /// them if present.
/// Recognizes both shift operators (lsl/lsr/asr/ror/msl), which require an
/// immediate amount, and extend operators (uxtb..sxtx), for which the
/// immediate is optional and defaults to an implicit #0.
2241 AArch64AsmParser::OperandMatchResultTy
2242 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2243 const AsmToken &Tok = Parser.getTok();
2244 std::string LowerID = Tok.getString().lower();
2245 AArch64_AM::ShiftExtendType ShOp =
2246 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2247 .Case("lsl", AArch64_AM::LSL)
2248 .Case("lsr", AArch64_AM::LSR)
2249 .Case("asr", AArch64_AM::ASR)
2250 .Case("ror", AArch64_AM::ROR)
2251 .Case("msl", AArch64_AM::MSL)
2252 .Case("uxtb", AArch64_AM::UXTB)
2253 .Case("uxth", AArch64_AM::UXTH)
2254 .Case("uxtw", AArch64_AM::UXTW)
2255 .Case("uxtx", AArch64_AM::UXTX)
2256 .Case("sxtb", AArch64_AM::SXTB)
2257 .Case("sxth", AArch64_AM::SXTH)
2258 .Case("sxtw", AArch64_AM::SXTW)
2259 .Case("sxtx", AArch64_AM::SXTX)
2260 .Default(AArch64_AM::InvalidShiftExtend)
2262 if (ShOp == AArch64_AM::InvalidShiftExtend)
2263 return MatchOperand_NoMatch;
2265 SMLoc S = Tok.getLoc();
// Accept the amount with or without a leading '#'.
2268 bool Hash = getLexer().is(AsmToken::Hash);
2269 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2270 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2271 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2272 ShOp == AArch64_AM::MSL) {
2273 // We expect a number here.
2274 TokError("expected #imm after shift specifier");
2275 return MatchOperand_ParseFail;
2278 // "extend" type operations don't need an immediate, #0 is implicit.
2279 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2281 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2282 return MatchOperand_Success;
2286 Parser.Lex(); // Eat the '#'.
2288 // Make sure we do actually have a number
2289 if (!Parser.getTok().is(AsmToken::Integer)) {
2290 Error(Parser.getTok().getLoc(),
2291 "expected integer shift amount");
2292 return MatchOperand_ParseFail;
2295 const MCExpr *ImmVal;
2296 if (getParser().parseExpression(ImmVal))
2297 return MatchOperand_ParseFail;
// The shift amount must fold to a constant; symbolic amounts are invalid.
2299 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2301 TokError("expected #imm after shift specifier");
2302 return MatchOperand_ParseFail;
2305 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2306 Operands.push_back(AArch64Operand::CreateShiftExtend(
2307 ShOp, MCE->getValue(), true, S, E, getContext()));
2308 return MatchOperand_Success;
2311 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2312 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true on error. Rewrites the mnemonic to "sys" and pushes the
/// op1/Cn/Cm/op2 encoding operands matching the named alias, then parses the
/// optional trailing register operand (required for all non-"all" ops).
2313 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2314 OperandVector &Operands) {
2315 if (Name.find('.') != StringRef::npos)
2316 return TokError("invalid operand");
2320 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2322 const AsmToken &Tok = Parser.getTok();
2323 StringRef Op = Tok.getString();
2324 SMLoc S = Tok.getLoc();
2326 const MCExpr *Expr = nullptr;
// Expands to the four SYS encoding operands: #op1, Cn, Cm, #op2.
2328 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2330 Expr = MCConstantExpr::Create(op1, getContext()); \
2331 Operands.push_back( \
2332 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2333 Operands.push_back( \
2334 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2335 Operands.push_back( \
2336 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2337 Expr = MCConstantExpr::Create(op2, getContext()); \
2338 Operands.push_back( \
2339 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2342 if (Mnemonic == "ic") {
2343 if (!Op.compare_lower("ialluis")) {
2344 // SYS #0, C7, C1, #0
2345 SYS_ALIAS(0, 7, 1, 0);
2346 } else if (!Op.compare_lower("iallu")) {
2347 // SYS #0, C7, C5, #0
2348 SYS_ALIAS(0, 7, 5, 0);
2349 } else if (!Op.compare_lower("ivau")) {
2350 // SYS #3, C7, C5, #1
2351 SYS_ALIAS(3, 7, 5, 1);
2353 return TokError("invalid operand for IC instruction");
2355 } else if (Mnemonic == "dc") {
2356 if (!Op.compare_lower("zva")) {
2357 // SYS #3, C7, C4, #1
2358 SYS_ALIAS(3, 7, 4, 1);
2359 } else if (!Op.compare_lower("ivac")) {
2360 // SYS #0, C7, C6, #1
2361 SYS_ALIAS(0, 7, 6, 1);
2362 } else if (!Op.compare_lower("isw")) {
2363 // SYS #0, C7, C6, #2
2364 SYS_ALIAS(0, 7, 6, 2);
2365 } else if (!Op.compare_lower("cvac")) {
2366 // SYS #3, C7, C10, #1
2367 SYS_ALIAS(3, 7, 10, 1);
2368 } else if (!Op.compare_lower("csw")) {
2369 // SYS #0, C7, C10, #2
2370 SYS_ALIAS(0, 7, 10, 2);
2371 } else if (!Op.compare_lower("cvau")) {
2372 // SYS #3, C7, C11, #1
2373 SYS_ALIAS(3, 7, 11, 1);
2374 } else if (!Op.compare_lower("civac")) {
2375 // SYS #3, C7, C14, #1
2376 SYS_ALIAS(3, 7, 14, 1);
2377 } else if (!Op.compare_lower("cisw")) {
2378 // SYS #0, C7, C14, #2
2379 SYS_ALIAS(0, 7, 14, 2);
2381 return TokError("invalid operand for DC instruction");
2383 } else if (Mnemonic == "at") {
2384 if (!Op.compare_lower("s1e1r")) {
2385 // SYS #0, C7, C8, #0
2386 SYS_ALIAS(0, 7, 8, 0);
2387 } else if (!Op.compare_lower("s1e2r")) {
2388 // SYS #4, C7, C8, #0
2389 SYS_ALIAS(4, 7, 8, 0);
2390 } else if (!Op.compare_lower("s1e3r")) {
2391 // SYS #6, C7, C8, #0
2392 SYS_ALIAS(6, 7, 8, 0);
2393 } else if (!Op.compare_lower("s1e1w")) {
2394 // SYS #0, C7, C8, #1
2395 SYS_ALIAS(0, 7, 8, 1);
2396 } else if (!Op.compare_lower("s1e2w")) {
2397 // SYS #4, C7, C8, #1
2398 SYS_ALIAS(4, 7, 8, 1);
2399 } else if (!Op.compare_lower("s1e3w")) {
2400 // SYS #6, C7, C8, #1
2401 SYS_ALIAS(6, 7, 8, 1);
2402 } else if (!Op.compare_lower("s1e0r")) {
2403 // SYS #0, C7, C8, #2
2404 SYS_ALIAS(0, 7, 8, 2);
2405 } else if (!Op.compare_lower("s1e0w")) {
2406 // SYS #0, C7, C8, #3
2407 SYS_ALIAS(0, 7, 8, 3);
2408 } else if (!Op.compare_lower("s12e1r")) {
2409 // SYS #4, C7, C8, #4
2410 SYS_ALIAS(4, 7, 8, 4);
2411 } else if (!Op.compare_lower("s12e1w")) {
2412 // SYS #4, C7, C8, #5
2413 SYS_ALIAS(4, 7, 8, 5);
2414 } else if (!Op.compare_lower("s12e0r")) {
2415 // SYS #4, C7, C8, #6
2416 SYS_ALIAS(4, 7, 8, 6);
2417 } else if (!Op.compare_lower("s12e0w")) {
2418 // SYS #4, C7, C8, #7
2419 SYS_ALIAS(4, 7, 8, 7);
2421 return TokError("invalid operand for AT instruction");
2423 } else if (Mnemonic == "tlbi") {
2424 if (!Op.compare_lower("vmalle1is")) {
2425 // SYS #0, C8, C3, #0
2426 SYS_ALIAS(0, 8, 3, 0);
2427 } else if (!Op.compare_lower("alle2is")) {
2428 // SYS #4, C8, C3, #0
2429 SYS_ALIAS(4, 8, 3, 0);
2430 } else if (!Op.compare_lower("alle3is")) {
2431 // SYS #6, C8, C3, #0
2432 SYS_ALIAS(6, 8, 3, 0);
2433 } else if (!Op.compare_lower("vae1is")) {
2434 // SYS #0, C8, C3, #1
2435 SYS_ALIAS(0, 8, 3, 1);
2436 } else if (!Op.compare_lower("vae2is")) {
2437 // SYS #4, C8, C3, #1
2438 SYS_ALIAS(4, 8, 3, 1);
2439 } else if (!Op.compare_lower("vae3is")) {
2440 // SYS #6, C8, C3, #1
2441 SYS_ALIAS(6, 8, 3, 1);
2442 } else if (!Op.compare_lower("aside1is")) {
2443 // SYS #0, C8, C3, #2
2444 SYS_ALIAS(0, 8, 3, 2);
2445 } else if (!Op.compare_lower("vaae1is")) {
2446 // SYS #0, C8, C3, #3
2447 SYS_ALIAS(0, 8, 3, 3);
2448 } else if (!Op.compare_lower("alle1is")) {
2449 // SYS #4, C8, C3, #4
2450 SYS_ALIAS(4, 8, 3, 4);
2451 } else if (!Op.compare_lower("vale1is")) {
2452 // SYS #0, C8, C3, #5
2453 SYS_ALIAS(0, 8, 3, 5);
2454 } else if (!Op.compare_lower("vaale1is")) {
2455 // SYS #0, C8, C3, #7
2456 SYS_ALIAS(0, 8, 3, 7);
2457 } else if (!Op.compare_lower("vmalle1")) {
2458 // SYS #0, C8, C7, #0
2459 SYS_ALIAS(0, 8, 7, 0);
2460 } else if (!Op.compare_lower("alle2")) {
2461 // SYS #4, C8, C7, #0
2462 SYS_ALIAS(4, 8, 7, 0);
2463 } else if (!Op.compare_lower("vale2is")) {
2464 // SYS #4, C8, C3, #5
2465 SYS_ALIAS(4, 8, 3, 5);
2466 } else if (!Op.compare_lower("vale3is")) {
2467 // SYS #6, C8, C3, #5
2468 SYS_ALIAS(6, 8, 3, 5);
2469 } else if (!Op.compare_lower("alle3")) {
2470 // SYS #6, C8, C7, #0
2471 SYS_ALIAS(6, 8, 7, 0);
2472 } else if (!Op.compare_lower("vae1")) {
2473 // SYS #0, C8, C7, #1
2474 SYS_ALIAS(0, 8, 7, 1);
2475 } else if (!Op.compare_lower("vae2")) {
2476 // SYS #4, C8, C7, #1
2477 SYS_ALIAS(4, 8, 7, 1);
2478 } else if (!Op.compare_lower("vae3")) {
2479 // SYS #6, C8, C7, #1
2480 SYS_ALIAS(6, 8, 7, 1);
2481 } else if (!Op.compare_lower("aside1")) {
2482 // SYS #0, C8, C7, #2
2483 SYS_ALIAS(0, 8, 7, 2);
2484 } else if (!Op.compare_lower("vaae1")) {
2485 // SYS #0, C8, C7, #3
2486 SYS_ALIAS(0, 8, 7, 3);
2487 } else if (!Op.compare_lower("alle1")) {
2488 // SYS #4, C8, C7, #4
2489 SYS_ALIAS(4, 8, 7, 4);
2490 } else if (!Op.compare_lower("vale1")) {
2491 // SYS #0, C8, C7, #5
2492 SYS_ALIAS(0, 8, 7, 5);
2493 } else if (!Op.compare_lower("vale2")) {
2494 // SYS #4, C8, C7, #5
2495 SYS_ALIAS(4, 8, 7, 5);
2496 } else if (!Op.compare_lower("vale3")) {
2497 // SYS #6, C8, C7, #5
2498 SYS_ALIAS(6, 8, 7, 5);
2499 } else if (!Op.compare_lower("vaale1")) {
2500 // SYS #0, C8, C7, #7
2501 SYS_ALIAS(0, 8, 7, 7);
2502 } else if (!Op.compare_lower("ipas2e1")) {
2503 // SYS #4, C8, C4, #1
2504 SYS_ALIAS(4, 8, 4, 1);
2505 } else if (!Op.compare_lower("ipas2le1")) {
2506 // SYS #4, C8, C4, #5
2507 SYS_ALIAS(4, 8, 4, 5);
2508 } else if (!Op.compare_lower("ipas2e1is")) {
2509 // SYS #4, C8, C0, #1
2510 SYS_ALIAS(4, 8, 0, 1);
2511 } else if (!Op.compare_lower("ipas2le1is")) {
2512 // SYS #4, C8, C0, #5
2513 SYS_ALIAS(4, 8, 0, 5);
2514 } else if (!Op.compare_lower("vmalls12e1")) {
2515 // SYS #4, C8, C7, #6
2516 SYS_ALIAS(4, 8, 7, 6);
2517 } else if (!Op.compare_lower("vmalls12e1is")) {
2518 // SYS #4, C8, C3, #6
2519 SYS_ALIAS(4, 8, 3, 6);
2521 return TokError("invalid operand for TLBI instruction");
2527 Parser.Lex(); // Eat operand.
// Ops whose name contains "all" operate on everything and take no register;
// every other op requires a register operand.
2529 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2530 bool HasRegister = false;
2532 // Check for the optional register operand.
2533 if (getLexer().is(AsmToken::Comma)) {
2534 Parser.Lex(); // Eat comma.
2536 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2537 return TokError("expected register operand");
2542 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2543 Parser.eatToEndOfStatement();
2544 return TokError("unexpected token in argument list");
2547 if (ExpectRegister && !HasRegister) {
2548 return TokError("specified " + Mnemonic + " op requires a register");
2550 else if (!ExpectRegister && HasRegister) {
2551 return TokError("specified " + Mnemonic + " op does not use a register");
2554 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DMB/DSB/ISB). Accepts either an immediate in the range [0, 15] or a
/// named barrier option; ISB only accepts 'sy' as a named option.
2558 AArch64AsmParser::OperandMatchResultTy
2559 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2560 const AsmToken &Tok = Parser.getTok();
2562 // Can be either a #imm style literal or an option name
2563 bool Hash = Tok.is(AsmToken::Hash);
2564 if (Hash || Tok.is(AsmToken::Integer)) {
2565 // Immediate operand.
2567 Parser.Lex(); // Eat the '#'
2568 const MCExpr *ImmVal;
2569 SMLoc ExprLoc = getLoc();
2570 if (getParser().parseExpression(ImmVal))
2571 return MatchOperand_ParseFail;
2572 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2574 Error(ExprLoc, "immediate value expected for barrier operand");
2575 return MatchOperand_ParseFail;
// The CRm field encoding the barrier option is 4 bits wide.
2577 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2578 Error(ExprLoc, "barrier operand out of range");
2579 return MatchOperand_ParseFail;
2582 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2583 return MatchOperand_Success;
2586 if (Tok.isNot(AsmToken::Identifier)) {
2587 TokError("invalid operand for instruction");
2588 return MatchOperand_ParseFail;
2592 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2594 TokError("invalid barrier option name");
2595 return MatchOperand_ParseFail;
2598 // The only valid named option for ISB is 'sy'
2599 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2600 TokError("'sy' or #imm operand expected");
2601 return MatchOperand_ParseFail;
2605 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2606 Parser.Lex(); // Consume the option
2608 return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand (MRS/MSR). Any
/// identifier is accepted here; validation of the name against the known
/// system registers happens later, using the subtarget feature bits.
2611 AArch64AsmParser::OperandMatchResultTy
2612 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2613 const AsmToken &Tok = Parser.getTok();
2615 if (Tok.isNot(AsmToken::Identifier))
2616 return MatchOperand_NoMatch;
2618 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2619 STI.getFeatureBits(), getContext()));
2620 Parser.Lex(); // Eat identifier
2622 return MatchOperand_Success;
2625 /// tryParseVectorRegister - Parse a vector register operand.
/// Returns true if no vector register could be parsed. On success, pushes a
/// register operand, an optional arrangement-kind token (e.g. ".4s"), and an
/// optional constant lane index in square brackets.
2626 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2627 if (Parser.getTok().isNot(AsmToken::Identifier))
2631 // Check for a vector register specifier first.
2633 int64_t Reg = tryMatchVectorRegister(Kind, false);
2637 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2638 // If there was an explicit qualifier, that goes on as a literal text
2642 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2644 // If there is an index specifier following the register, parse that too.
2645 if (Parser.getTok().is(AsmToken::LBrac)) {
2646 SMLoc SIdx = getLoc();
2647 Parser.Lex(); // Eat left bracket token.
2649 const MCExpr *ImmVal;
2650 if (getParser().parseExpression(ImmVal))
// The lane index must fold to a constant.
2652 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2654 TokError("immediate value expected for vector index");
2659 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2660 Error(E, "']' expected");
2664 Parser.Lex(); // Eat right bracket token.
2666 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2673 /// parseRegister - Parse a non-vector register operand.
/// Returns true on failure. Tries vector registers first, then scalar
/// registers; also handles the literal "[1]" suffix that a few instructions
/// (e.g. FMOVXDhighr) carry as part of their asm string.
2674 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2676 // Try for a vector register.
2677 if (!tryParseVectorRegister(Operands))
2680 // Try for a scalar register.
2681 int64_t Reg = tryParseRegister();
2685 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2687 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2688 // as a string token in the instruction itself.
2689 if (getLexer().getKind() == AsmToken::LBrac) {
2690 SMLoc LBracS = getLoc();
2692 const AsmToken &Tok = Parser.getTok();
2693 if (Tok.is(AsmToken::Integer)) {
2694 SMLoc IntS = getLoc();
2695 int64_t Val = Tok.getIntVal();
2698 if (getLexer().getKind() == AsmToken::RBrac) {
2699 SMLoc RBracS = getLoc();
// Push "[", "1", "]" as separate literal tokens so they match the
// instruction's asm string verbatim.
2702 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2704 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2706 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":spec:expr" (e.g.
/// ":lo12:sym"). Returns true on error. When a specifier is present, the
/// resulting expression is wrapped in an AArch64MCExpr carrying the
/// relocation variant kind.
2716 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2717 bool HasELFModifier = false;
2718 AArch64MCExpr::VariantKind RefKind;
2720 if (Parser.getTok().is(AsmToken::Colon)) {
2721 Parser.Lex(); // Eat ':'
2722 HasELFModifier = true;
2724 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2725 Error(Parser.getTok().getLoc(),
2726 "expect relocation specifier in operand after ':'");
2730 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2731 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2732 .Case("lo12", AArch64MCExpr::VK_LO12)
2733 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2734 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2735 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2736 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2737 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2738 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2739 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2740 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2741 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2742 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2743 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2744 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2745 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2746 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2747 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2748 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2749 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2750 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2751 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2752 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2753 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2754 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2755 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2756 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2757 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2758 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2759 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2760 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2761 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2762 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2763 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2764 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2765 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2766 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2767 .Default(AArch64MCExpr::VK_INVALID);
2769 if (RefKind == AArch64MCExpr::VK_INVALID) {
2770 Error(Parser.getTok().getLoc(),
2771 "expect relocation specifier in operand after ':'");
2775 Parser.Lex(); // Eat identifier
2777 if (Parser.getTok().isNot(AsmToken::Colon)) {
2778 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2781 Parser.Lex(); // Eat ':'
2784 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the relocation variant survives to emission.
2788 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2793 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Accepts "{v0.8b - v3.8b}" range syntax and "{v0.8b, v1.8b, ...}" list
/// syntax. All registers must share the same arrangement suffix and be
/// sequential mod 32. An optional lane index may follow the closing brace.
/// Returns true on error.
2794 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2795 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2797 Parser.Lex(); // Eat left bracket token.
2799 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2802 int64_t PrevReg = FirstReg;
2805 if (Parser.getTok().is(AsmToken::Minus)) {
2806 Parser.Lex(); // Eat the minus.
2808 SMLoc Loc = getLoc();
2810 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2813 // Any Kind suffices must match on all regs in the list.
2814 if (Kind != NextKind)
2815 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap at 32, so "v30-v1" is a valid 4-register range.
2817 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2819 if (Space == 0 || Space > 3) {
2820 return Error(Loc, "invalid number of vectors");
2826 while (Parser.getTok().is(AsmToken::Comma)) {
2827 Parser.Lex(); // Eat the comma token.
2829 SMLoc Loc = getLoc();
2831 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2834 // Any Kind suffices must match on all regs in the list.
2835 if (Kind != NextKind)
2836 return Error(Loc, "mismatched register size suffix");
2838 // Registers must be incremental (with wraparound at 31)
2839 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2840 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2841 return Error(Loc, "registers must be sequential");
2848 if (Parser.getTok().isNot(AsmToken::RCurly))
2849 return Error(getLoc(), "'}' expected");
2850 Parser.Lex(); // Eat the '}' token.
2853 return Error(S, "invalid number of vectors");
2855 unsigned NumElements = 0;
2856 char ElementKind = 0;
2858 parseValidVectorKind(Kind, NumElements, ElementKind);
2860 Operands.push_back(AArch64Operand::CreateVectorList(
2861 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2863 // If there is an index specifier following the list, parse that too.
2864 if (Parser.getTok().is(AsmToken::LBrac)) {
2865 SMLoc SIdx = getLoc();
2866 Parser.Lex(); // Eat left bracket token.
2868 const MCExpr *ImmVal;
2869 if (getParser().parseExpression(ImmVal))
2871 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2873 TokError("immediate value expected for vector index");
2878 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2879 Error(E, "']' expected");
2883 Parser.Lex(); // Eat right bracket token.
2885 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register with an optional
/// ", #0" suffix (used by LDXP/STXP-style operands written "xN, #0" or just
/// "xN"). Any index other than an absent one or a literal 0 is rejected.
2892 AArch64AsmParser::OperandMatchResultTy
2893 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2894 const AsmToken &Tok = Parser.getTok();
2895 if (!Tok.is(AsmToken::Identifier))
2896 return MatchOperand_NoMatch;
2897 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2899 MCContext &Ctx = getContext();
2900 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2901 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2902 return MatchOperand_NoMatch;
2905 Parser.Lex(); // Eat register
// No comma: the plain register form, with no trailing "#0".
2907 if (Parser.getTok().isNot(AsmToken::Comma)) {
2909 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2910 return MatchOperand_Success;
2912 Parser.Lex(); // Eat comma.
2914 if (Parser.getTok().is(AsmToken::Hash))
2915 Parser.Lex(); // Eat hash
2917 if (Parser.getTok().isNot(AsmToken::Integer)) {
2918 Error(getLoc(), "index must be absent or #0");
2919 return MatchOperand_ParseFail;
2922 const MCExpr *ImmVal;
2923 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2924 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2925 Error(getLoc(), "index must be absent or #0");
2926 return MatchOperand_ParseFail;
2930 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2931 return MatchOperand_Success;
2934 /// parseOperand - Parse a arm instruction operand. For now this parses the
2935 /// operand regardless of the mnemonic.
/// Returns true on error. Dispatches on the leading token: custom matchers
/// first, then '[', '{', identifiers (condition codes, registers, shifts,
/// labels), immediates (including the special fcmp #0.0 literal), and the
/// ldr-pseudo '=' form which may be rewritten to a movz or a constant-pool
/// load.
2936 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2937 bool invertCondCode) {
2938 // Check if the current operand has a custom associated parser, if so, try to
2939 // custom parse the operand, or fallback to the general approach.
2940 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2941 if (ResTy == MatchOperand_Success)
2943 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2944 // there was a match, but an error occurred, in which case, just return that
2945 // the operand parsing failed.
2946 if (ResTy == MatchOperand_ParseFail)
2949 // Nothing custom, so do general case parsing.
2951 switch (getLexer().getKind()) {
2955 if (parseSymbolicImmVal(Expr))
2956 return Error(S, "invalid operand");
2958 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2959 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2962 case AsmToken::LBrac: {
2963 SMLoc Loc = Parser.getTok().getLoc();
2964 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2966 Parser.Lex(); // Eat '['
2968 // There's no comma after a '[', so we can parse the next operand
2970 return parseOperand(Operands, false, false);
2972 case AsmToken::LCurly:
2973 return parseVectorList(Operands);
2974 case AsmToken::Identifier: {
2975 // If we're expecting a Condition Code operand, then just parse that.
2977 return parseCondCode(Operands, invertCondCode);
2979 // If it's a register name, parse it.
2980 if (!parseRegister(Operands))
2983 // This could be an optional "shift" or "extend" operand.
2984 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2985 // We can only continue if no tokens were eaten.
2986 if (GotShift != MatchOperand_NoMatch)
2989 // This was not a register so parse other operands that start with an
2990 // identifier (like labels) as expressions and create them as immediates.
2991 const MCExpr *IdVal;
2993 if (getParser().parseExpression(IdVal))
2996 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2997 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3000 case AsmToken::Integer:
3001 case AsmToken::Real:
3002 case AsmToken::Hash: {
3003 // #42 -> immediate.
3005 if (getLexer().is(AsmToken::Hash))
3008 // Parse a negative sign
3009 bool isNegative = false;
3010 if (Parser.getTok().is(AsmToken::Minus)) {
3012 // We need to consume this token only when we have a Real, otherwise
3013 // we let parseSymbolicImmVal take care of it
3014 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3018 // The only Real that should come through here is a literal #0.0 for
3019 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3020 // so convert the value.
3021 const AsmToken &Tok = Parser.getTok();
3022 if (Tok.is(AsmToken::Real)) {
3023 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3024 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3025 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3026 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3027 Mnemonic != "fcmlt")
3028 return TokError("unexpected floating point literal");
3029 else if (IntVal != 0 || isNegative)
3030 return TokError("expected floating-point constant #0.0");
3031 Parser.Lex(); // Eat the token.
// Push "#0" and ".0" as raw tokens to match the instruction string.
3034 AArch64Operand::CreateToken("#0", false, S, getContext()));
3036 AArch64Operand::CreateToken(".0", false, S, getContext()));
3040 const MCExpr *ImmVal;
3041 if (parseSymbolicImmVal(ImmVal))
3044 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3045 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3048 case AsmToken::Equal: {
3049 SMLoc Loc = Parser.getTok().getLoc();
3050 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3051 return Error(Loc, "unexpected token in operand");
3052 Parser.Lex(); // Eat '='
3053 const MCExpr *SubExprVal;
3054 if (getParser().parseExpression(SubExprVal))
3057 MCContext& Ctx = getContext();
3058 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3059 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3060 if (isa<MCConstantExpr>(SubExprVal) && Operands.size() >= 2 &&
3061 static_cast<AArch64Operand &>(*Operands[1]).isReg()) {
3062 bool IsXReg = AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3063 Operands[1]->getReg());
3064 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// movz can materialize a 16-bit chunk shifted by 0/16/32/48 (X regs)
// or by 0 only beyond 16 for W regs; strip trailing 16-bit zero chunks.
3065 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3066 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3070 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3071 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3072 Operands.push_back(AArch64Operand::CreateImm(
3073 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3075 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3076 ShiftAmt, true, S, E, Ctx));
3080 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3081 const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
3082 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3088 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Returns true on error. Handles: canonicalizing "beq"-style
/// shorthands into "b.eq", the AArch64 ".req" directive, the SYS aliases
/// (ic/dc/at/tlbi), condition-code suffixes on branches, and the aliases
/// whose 2nd/3rd/4th operand is a (possibly inverted) condition code.
3090 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3091 StringRef Name, SMLoc NameLoc,
3092 OperandVector &Operands) {
// Canonicalize "b<cc>" shorthand spellings to the "b.<cc>" form.
3093 Name = StringSwitch<StringRef>(Name.lower())
3094 .Case("beq", "b.eq")
3095 .Case("bne", "b.ne")
3096 .Case("bhs", "b.hs")
3097 .Case("bcs", "b.cs")
3098 .Case("blo", "b.lo")
3099 .Case("bcc", "b.cc")
3100 .Case("bmi", "b.mi")
3101 .Case("bpl", "b.pl")
3102 .Case("bvs", "b.vs")
3103 .Case("bvc", "b.vc")
3104 .Case("bhi", "b.hi")
3105 .Case("bls", "b.ls")
3106 .Case("bge", "b.ge")
3107 .Case("blt", "b.lt")
3108 .Case("bgt", "b.gt")
3109 .Case("ble", "b.le")
3110 .Case("bal", "b.al")
3111 .Case("bnv", "b.nv")
3114 // First check for the AArch64-specific .req directive.
3115 if (Parser.getTok().is(AsmToken::Identifier) &&
3116 Parser.getTok().getIdentifier() == ".req") {
3117 parseDirectiveReq(Name, NameLoc);
3118 // We always return 'error' for this, as we're done with this
3119 // statement and don't need to match the 'instruction."
3123 // Create the leading tokens for the mnemonic, split by '.' characters.
3124 size_t Start = 0, Next = Name.find('.');
3125 StringRef Head = Name.slice(Start, Next);
3127 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3128 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3129 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3130 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3131 Parser.eatToEndOfStatement();
3136 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3139 // Handle condition codes for a branch mnemonic
3140 if (Head == "b" && Next != StringRef::npos) {
3142 Next = Name.find('.', Start + 1);
3143 Head = Name.slice(Start + 1, Next);
3145 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3146 (Head.data() - Name.data()));
3147 AArch64CC::CondCode CC = parseCondCodeString(Head);
3148 if (CC == AArch64CC::Invalid)
3149 return Error(SuffixLoc, "invalid condition code");
3151 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3153 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3156 // Add the remaining tokens in the mnemonic.
3157 while (Next != StringRef::npos) {
3159 Next = Name.find('.', Start + 1);
3160 Head = Name.slice(Start, Next);
3161 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3162 (Head.data() - Name.data()) + 1);
3164 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3167 // Conditional compare instructions have a Condition Code operand, which needs
3168 // to be parsed and an immediate operand created.
3169 bool condCodeFourthOperand =
3170 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3171 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3172 Head == "csinc" || Head == "csinv" || Head == "csneg");
3174 // These instructions are aliases to some of the conditional select
3175 // instructions. However, the condition code is inverted in the aliased
3178 // FIXME: Is this the correct way to handle these? Or should the parser
3179 // generate the aliased instructions directly?
3180 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3181 bool condCodeThirdOperand =
3182 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3184 // Read the remaining operands.
3185 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3186 // Read the first operand.
3187 if (parseOperand(Operands, false, false)) {
3188 Parser.eatToEndOfStatement();
3193 while (getLexer().is(AsmToken::Comma)) {
3194 Parser.Lex(); // Eat the comma.
3196 // Parse and remember the operand.
// Tell parseOperand when operand N is a condition code, and whether the
// alias requires the code to be inverted (cset/csetm/cinc/cinv/cneg).
3197 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3198 (N == 3 && condCodeThirdOperand) ||
3199 (N == 2 && condCodeSecondOperand),
3200 condCodeSecondOperand || condCodeThirdOperand)) {
3201 Parser.eatToEndOfStatement();
3205 // After successfully parsing some operands there are two special cases to
3206 // consider (i.e. notional operands not separated by commas). Both are due
3207 // to memory specifiers:
3208 // + An RBrac will end an address for load/store/prefetch
3209 // + An '!' will indicate a pre-indexed operation.
3211 // It's someone else's responsibility to make sure these tokens are sane
3212 // in the given context!
3213 if (Parser.getTok().is(AsmToken::RBrac)) {
3214 SMLoc Loc = Parser.getTok().getLoc();
3215 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3220 if (Parser.getTok().is(AsmToken::Exclaim)) {
3221 SMLoc Loc = Parser.getTok().getLoc();
3222 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3231 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3232 SMLoc Loc = Parser.getTok().getLoc();
3233 Parser.eatToEndOfStatement();
3234 return Error(Loc, "unexpected token in argument list");
3237 Parser.Lex(); // Consume the EndOfStatement
3241 // FIXME: This entire function is a giant hack to provide us with decent
3242 // operand range validation/diagnostics until TableGen/MC can be extended
3243 // to support autogeneration of this kind of validation.
3244 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3245 SmallVectorImpl<SMLoc> &Loc) {
3246 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3247 // Check for indexed addressing modes w/ the base register being the
3248 // same as a destination/source register or pair load where
3249 // the Rt == Rt2. All of those are undefined behaviour.
3250 switch (Inst.getOpcode()) {
3251 case AArch64::LDPSWpre:
3252 case AArch64::LDPWpost:
3253 case AArch64::LDPWpre:
3254 case AArch64::LDPXpost:
3255 case AArch64::LDPXpre: {
3256 unsigned Rt = Inst.getOperand(1).getReg();
3257 unsigned Rt2 = Inst.getOperand(2).getReg();
3258 unsigned Rn = Inst.getOperand(3).getReg();
3259 if (RI->isSubRegisterEq(Rn, Rt))
3260 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3261 "is also a destination");
3262 if (RI->isSubRegisterEq(Rn, Rt2))
3263 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3264 "is also a destination");
3267 case AArch64::LDPDi:
3268 case AArch64::LDPQi:
3269 case AArch64::LDPSi:
3270 case AArch64::LDPSWi:
3271 case AArch64::LDPWi:
3272 case AArch64::LDPXi: {
3273 unsigned Rt = Inst.getOperand(0).getReg();
3274 unsigned Rt2 = Inst.getOperand(1).getReg();
3276 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3279 case AArch64::LDPDpost:
3280 case AArch64::LDPDpre:
3281 case AArch64::LDPQpost:
3282 case AArch64::LDPQpre:
3283 case AArch64::LDPSpost:
3284 case AArch64::LDPSpre:
3285 case AArch64::LDPSWpost: {
3286 unsigned Rt = Inst.getOperand(1).getReg();
3287 unsigned Rt2 = Inst.getOperand(2).getReg();
3289 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3292 case AArch64::STPDpost:
3293 case AArch64::STPDpre:
3294 case AArch64::STPQpost:
3295 case AArch64::STPQpre:
3296 case AArch64::STPSpost:
3297 case AArch64::STPSpre:
3298 case AArch64::STPWpost:
3299 case AArch64::STPWpre:
3300 case AArch64::STPXpost:
3301 case AArch64::STPXpre: {
3302 unsigned Rt = Inst.getOperand(1).getReg();
3303 unsigned Rt2 = Inst.getOperand(2).getReg();
3304 unsigned Rn = Inst.getOperand(3).getReg();
3305 if (RI->isSubRegisterEq(Rn, Rt))
3306 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3307 "is also a source");
3308 if (RI->isSubRegisterEq(Rn, Rt2))
3309 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3310 "is also a source");
3313 case AArch64::LDRBBpre:
3314 case AArch64::LDRBpre:
3315 case AArch64::LDRHHpre:
3316 case AArch64::LDRHpre:
3317 case AArch64::LDRSBWpre:
3318 case AArch64::LDRSBXpre:
3319 case AArch64::LDRSHWpre:
3320 case AArch64::LDRSHXpre:
3321 case AArch64::LDRSWpre:
3322 case AArch64::LDRWpre:
3323 case AArch64::LDRXpre:
3324 case AArch64::LDRBBpost:
3325 case AArch64::LDRBpost:
3326 case AArch64::LDRHHpost:
3327 case AArch64::LDRHpost:
3328 case AArch64::LDRSBWpost:
3329 case AArch64::LDRSBXpost:
3330 case AArch64::LDRSHWpost:
3331 case AArch64::LDRSHXpost:
3332 case AArch64::LDRSWpost:
3333 case AArch64::LDRWpost:
3334 case AArch64::LDRXpost: {
3335 unsigned Rt = Inst.getOperand(1).getReg();
3336 unsigned Rn = Inst.getOperand(2).getReg();
3337 if (RI->isSubRegisterEq(Rn, Rt))
3338 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3339 "is also a source");
3342 case AArch64::STRBBpost:
3343 case AArch64::STRBpost:
3344 case AArch64::STRHHpost:
3345 case AArch64::STRHpost:
3346 case AArch64::STRWpost:
3347 case AArch64::STRXpost:
3348 case AArch64::STRBBpre:
3349 case AArch64::STRBpre:
3350 case AArch64::STRHHpre:
3351 case AArch64::STRHpre:
3352 case AArch64::STRWpre:
3353 case AArch64::STRXpre: {
3354 unsigned Rt = Inst.getOperand(1).getReg();
3355 unsigned Rn = Inst.getOperand(2).getReg();
3356 if (RI->isSubRegisterEq(Rn, Rt))
3357 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3358 "is also a source");
3363 // Now check immediate ranges. Separate from the above as there is overlap
3364 // in the instructions being checked and this keeps the nested conditionals
3366 switch (Inst.getOpcode()) {
3367 case AArch64::ADDSWri:
3368 case AArch64::ADDSXri:
3369 case AArch64::ADDWri:
3370 case AArch64::ADDXri:
3371 case AArch64::SUBSWri:
3372 case AArch64::SUBSXri:
3373 case AArch64::SUBWri:
3374 case AArch64::SUBXri: {
3375 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3376 // some slight duplication here.
3377 if (Inst.getOperand(2).isExpr()) {
3378 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3379 AArch64MCExpr::VariantKind ELFRefKind;
3380 MCSymbolRefExpr::VariantKind DarwinRefKind;
3382 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3383 return Error(Loc[2], "invalid immediate expression");
3386 // Only allow these with ADDXri.
3387 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3388 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3389 Inst.getOpcode() == AArch64::ADDXri)
3392 // Only allow these with ADDXri/ADDWri
3393 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3394 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3395 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3396 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3397 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3398 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3399 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3400 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3401 (Inst.getOpcode() == AArch64::ADDXri ||
3402 Inst.getOpcode() == AArch64::ADDWri))
3405 // Don't allow expressions in the immediate field otherwise
3406 return Error(Loc[2], "invalid immediate expression");
3415 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3417 case Match_MissingFeature:
3419 "instruction requires a CPU feature not currently enabled");
3420 case Match_InvalidOperand:
3421 return Error(Loc, "invalid operand for instruction");
3422 case Match_InvalidSuffix:
3423 return Error(Loc, "invalid type suffix for instruction");
3424 case Match_InvalidCondCode:
3425 return Error(Loc, "expected AArch64 condition code");
3426 case Match_AddSubRegExtendSmall:
3428 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3429 case Match_AddSubRegExtendLarge:
3431 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3432 case Match_AddSubSecondSource:
3434 "expected compatible register, symbol or integer in range [0, 4095]");
3435 case Match_LogicalSecondSource:
3436 return Error(Loc, "expected compatible register or logical immediate");
3437 case Match_InvalidMovImm32Shift:
3438 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3439 case Match_InvalidMovImm64Shift:
3440 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3441 case Match_AddSubRegShift32:
3443 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3444 case Match_AddSubRegShift64:
3446 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3447 case Match_InvalidFPImm:
3449 "expected compatible register or floating-point constant");
3450 case Match_InvalidMemoryIndexedSImm9:
3451 return Error(Loc, "index must be an integer in range [-256, 255].");
3452 case Match_InvalidMemoryIndexed4SImm7:
3453 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3454 case Match_InvalidMemoryIndexed8SImm7:
3455 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3456 case Match_InvalidMemoryIndexed16SImm7:
3457 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3458 case Match_InvalidMemoryWExtend8:
3460 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3461 case Match_InvalidMemoryWExtend16:
3463 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3464 case Match_InvalidMemoryWExtend32:
3466 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3467 case Match_InvalidMemoryWExtend64:
3469 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3470 case Match_InvalidMemoryWExtend128:
3472 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3473 case Match_InvalidMemoryXExtend8:
3475 "expected 'lsl' or 'sxtx' with optional shift of #0");
3476 case Match_InvalidMemoryXExtend16:
3478 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3479 case Match_InvalidMemoryXExtend32:
3481 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3482 case Match_InvalidMemoryXExtend64:
3484 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3485 case Match_InvalidMemoryXExtend128:
3487 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3488 case Match_InvalidMemoryIndexed1:
3489 return Error(Loc, "index must be an integer in range [0, 4095].");
3490 case Match_InvalidMemoryIndexed2:
3491 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3492 case Match_InvalidMemoryIndexed4:
3493 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3494 case Match_InvalidMemoryIndexed8:
3495 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3496 case Match_InvalidMemoryIndexed16:
3497 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3498 case Match_InvalidImm0_7:
3499 return Error(Loc, "immediate must be an integer in range [0, 7].");
3500 case Match_InvalidImm0_15:
3501 return Error(Loc, "immediate must be an integer in range [0, 15].");
3502 case Match_InvalidImm0_31:
3503 return Error(Loc, "immediate must be an integer in range [0, 31].");
3504 case Match_InvalidImm0_63:
3505 return Error(Loc, "immediate must be an integer in range [0, 63].");
3506 case Match_InvalidImm0_127:
3507 return Error(Loc, "immediate must be an integer in range [0, 127].");
3508 case Match_InvalidImm0_65535:
3509 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3510 case Match_InvalidImm1_8:
3511 return Error(Loc, "immediate must be an integer in range [1, 8].");
3512 case Match_InvalidImm1_16:
3513 return Error(Loc, "immediate must be an integer in range [1, 16].");
3514 case Match_InvalidImm1_32:
3515 return Error(Loc, "immediate must be an integer in range [1, 32].");
3516 case Match_InvalidImm1_64:
3517 return Error(Loc, "immediate must be an integer in range [1, 64].");
3518 case Match_InvalidIndex1:
3519 return Error(Loc, "expected lane specifier '[1]'");
3520 case Match_InvalidIndexB:
3521 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3522 case Match_InvalidIndexH:
3523 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3524 case Match_InvalidIndexS:
3525 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3526 case Match_InvalidIndexD:
3527 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3528 case Match_InvalidLabel:
3529 return Error(Loc, "expected label or encodable integer pc offset");
3531 return Error(Loc, "expected readable system register");
3533 return Error(Loc, "expected writable system register or pstate");
3534 case Match_MnemonicFail:
3535 return Error(Loc, "unrecognized instruction mnemonic");
3537 llvm_unreachable("unexpected error code!");
// Forward declaration; the definition is provided by the
// GET_SUBTARGET_FEATURE_NAME expansion of AArch64GenAsmMatcher.inc,
// included at the bottom of this file.
static const char *getSubtargetFeatureName(unsigned Val);
3543 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3544 OperandVector &Operands,
3546 unsigned &ErrorInfo,
3547 bool MatchingInlineAsm) {
3548 assert(!Operands.empty() && "Unexpect empty operand list!");
3549 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3550 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3552 StringRef Tok = Op.getToken();
3553 unsigned NumOperands = Operands.size();
3555 if (NumOperands == 4 && Tok == "lsl") {
3556 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3557 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3558 if (Op2.isReg() && Op3.isImm()) {
3559 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3561 uint64_t Op3Val = Op3CE->getValue();
3562 uint64_t NewOp3Val = 0;
3563 uint64_t NewOp4Val = 0;
3564 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3566 NewOp3Val = (32 - Op3Val) & 0x1f;
3567 NewOp4Val = 31 - Op3Val;
3569 NewOp3Val = (64 - Op3Val) & 0x3f;
3570 NewOp4Val = 63 - Op3Val;
3573 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3574 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3576 Operands[0] = AArch64Operand::CreateToken(
3577 "ubfm", false, Op.getStartLoc(), getContext());
3578 Operands.push_back(AArch64Operand::CreateImm(
3579 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3580 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3581 Op3.getEndLoc(), getContext());
3584 } else if (NumOperands == 5) {
3585 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3586 // UBFIZ -> UBFM aliases.
3587 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3588 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3589 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3590 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3592 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3593 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3594 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3596 if (Op3CE && Op4CE) {
3597 uint64_t Op3Val = Op3CE->getValue();
3598 uint64_t Op4Val = Op4CE->getValue();
3600 uint64_t RegWidth = 0;
3601 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3607 if (Op3Val >= RegWidth)
3608 return Error(Op3.getStartLoc(),
3609 "expected integer in range [0, 31]");
3610 if (Op4Val < 1 || Op4Val > RegWidth)
3611 return Error(Op4.getStartLoc(),
3612 "expected integer in range [1, 32]");
3614 uint64_t NewOp3Val = 0;
3615 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3617 NewOp3Val = (32 - Op3Val) & 0x1f;
3619 NewOp3Val = (64 - Op3Val) & 0x3f;
3621 uint64_t NewOp4Val = Op4Val - 1;
3623 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3624 return Error(Op4.getStartLoc(),
3625 "requested insert overflows register");
3627 const MCExpr *NewOp3 =
3628 MCConstantExpr::Create(NewOp3Val, getContext());
3629 const MCExpr *NewOp4 =
3630 MCConstantExpr::Create(NewOp4Val, getContext());
3631 Operands[3] = AArch64Operand::CreateImm(
3632 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3633 Operands[4] = AArch64Operand::CreateImm(
3634 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3636 Operands[0] = AArch64Operand::CreateToken(
3637 "bfm", false, Op.getStartLoc(), getContext());
3638 else if (Tok == "sbfiz")
3639 Operands[0] = AArch64Operand::CreateToken(
3640 "sbfm", false, Op.getStartLoc(), getContext());
3641 else if (Tok == "ubfiz")
3642 Operands[0] = AArch64Operand::CreateToken(
3643 "ubfm", false, Op.getStartLoc(), getContext());
3645 llvm_unreachable("No valid mnemonic for alias?");
3649 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3650 // UBFX -> UBFM aliases.
3651 } else if (NumOperands == 5 &&
3652 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3653 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3654 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3655 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3657 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3658 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3659 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3661 if (Op3CE && Op4CE) {
3662 uint64_t Op3Val = Op3CE->getValue();
3663 uint64_t Op4Val = Op4CE->getValue();
3665 uint64_t RegWidth = 0;
3666 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3672 if (Op3Val >= RegWidth)
3673 return Error(Op3.getStartLoc(),
3674 "expected integer in range [0, 31]");
3675 if (Op4Val < 1 || Op4Val > RegWidth)
3676 return Error(Op4.getStartLoc(),
3677 "expected integer in range [1, 32]");
3679 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3681 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3682 return Error(Op4.getStartLoc(),
3683 "requested extract overflows register");
3685 const MCExpr *NewOp4 =
3686 MCConstantExpr::Create(NewOp4Val, getContext());
3687 Operands[4] = AArch64Operand::CreateImm(
3688 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3690 Operands[0] = AArch64Operand::CreateToken(
3691 "bfm", false, Op.getStartLoc(), getContext());
3692 else if (Tok == "sbfx")
3693 Operands[0] = AArch64Operand::CreateToken(
3694 "sbfm", false, Op.getStartLoc(), getContext());
3695 else if (Tok == "ubfx")
3696 Operands[0] = AArch64Operand::CreateToken(
3697 "ubfm", false, Op.getStartLoc(), getContext());
3699 llvm_unreachable("No valid mnemonic for alias?");
3704 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3705 // InstAlias can't quite handle this since the reg classes aren't
3707 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3708 // The source register can be Wn here, but the matcher expects a
3709 // GPR64. Twiddle it here if necessary.
3710 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3712 unsigned Reg = getXRegFromWReg(Op.getReg());
3713 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3714 Op.getEndLoc(), getContext());
3717 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3718 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3719 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3721 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3723 // The source register can be Wn here, but the matcher expects a
3724 // GPR64. Twiddle it here if necessary.
3725 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3727 unsigned Reg = getXRegFromWReg(Op.getReg());
3728 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3729 Op.getEndLoc(), getContext());
3733 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3734 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3735 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3737 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3739 // The source register can be Wn here, but the matcher expects a
3740 // GPR32. Twiddle it here if necessary.
3741 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3743 unsigned Reg = getWRegFromXReg(Op.getReg());
3744 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3745 Op.getEndLoc(), getContext());
3750 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3751 if (NumOperands == 3 && Tok == "fmov") {
3752 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3753 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3754 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3756 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3760 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3761 Op.getEndLoc(), getContext());
3766 // First try to match against the secondary set of tables containing the
3767 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3768 unsigned MatchResult =
3769 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3771 // If that fails, try against the alternate table containing long-form NEON:
3772 // "fadd v0.2s, v1.2s, v2.2s"
3773 if (MatchResult != Match_Success)
3775 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3777 switch (MatchResult) {
3778 case Match_Success: {
3779 // Perform range checking and other semantic validations
3780 SmallVector<SMLoc, 8> OperandLocs;
3781 NumOperands = Operands.size();
3782 for (unsigned i = 1; i < NumOperands; ++i)
3783 OperandLocs.push_back(Operands[i]->getStartLoc());
3784 if (validateInstruction(Inst, OperandLocs))
3788 Out.EmitInstruction(Inst, STI);
3791 case Match_MissingFeature: {
3792 assert(ErrorInfo && "Unknown missing feature!");
3793 // Special case the error message for the very common case where only
3794 // a single subtarget feature is missing (neon, e.g.).
3795 std::string Msg = "instruction requires:";
3797 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3798 if (ErrorInfo & Mask) {
3800 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3804 return Error(IDLoc, Msg);
3806 case Match_MnemonicFail:
3807 return showMatchError(IDLoc, MatchResult);
3808 case Match_InvalidOperand: {
3809 SMLoc ErrorLoc = IDLoc;
3810 if (ErrorInfo != ~0U) {
3811 if (ErrorInfo >= Operands.size())
3812 return Error(IDLoc, "too few operands for instruction");
3814 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3815 if (ErrorLoc == SMLoc())
3818 // If the match failed on a suffix token operand, tweak the diagnostic
3820 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3821 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3822 MatchResult = Match_InvalidSuffix;
3824 return showMatchError(ErrorLoc, MatchResult);
3826 case Match_InvalidMemoryIndexed1:
3827 case Match_InvalidMemoryIndexed2:
3828 case Match_InvalidMemoryIndexed4:
3829 case Match_InvalidMemoryIndexed8:
3830 case Match_InvalidMemoryIndexed16:
3831 case Match_InvalidCondCode:
3832 case Match_AddSubRegExtendSmall:
3833 case Match_AddSubRegExtendLarge:
3834 case Match_AddSubSecondSource:
3835 case Match_LogicalSecondSource:
3836 case Match_AddSubRegShift32:
3837 case Match_AddSubRegShift64:
3838 case Match_InvalidMovImm32Shift:
3839 case Match_InvalidMovImm64Shift:
3840 case Match_InvalidFPImm:
3841 case Match_InvalidMemoryWExtend8:
3842 case Match_InvalidMemoryWExtend16:
3843 case Match_InvalidMemoryWExtend32:
3844 case Match_InvalidMemoryWExtend64:
3845 case Match_InvalidMemoryWExtend128:
3846 case Match_InvalidMemoryXExtend8:
3847 case Match_InvalidMemoryXExtend16:
3848 case Match_InvalidMemoryXExtend32:
3849 case Match_InvalidMemoryXExtend64:
3850 case Match_InvalidMemoryXExtend128:
3851 case Match_InvalidMemoryIndexed4SImm7:
3852 case Match_InvalidMemoryIndexed8SImm7:
3853 case Match_InvalidMemoryIndexed16SImm7:
3854 case Match_InvalidMemoryIndexedSImm9:
3855 case Match_InvalidImm0_7:
3856 case Match_InvalidImm0_15:
3857 case Match_InvalidImm0_31:
3858 case Match_InvalidImm0_63:
3859 case Match_InvalidImm0_127:
3860 case Match_InvalidImm0_65535:
3861 case Match_InvalidImm1_8:
3862 case Match_InvalidImm1_16:
3863 case Match_InvalidImm1_32:
3864 case Match_InvalidImm1_64:
3865 case Match_InvalidIndex1:
3866 case Match_InvalidIndexB:
3867 case Match_InvalidIndexH:
3868 case Match_InvalidIndexS:
3869 case Match_InvalidIndexD:
3870 case Match_InvalidLabel:
3873 if (ErrorInfo >= Operands.size())
3874 return Error(IDLoc, "too few operands for instruction");
3875 // Any time we get here, there's nothing fancy to do. Just get the
3876 // operand SMLoc and display the diagnostic.
3877 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3878 if (ErrorLoc == SMLoc())
3880 return showMatchError(ErrorLoc, MatchResult);
3884 llvm_unreachable("Implement any new match types added!");
3888 /// ParseDirective parses the arm specific directives
3889 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3890 StringRef IDVal = DirectiveID.getIdentifier();
3891 SMLoc Loc = DirectiveID.getLoc();
3892 if (IDVal == ".hword")
3893 return parseDirectiveWord(2, Loc);
3894 if (IDVal == ".word")
3895 return parseDirectiveWord(4, Loc);
3896 if (IDVal == ".xword")
3897 return parseDirectiveWord(8, Loc);
3898 if (IDVal == ".tlsdesccall")
3899 return parseDirectiveTLSDescCall(Loc);
3900 if (IDVal == ".ltorg" || IDVal == ".pool")
3901 return parseDirectiveLtorg(Loc);
3902 if (IDVal == ".unreq")
3903 return parseDirectiveUnreq(DirectiveID.getLoc());
3905 return parseDirectiveLOH(IDVal, Loc);
3908 /// parseDirectiveWord
3909 /// ::= .word [ expression (, expression)* ]
3910 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3911 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3913 const MCExpr *Value;
3914 if (getParser().parseExpression(Value))
3917 getParser().getStreamer().EmitValue(Value, Size);
3919 if (getLexer().is(AsmToken::EndOfStatement))
3922 // FIXME: Improve diagnostic.
3923 if (getLexer().isNot(AsmToken::Comma))
3924 return Error(L, "unexpected token in directive");
3933 // parseDirectiveTLSDescCall:
3934 // ::= .tlsdesccall symbol
3935 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3937 if (getParser().parseIdentifier(Name))
3938 return Error(L, "expected symbol after directive");
3940 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3941 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3942 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3945 Inst.setOpcode(AArch64::TLSDESCCALL);
3946 Inst.addOperand(MCOperand::CreateExpr(Expr));
3948 getParser().getStreamer().EmitInstruction(Inst, STI);
3952 /// ::= .loh <lohName | lohId> label1, ..., labelN
3953 /// The number of arguments depends on the loh identifier.
3954 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3955 if (IDVal != MCLOHDirectiveName())
3958 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3959 if (getParser().getTok().isNot(AsmToken::Integer))
3960 return TokError("expected an identifier or a number in directive");
3961 // We successfully get a numeric value for the identifier.
3962 // Check if it is valid.
3963 int64_t Id = getParser().getTok().getIntVal();
3964 Kind = (MCLOHType)Id;
3965 // Check that Id does not overflow MCLOHType.
3966 if (!isValidMCLOHType(Kind) || Id != Kind)
3967 return TokError("invalid numeric identifier in directive");
3969 StringRef Name = getTok().getIdentifier();
3970 // We successfully parse an identifier.
3971 // Check if it is a recognized one.
3972 int Id = MCLOHNameToId(Name);
3975 return TokError("invalid identifier in directive");
3976 Kind = (MCLOHType)Id;
3978 // Consume the identifier.
3980 // Get the number of arguments of this LOH.
3981 int NbArgs = MCLOHIdToNbArgs(Kind);
3983 assert(NbArgs != -1 && "Invalid number of arguments");
3985 SmallVector<MCSymbol *, 3> Args;
3986 for (int Idx = 0; Idx < NbArgs; ++Idx) {
3988 if (getParser().parseIdentifier(Name))
3989 return TokError("expected identifier in directive");
3990 Args.push_back(getContext().GetOrCreateSymbol(Name));
3992 if (Idx + 1 == NbArgs)
3994 if (getLexer().isNot(AsmToken::Comma))
3995 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3998 if (getLexer().isNot(AsmToken::EndOfStatement))
3999 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4001 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4005 /// parseDirectiveLtorg
4006 /// ::= .ltorg | .pool
4007 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4008 getTargetStreamer().emitCurrentConstantPool();
4012 /// parseDirectiveReq
4013 /// ::= name .req registername
4014 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4015 Parser.Lex(); // Eat the '.req' token.
4016 SMLoc SRegLoc = getLoc();
4017 unsigned RegNum = tryParseRegister();
4018 bool IsVector = false;
4020 if (RegNum == static_cast<unsigned>(-1)) {
4022 RegNum = tryMatchVectorRegister(Kind, false);
4023 if (!Kind.empty()) {
4024 Error(SRegLoc, "vector register without type specifier expected");
4030 if (RegNum == static_cast<unsigned>(-1)) {
4031 Parser.eatToEndOfStatement();
4032 Error(SRegLoc, "register name or alias expected");
4036 // Shouldn't be anything else.
4037 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4038 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4039 Parser.eatToEndOfStatement();
4043 Parser.Lex(); // Consume the EndOfStatement
4045 auto pair = std::make_pair(IsVector, RegNum);
4046 if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
4047 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4052 /// parseDirectiveUneq
4053 /// ::= .unreq registername
4054 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4055 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4056 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4057 Parser.eatToEndOfStatement();
4060 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4061 Parser.Lex(); // Eat the identifier.
4066 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4067 AArch64MCExpr::VariantKind &ELFRefKind,
4068 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4070 ELFRefKind = AArch64MCExpr::VK_INVALID;
4071 DarwinRefKind = MCSymbolRefExpr::VK_None;
4074 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4075 ELFRefKind = AE->getKind();
4076 Expr = AE->getSubExpr();
4079 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4081 // It's a simple symbol reference with no addend.
4082 DarwinRefKind = SE->getKind();
4086 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4090 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4093 DarwinRefKind = SE->getKind();
4095 if (BE->getOpcode() != MCBinaryExpr::Add &&
4096 BE->getOpcode() != MCBinaryExpr::Sub)
4099 // See if the addend is is a constant, otherwise there's more going
4100 // on here than we can deal with.
4101 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4105 Addend = AddendExpr->getValue();
4106 if (BE->getOpcode() == MCBinaryExpr::Sub)
4109 // It's some symbol reference + a constant addend, but really
4110 // shouldn't use both Darwin and ELF syntax.
4111 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4112 DarwinRefKind == MCSymbolRefExpr::VK_None;
4115 /// Force static initialization.
4116 extern "C" void LLVMInitializeAArch64AsmParser() {
4117 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4118 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4120 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
4121 RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
4124 #define GET_REGISTER_MATCHER
4125 #define GET_SUBTARGET_FEATURE_NAME
4126 #define GET_MATCHER_IMPLEMENTATION
4127 #include "AArch64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
  // NOTE(review): this extract appears truncated — the signature's closing
  // 'unsigned Kind) {' line and the 'switch (Kind)' over the generated
  // MCK_ literal-token kinds (which assigns ExpectedVal per kind) seem to
  // be missing between the signature and the body below; confirm against
  // the complete file before building.
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  int64_t ExpectedVal;
  // Unrecognized kinds are simply not fixed-value literal tokens.
  return Match_InvalidOperand;
  // Presumably guarded by a missing 'if (!Op.isImm())' — TODO confirm.
  return Match_InvalidOperand;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
  // Only a constant immediate can match a fixed-value literal token.
  return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;