1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/MC/SubtargetFeature.h"
44 class AArch64AsmParser : public MCTargetAsmParser {
46 StringRef Mnemonic; ///< Instruction mnemonic.
49 // Map of register aliases registers via the .req directive.
50 StringMap<std::pair<bool, unsigned> > RegisterReqs;
52 AArch64TargetStreamer &getTargetStreamer() {
53 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
54 return static_cast<AArch64TargetStreamer &>(TS);
57 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
59 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
60 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
61 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
62 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
63 int tryParseRegister();
64 int tryMatchVectorRegister(StringRef &Kind, bool expected);
65 bool parseRegister(OperandVector &Operands);
66 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
67 bool parseVectorList(OperandVector &Operands);
68 bool parseOperand(OperandVector &Operands, bool isCondCode,
71 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
72 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
73 bool showMatchError(SMLoc Loc, unsigned ErrCode);
75 bool parseDirectiveWord(unsigned Size, SMLoc L);
76 bool parseDirectiveInst(SMLoc L);
78 bool parseDirectiveTLSDescCall(SMLoc L);
80 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
81 bool parseDirectiveLtorg(SMLoc L);
83 bool parseDirectiveReq(StringRef Name, SMLoc L);
84 bool parseDirectiveUnreq(SMLoc L);
86 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
87 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
88 OperandVector &Operands, MCStreamer &Out,
90 FeatureBitset &ErrorMissingFeature,
91 bool MatchingInlineAsm) override;
92 /// @name Auto-generated Match Functions
95 #define GET_ASSEMBLER_HEADER
96 #include "AArch64GenAsmMatcher.inc"
100 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
101 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
102 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
103 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
104 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
105 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
106 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
107 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
108 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
109 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
110 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
111 bool tryParseVectorRegister(OperandVector &Operands);
112 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
115 enum AArch64MatchResultTy {
116 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
117 #define GET_OPERAND_DIAGNOSTIC_TYPES
118 #include "AArch64GenAsmMatcher.inc"
120 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
121 const MCInstrInfo &MII, const MCTargetOptions &Options)
122 : MCTargetAsmParser(), STI(STI) {
123 MCAsmParserExtension::Initialize(Parser);
124 MCStreamer &S = getParser().getStreamer();
125 if (S.getTargetStreamer() == nullptr)
126 new AArch64TargetStreamer(S);
128 // Initialize the set of available features.
129 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
132 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
133 SMLoc NameLoc, OperandVector &Operands) override;
134 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
135 bool ParseDirective(AsmToken DirectiveID) override;
136 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
137 unsigned Kind) override;
139 static bool classifySymbolRef(const MCExpr *Expr,
140 AArch64MCExpr::VariantKind &ELFRefKind,
141 MCSymbolRefExpr::VariantKind &DarwinRefKind,
144 } // end anonymous namespace
148 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
150 class AArch64Operand : public MCParsedAsmOperand {
168 SMLoc StartLoc, EndLoc;
173 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
181 struct VectorListOp {
184 unsigned NumElements;
185 unsigned ElementKind;
188 struct VectorIndexOp {
196 struct ShiftedImmOp {
198 unsigned ShiftAmount;
202 AArch64CC::CondCode Code;
206 unsigned Val; // Encoded 8-bit representation.
210 unsigned Val; // Not the enum since not all values have names.
220 uint32_t PStateField;
233 struct ShiftExtendOp {
234 AArch64_AM::ShiftExtendType Type;
236 bool HasExplicitAmount;
246 struct VectorListOp VectorList;
247 struct VectorIndexOp VectorIndex;
249 struct ShiftedImmOp ShiftedImm;
250 struct CondCodeOp CondCode;
251 struct FPImmOp FPImm;
252 struct BarrierOp Barrier;
253 struct SysRegOp SysReg;
254 struct SysCRImmOp SysCRImm;
255 struct PrefetchOp Prefetch;
256 struct ShiftExtendOp ShiftExtend;
259 // Keep the MCContext around as the MCExprs may need manipulated during
260 // the add<>Operands() calls.
264 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
266 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
268 StartLoc = o.StartLoc;
278 ShiftedImm = o.ShiftedImm;
281 CondCode = o.CondCode;
293 VectorList = o.VectorList;
296 VectorIndex = o.VectorIndex;
302 SysCRImm = o.SysCRImm;
305 Prefetch = o.Prefetch;
308 ShiftExtend = o.ShiftExtend;
313 /// getStartLoc - Get the location of the first token of this operand.
314 SMLoc getStartLoc() const override { return StartLoc; }
315 /// getEndLoc - Get the location of the last token of this operand.
316 SMLoc getEndLoc() const override { return EndLoc; }
318 StringRef getToken() const {
319 assert(Kind == k_Token && "Invalid access!");
320 return StringRef(Tok.Data, Tok.Length);
323 bool isTokenSuffix() const {
324 assert(Kind == k_Token && "Invalid access!");
328 const MCExpr *getImm() const {
329 assert(Kind == k_Immediate && "Invalid access!");
333 const MCExpr *getShiftedImmVal() const {
334 assert(Kind == k_ShiftedImm && "Invalid access!");
335 return ShiftedImm.Val;
338 unsigned getShiftedImmShift() const {
339 assert(Kind == k_ShiftedImm && "Invalid access!");
340 return ShiftedImm.ShiftAmount;
343 AArch64CC::CondCode getCondCode() const {
344 assert(Kind == k_CondCode && "Invalid access!");
345 return CondCode.Code;
348 unsigned getFPImm() const {
349 assert(Kind == k_FPImm && "Invalid access!");
353 unsigned getBarrier() const {
354 assert(Kind == k_Barrier && "Invalid access!");
358 StringRef getBarrierName() const {
359 assert(Kind == k_Barrier && "Invalid access!");
360 return StringRef(Barrier.Data, Barrier.Length);
363 unsigned getReg() const override {
364 assert(Kind == k_Register && "Invalid access!");
368 unsigned getVectorListStart() const {
369 assert(Kind == k_VectorList && "Invalid access!");
370 return VectorList.RegNum;
373 unsigned getVectorListCount() const {
374 assert(Kind == k_VectorList && "Invalid access!");
375 return VectorList.Count;
378 unsigned getVectorIndex() const {
379 assert(Kind == k_VectorIndex && "Invalid access!");
380 return VectorIndex.Val;
383 StringRef getSysReg() const {
384 assert(Kind == k_SysReg && "Invalid access!");
385 return StringRef(SysReg.Data, SysReg.Length);
388 unsigned getSysCR() const {
389 assert(Kind == k_SysCR && "Invalid access!");
393 unsigned getPrefetch() const {
394 assert(Kind == k_Prefetch && "Invalid access!");
398 StringRef getPrefetchName() const {
399 assert(Kind == k_Prefetch && "Invalid access!");
400 return StringRef(Prefetch.Data, Prefetch.Length);
403 AArch64_AM::ShiftExtendType getShiftExtendType() const {
404 assert(Kind == k_ShiftExtend && "Invalid access!");
405 return ShiftExtend.Type;
408 unsigned getShiftExtendAmount() const {
409 assert(Kind == k_ShiftExtend && "Invalid access!");
410 return ShiftExtend.Amount;
413 bool hasShiftExtendAmount() const {
414 assert(Kind == k_ShiftExtend && "Invalid access!");
415 return ShiftExtend.HasExplicitAmount;
418 bool isImm() const override { return Kind == k_Immediate; }
419 bool isMem() const override { return false; }
420 bool isSImm9() const {
423 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
426 int64_t Val = MCE->getValue();
427 return (Val >= -256 && Val < 256);
429 bool isSImm7s4() const {
432 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
435 int64_t Val = MCE->getValue();
436 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
438 bool isSImm7s8() const {
441 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
444 int64_t Val = MCE->getValue();
445 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
447 bool isSImm7s16() const {
450 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
453 int64_t Val = MCE->getValue();
454 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
457 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
458 AArch64MCExpr::VariantKind ELFRefKind;
459 MCSymbolRefExpr::VariantKind DarwinRefKind;
461 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
463 // If we don't understand the expression, assume the best and
464 // let the fixup and relocation code deal with it.
468 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
469 ELFRefKind == AArch64MCExpr::VK_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
471 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
472 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
474 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
475 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
476 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
477 // Note that we don't range-check the addend. It's adjusted modulo page
478 // size when converted, so there is no "out of range" condition when using
480 return Addend >= 0 && (Addend % Scale) == 0;
481 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
482 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
483 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
490 template <int Scale> bool isUImm12Offset() const {
494 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
496 return isSymbolicUImm12Offset(getImm(), Scale);
498 int64_t Val = MCE->getValue();
499 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
502 bool isImm0_7() const {
505 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
508 int64_t Val = MCE->getValue();
509 return (Val >= 0 && Val < 8);
511 bool isImm1_8() const {
514 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
517 int64_t Val = MCE->getValue();
518 return (Val > 0 && Val < 9);
520 bool isImm0_15() const {
523 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
526 int64_t Val = MCE->getValue();
527 return (Val >= 0 && Val < 16);
529 bool isImm1_16() const {
532 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
535 int64_t Val = MCE->getValue();
536 return (Val > 0 && Val < 17);
538 bool isImm0_31() const {
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 int64_t Val = MCE->getValue();
545 return (Val >= 0 && Val < 32);
547 bool isImm1_31() const {
550 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
553 int64_t Val = MCE->getValue();
554 return (Val >= 1 && Val < 32);
556 bool isImm1_32() const {
559 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
562 int64_t Val = MCE->getValue();
563 return (Val >= 1 && Val < 33);
565 bool isImm0_63() const {
568 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
571 int64_t Val = MCE->getValue();
572 return (Val >= 0 && Val < 64);
574 bool isImm1_63() const {
577 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
580 int64_t Val = MCE->getValue();
581 return (Val >= 1 && Val < 64);
583 bool isImm1_64() const {
586 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
589 int64_t Val = MCE->getValue();
590 return (Val >= 1 && Val < 65);
592 bool isImm0_127() const {
595 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
598 int64_t Val = MCE->getValue();
599 return (Val >= 0 && Val < 128);
601 bool isImm0_255() const {
604 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
607 int64_t Val = MCE->getValue();
608 return (Val >= 0 && Val < 256);
610 bool isImm0_65535() const {
613 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
616 int64_t Val = MCE->getValue();
617 return (Val >= 0 && Val < 65536);
619 bool isImm32_63() const {
622 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
625 int64_t Val = MCE->getValue();
626 return (Val >= 32 && Val < 64);
628 bool isLogicalImm32() const {
631 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
634 int64_t Val = MCE->getValue();
635 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
638 return AArch64_AM::isLogicalImmediate(Val, 32);
640 bool isLogicalImm64() const {
643 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
646 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
648 bool isLogicalImm32Not() const {
651 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
654 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
655 return AArch64_AM::isLogicalImmediate(Val, 32);
657 bool isLogicalImm64Not() const {
660 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
663 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
665 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
666 bool isAddSubImm() const {
667 if (!isShiftedImm() && !isImm())
672 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
673 if (isShiftedImm()) {
674 unsigned Shift = ShiftedImm.ShiftAmount;
675 Expr = ShiftedImm.Val;
676 if (Shift != 0 && Shift != 12)
682 AArch64MCExpr::VariantKind ELFRefKind;
683 MCSymbolRefExpr::VariantKind DarwinRefKind;
685 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
686 DarwinRefKind, Addend)) {
687 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
688 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
689 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
690 || ELFRefKind == AArch64MCExpr::VK_LO12
691 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
692 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
693 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
694 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
695 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
696 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
697 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
700 // Otherwise it should be a real immediate in range:
701 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
702 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
704 bool isCondCode() const { return Kind == k_CondCode; }
705 bool isSIMDImmType10() const {
708 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
711 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
713 bool isBranchTarget26() const {
716 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
719 int64_t Val = MCE->getValue();
722 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
724 bool isPCRelLabel19() const {
727 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
730 int64_t Val = MCE->getValue();
733 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
735 bool isBranchTarget14() const {
738 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
741 int64_t Val = MCE->getValue();
744 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
748 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
752 AArch64MCExpr::VariantKind ELFRefKind;
753 MCSymbolRefExpr::VariantKind DarwinRefKind;
755 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
756 DarwinRefKind, Addend)) {
759 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
762 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
763 if (ELFRefKind == AllowedModifiers[i])
770 bool isMovZSymbolG3() const {
771 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
774 bool isMovZSymbolG2() const {
775 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
776 AArch64MCExpr::VK_TPREL_G2,
777 AArch64MCExpr::VK_DTPREL_G2});
780 bool isMovZSymbolG1() const {
781 return isMovWSymbol({
782 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
783 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
784 AArch64MCExpr::VK_DTPREL_G1,
788 bool isMovZSymbolG0() const {
789 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
790 AArch64MCExpr::VK_TPREL_G0,
791 AArch64MCExpr::VK_DTPREL_G0});
794 bool isMovKSymbolG3() const {
795 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
798 bool isMovKSymbolG2() const {
799 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
802 bool isMovKSymbolG1() const {
803 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
804 AArch64MCExpr::VK_TPREL_G1_NC,
805 AArch64MCExpr::VK_DTPREL_G1_NC});
808 bool isMovKSymbolG0() const {
810 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
811 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
814 template<int RegWidth, int Shift>
815 bool isMOVZMovAlias() const {
816 if (!isImm()) return false;
818 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
819 if (!CE) return false;
820 uint64_t Value = CE->getValue();
823 Value &= 0xffffffffULL;
825 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
826 if (Value == 0 && Shift != 0)
829 return (Value & ~(0xffffULL << Shift)) == 0;
832 template<int RegWidth, int Shift>
833 bool isMOVNMovAlias() const {
834 if (!isImm()) return false;
836 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
837 if (!CE) return false;
838 uint64_t Value = CE->getValue();
840 // MOVZ takes precedence over MOVN.
841 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
842 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
847 Value &= 0xffffffffULL;
849 return (Value & ~(0xffffULL << Shift)) == 0;
852 bool isFPImm() const { return Kind == k_FPImm; }
853 bool isBarrier() const { return Kind == k_Barrier; }
854 bool isSysReg() const { return Kind == k_SysReg; }
855 bool isMRSSystemRegister() const {
856 if (!isSysReg()) return false;
858 return SysReg.MRSReg != -1U;
860 bool isMSRSystemRegister() const {
861 if (!isSysReg()) return false;
863 return SysReg.MSRReg != -1U;
865 bool isSystemPStateField() const {
866 if (!isSysReg()) return false;
868 return SysReg.PStateField != -1U;
870 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
871 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
872 bool isVectorRegLo() const {
873 return Kind == k_Register && Reg.isVector &&
874 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
877 bool isGPR32as64() const {
878 return Kind == k_Register && !Reg.isVector &&
879 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
881 bool isWSeqPair() const {
882 return Kind == k_Register && !Reg.isVector &&
883 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
886 bool isXSeqPair() const {
887 return Kind == k_Register && !Reg.isVector &&
888 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
892 bool isGPR64sp0() const {
893 return Kind == k_Register && !Reg.isVector &&
894 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
897 /// Is this a vector list with the type implicit (presumably attached to the
898 /// instruction itself)?
899 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
900 return Kind == k_VectorList && VectorList.Count == NumRegs &&
901 !VectorList.ElementKind;
904 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
905 bool isTypedVectorList() const {
906 if (Kind != k_VectorList)
908 if (VectorList.Count != NumRegs)
910 if (VectorList.ElementKind != ElementKind)
912 return VectorList.NumElements == NumElements;
915 bool isVectorIndex1() const {
916 return Kind == k_VectorIndex && VectorIndex.Val == 1;
918 bool isVectorIndexB() const {
919 return Kind == k_VectorIndex && VectorIndex.Val < 16;
921 bool isVectorIndexH() const {
922 return Kind == k_VectorIndex && VectorIndex.Val < 8;
924 bool isVectorIndexS() const {
925 return Kind == k_VectorIndex && VectorIndex.Val < 4;
927 bool isVectorIndexD() const {
928 return Kind == k_VectorIndex && VectorIndex.Val < 2;
930 bool isToken() const override { return Kind == k_Token; }
931 bool isTokenEqual(StringRef Str) const {
932 return Kind == k_Token && getToken() == Str;
934 bool isSysCR() const { return Kind == k_SysCR; }
935 bool isPrefetch() const { return Kind == k_Prefetch; }
936 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
937 bool isShifter() const {
938 if (!isShiftExtend())
941 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
942 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
943 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
944 ST == AArch64_AM::MSL);
946 bool isExtend() const {
947 if (!isShiftExtend())
950 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
951 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
952 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
953 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
954 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
955 ET == AArch64_AM::LSL) &&
956 getShiftExtendAmount() <= 4;
959 bool isExtend64() const {
962 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
963 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
964 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
966 bool isExtendLSL64() const {
969 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
970 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
971 ET == AArch64_AM::LSL) &&
972 getShiftExtendAmount() <= 4;
975 template<int Width> bool isMemXExtend() const {
978 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
979 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
980 (getShiftExtendAmount() == Log2_32(Width / 8) ||
981 getShiftExtendAmount() == 0);
984 template<int Width> bool isMemWExtend() const {
987 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
988 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
989 (getShiftExtendAmount() == Log2_32(Width / 8) ||
990 getShiftExtendAmount() == 0);
993 template <unsigned width>
994 bool isArithmeticShifter() const {
998 // An arithmetic shifter is LSL, LSR, or ASR.
999 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1000 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1001 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1004 template <unsigned width>
1005 bool isLogicalShifter() const {
1009 // A logical shifter is LSL, LSR, ASR or ROR.
1010 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1011 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1012 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1013 getShiftExtendAmount() < width;
1016 bool isMovImm32Shifter() const {
1020 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1021 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1022 if (ST != AArch64_AM::LSL)
1024 uint64_t Val = getShiftExtendAmount();
1025 return (Val == 0 || Val == 16);
1028 bool isMovImm64Shifter() const {
1032 // A MOVi shifter is LSL of 0 or 16.
1033 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1034 if (ST != AArch64_AM::LSL)
1036 uint64_t Val = getShiftExtendAmount();
1037 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1040 bool isLogicalVecShifter() const {
1044 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1045 unsigned Shift = getShiftExtendAmount();
1046 return getShiftExtendType() == AArch64_AM::LSL &&
1047 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1050 bool isLogicalVecHalfWordShifter() const {
1051 if (!isLogicalVecShifter())
1054 // A logical vector shifter is a left shift by 0 or 8.
1055 unsigned Shift = getShiftExtendAmount();
1056 return getShiftExtendType() == AArch64_AM::LSL &&
1057 (Shift == 0 || Shift == 8);
1060 bool isMoveVecShifter() const {
1061 if (!isShiftExtend())
1064 // A logical vector shifter is a left shift by 8 or 16.
1065 unsigned Shift = getShiftExtendAmount();
1066 return getShiftExtendType() == AArch64_AM::MSL &&
1067 (Shift == 8 || Shift == 16);
1070 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1071 // to LDUR/STUR when the offset is not legal for the former but is for
1072 // the latter. As such, in addition to checking for being a legal unscaled
1073 // address, also check that it is not a legal scaled address. This avoids
1074 // ambiguity in the matcher.
1076 bool isSImm9OffsetFB() const {
1077 return isSImm9() && !isUImm12Offset<Width / 8>();
1080 bool isAdrpLabel() const {
1081 // Validation was handled during parsing, so we just sanity check that
1082 // something didn't go haywire.
1086 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1087 int64_t Val = CE->getValue();
1088 int64_t Min = - (4096 * (1LL << (21 - 1)));
1089 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1090 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1096 bool isAdrLabel() const {
1097 // Validation was handled during parsing, so we just sanity check that
1098 // something didn't go haywire.
1102 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1103 int64_t Val = CE->getValue();
1104 int64_t Min = - (1LL << (21 - 1));
1105 int64_t Max = ((1LL << (21 - 1)) - 1);
1106 return Val >= Min && Val <= Max;
1112 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1113 // Add as immediates when possible. Null MCExpr = 0.
1115 Inst.addOperand(MCOperand::createImm(0));
1116 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1117 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1119 Inst.addOperand(MCOperand::createExpr(Expr));
1122 void addRegOperands(MCInst &Inst, unsigned N) const {
1123 assert(N == 1 && "Invalid number of operands!");
1124 Inst.addOperand(MCOperand::createReg(getReg()));
1127 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1128 assert(N == 1 && "Invalid number of operands!");
1130 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1132 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1133 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1134 RI->getEncodingValue(getReg()));
1136 Inst.addOperand(MCOperand::createReg(Reg));
1139 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1142 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1143 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1146 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1147 assert(N == 1 && "Invalid number of operands!");
1149 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1150 Inst.addOperand(MCOperand::createReg(getReg()));
1153 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1154 assert(N == 1 && "Invalid number of operands!");
1155 Inst.addOperand(MCOperand::createReg(getReg()));
1158 template <unsigned NumRegs>
1159 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1162 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1163 unsigned FirstReg = FirstRegs[NumRegs - 1];
1166 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1169 template <unsigned NumRegs>
1170 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1171 assert(N == 1 && "Invalid number of operands!");
1172 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1173 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1174 unsigned FirstReg = FirstRegs[NumRegs - 1];
1177 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1180 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1185 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 1 && "Invalid number of operands!");
1187 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1190 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1191 assert(N == 1 && "Invalid number of operands!");
1192 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1195 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1196 assert(N == 1 && "Invalid number of operands!");
1197 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1200 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1201 assert(N == 1 && "Invalid number of operands!");
1202 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1205 void addImmOperands(MCInst &Inst, unsigned N) const {
1206 assert(N == 1 && "Invalid number of operands!");
1207 // If this is a pageoff symrefexpr with an addend, adjust the addend
1208 // to be only the page-offset portion. Otherwise, just add the expr
1210 addExpr(Inst, getImm());
1213 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1214 assert(N == 2 && "Invalid number of operands!");
1215 if (isShiftedImm()) {
1216 addExpr(Inst, getShiftedImmVal());
1217 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1219 addExpr(Inst, getImm());
1220 Inst.addOperand(MCOperand::createImm(0));
1224 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1225 assert(N == 1 && "Invalid number of operands!");
1226 Inst.addOperand(MCOperand::createImm(getCondCode()));
1229 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1233 addExpr(Inst, getImm());
1235 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1238 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1239 addImmOperands(Inst, N);
1243 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1244 assert(N == 1 && "Invalid number of operands!");
1245 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1248 Inst.addOperand(MCOperand::createExpr(getImm()));
1251 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1254 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1255 assert(N == 1 && "Invalid number of operands!");
1256 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1257 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1260 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1263 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1266 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1267 assert(N == 1 && "Invalid number of operands!");
1268 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1269 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1272 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1275 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1278 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1279 assert(N == 1 && "Invalid number of operands!");
1280 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1281 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1284 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1285 assert(N == 1 && "Invalid number of operands!");
1286 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1287 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1290 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1291 assert(N == 1 && "Invalid number of operands!");
1292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1293 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1296 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1299 assert(MCE && "Invalid constant immediate operand!");
1300 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1303 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1306 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1309 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1312 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1315 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1316 assert(N == 1 && "Invalid number of operands!");
1317 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1318 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1321 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1324 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1327 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
1329 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1330 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1333 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1339 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1341 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1345 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1351 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1352 assert(N == 1 && "Invalid number of operands!");
1353 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1354 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1357 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1363 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1364 assert(N == 1 && "Invalid number of operands!");
1365 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1368 Inst.addOperand(MCOperand::createImm(encoding));
1371 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1374 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1375 Inst.addOperand(MCOperand::createImm(encoding));
1378 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1379 assert(N == 1 && "Invalid number of operands!");
1380 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1381 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1382 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1383 Inst.addOperand(MCOperand::createImm(encoding));
1386 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1387 assert(N == 1 && "Invalid number of operands!");
1388 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1390 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1391 Inst.addOperand(MCOperand::createImm(encoding));
1394 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1395 assert(N == 1 && "Invalid number of operands!");
1396 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1397 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1398 Inst.addOperand(MCOperand::createImm(encoding));
1401 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 addExpr(Inst, getImm());
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1415 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1416 // Branch operands don't encode the low bits, so shift them off
1417 // here. If it's a label, however, just put it on directly as there's
1418 // not enough information now to do anything.
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 addExpr(Inst, getImm());
1425 assert(MCE && "Invalid constant immediate operand!");
1426 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1429 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1430 // Branch operands don't encode the low bits, so shift them off
1431 // here. If it's a label, however, just put it on directly as there's
1432 // not enough information now to do anything.
1433 assert(N == 1 && "Invalid number of operands!");
1434 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1436 addExpr(Inst, getImm());
1439 assert(MCE && "Invalid constant immediate operand!");
1440 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1443 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1444 assert(N == 1 && "Invalid number of operands!");
1445 Inst.addOperand(MCOperand::createImm(getFPImm()));
1448 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1449 assert(N == 1 && "Invalid number of operands!");
1450 Inst.addOperand(MCOperand::createImm(getBarrier()));
1453 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1454 assert(N == 1 && "Invalid number of operands!");
1456 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1459 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1460 assert(N == 1 && "Invalid number of operands!");
1462 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1465 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1466 assert(N == 1 && "Invalid number of operands!");
1468 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1471 void addSysCROperands(MCInst &Inst, unsigned N) const {
1472 assert(N == 1 && "Invalid number of operands!");
1473 Inst.addOperand(MCOperand::createImm(getSysCR()));
1476 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1477 assert(N == 1 && "Invalid number of operands!");
1478 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1481 void addShifterOperands(MCInst &Inst, unsigned N) const {
1482 assert(N == 1 && "Invalid number of operands!");
1484 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1485 Inst.addOperand(MCOperand::createImm(Imm));
1488 void addExtendOperands(MCInst &Inst, unsigned N) const {
1489 assert(N == 1 && "Invalid number of operands!");
1490 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1491 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1492 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1493 Inst.addOperand(MCOperand::createImm(Imm));
1496 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1497 assert(N == 1 && "Invalid number of operands!");
1498 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1499 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1500 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1501 Inst.addOperand(MCOperand::createImm(Imm));
1504 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1505 assert(N == 2 && "Invalid number of operands!");
1506 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1507 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1508 Inst.addOperand(MCOperand::createImm(IsSigned));
1509 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1512 // For 8-bit load/store instructions with a register offset, both the
1513 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1514 // they're disambiguated by whether the shift was explicit or implicit rather
1516 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1517 assert(N == 2 && "Invalid number of operands!");
1518 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1519 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1520 Inst.addOperand(MCOperand::createImm(IsSigned));
1521 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1525 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1526 assert(N == 1 && "Invalid number of operands!");
1528 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1529 uint64_t Value = CE->getValue();
1530 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1534 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1535 assert(N == 1 && "Invalid number of operands!");
1537 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1538 uint64_t Value = CE->getValue();
1539 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1542 void print(raw_ostream &OS) const override;
1544 static std::unique_ptr<AArch64Operand>
1545 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1546 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1547 Op->Tok.Data = Str.data();
1548 Op->Tok.Length = Str.size();
1549 Op->Tok.IsSuffix = IsSuffix;
1555 static std::unique_ptr<AArch64Operand>
1556 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1557 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1558 Op->Reg.RegNum = RegNum;
1559 Op->Reg.isVector = isVector;
1565 static std::unique_ptr<AArch64Operand>
1566 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1567 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1568 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1569 Op->VectorList.RegNum = RegNum;
1570 Op->VectorList.Count = Count;
1571 Op->VectorList.NumElements = NumElements;
1572 Op->VectorList.ElementKind = ElementKind;
1578 static std::unique_ptr<AArch64Operand>
1579 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1580 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1581 Op->VectorIndex.Val = Idx;
1587 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1588 SMLoc E, MCContext &Ctx) {
1589 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1596 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1597 unsigned ShiftAmount,
1600 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1601 Op->ShiftedImm .Val = Val;
1602 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1608 static std::unique_ptr<AArch64Operand>
1609 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1610 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1611 Op->CondCode.Code = Code;
1617 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1619 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1620 Op->FPImm.Val = Val;
1626 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1630 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1631 Op->Barrier.Val = Val;
1632 Op->Barrier.Data = Str.data();
1633 Op->Barrier.Length = Str.size();
1639 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1642 uint32_t PStateField,
1644 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1645 Op->SysReg.Data = Str.data();
1646 Op->SysReg.Length = Str.size();
1647 Op->SysReg.MRSReg = MRSReg;
1648 Op->SysReg.MSRReg = MSRReg;
1649 Op->SysReg.PStateField = PStateField;
1655 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1656 SMLoc E, MCContext &Ctx) {
1657 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1658 Op->SysCRImm.Val = Val;
1664 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1668 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1669 Op->Prefetch.Val = Val;
1670 Op->Barrier.Data = Str.data();
1671 Op->Barrier.Length = Str.size();
1677 static std::unique_ptr<AArch64Operand>
1678 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1679 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1680 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1681 Op->ShiftExtend.Type = ShOp;
1682 Op->ShiftExtend.Amount = Val;
1683 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1690 } // end anonymous namespace.
1692 void AArch64Operand::print(raw_ostream &OS) const {
1695 OS << "<fpimm " << getFPImm() << "("
1696 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1699 StringRef Name = getBarrierName();
1701 OS << "<barrier " << Name << ">";
1703 OS << "<barrier invalid #" << getBarrier() << ">";
1709 case k_ShiftedImm: {
1710 unsigned Shift = getShiftedImmShift();
1711 OS << "<shiftedimm ";
1712 OS << *getShiftedImmVal();
1713 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1717 OS << "<condcode " << getCondCode() << ">";
1720 OS << "<register " << getReg() << ">";
1722 case k_VectorList: {
1723 OS << "<vectorlist ";
1724 unsigned Reg = getVectorListStart();
1725 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1726 OS << Reg + i << " ";
1731 OS << "<vectorindex " << getVectorIndex() << ">";
1734 OS << "<sysreg: " << getSysReg() << '>';
1737 OS << "'" << getToken() << "'";
1740 OS << "c" << getSysCR();
1743 StringRef Name = getPrefetchName();
1745 OS << "<prfop " << Name << ">";
1747 OS << "<prfop invalid #" << getPrefetch() << ">";
1750 case k_ShiftExtend: {
1751 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1752 << getShiftExtendAmount();
1753 if (!hasShiftExtendAmount())
1761 /// @name Auto-generated Match Functions
1764 static unsigned MatchRegisterName(StringRef Name);
1768 static unsigned matchVectorRegName(StringRef Name) {
1769 return StringSwitch<unsigned>(Name.lower())
1770 .Case("v0", AArch64::Q0)
1771 .Case("v1", AArch64::Q1)
1772 .Case("v2", AArch64::Q2)
1773 .Case("v3", AArch64::Q3)
1774 .Case("v4", AArch64::Q4)
1775 .Case("v5", AArch64::Q5)
1776 .Case("v6", AArch64::Q6)
1777 .Case("v7", AArch64::Q7)
1778 .Case("v8", AArch64::Q8)
1779 .Case("v9", AArch64::Q9)
1780 .Case("v10", AArch64::Q10)
1781 .Case("v11", AArch64::Q11)
1782 .Case("v12", AArch64::Q12)
1783 .Case("v13", AArch64::Q13)
1784 .Case("v14", AArch64::Q14)
1785 .Case("v15", AArch64::Q15)
1786 .Case("v16", AArch64::Q16)
1787 .Case("v17", AArch64::Q17)
1788 .Case("v18", AArch64::Q18)
1789 .Case("v19", AArch64::Q19)
1790 .Case("v20", AArch64::Q20)
1791 .Case("v21", AArch64::Q21)
1792 .Case("v22", AArch64::Q22)
1793 .Case("v23", AArch64::Q23)
1794 .Case("v24", AArch64::Q24)
1795 .Case("v25", AArch64::Q25)
1796 .Case("v26", AArch64::Q26)
1797 .Case("v27", AArch64::Q27)
1798 .Case("v28", AArch64::Q28)
1799 .Case("v29", AArch64::Q29)
1800 .Case("v30", AArch64::Q30)
1801 .Case("v31", AArch64::Q31)
1805 static bool isValidVectorKind(StringRef Name) {
1806 return StringSwitch<bool>(Name.lower())
1816 // Accept the width neutral ones, too, for verbose syntax. If those
1817 // aren't used in the right places, the token operand won't match so
1818 // all will work out.
1826 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1827 char &ElementKind) {
1828 assert(isValidVectorKind(Name));
1830 ElementKind = Name.lower()[Name.size() - 1];
1833 if (Name.size() == 2)
1836 // Parse the lane count
1837 Name = Name.drop_front();
1838 while (isdigit(Name.front())) {
1839 NumElements = 10 * NumElements + (Name.front() - '0');
1840 Name = Name.drop_front();
1844 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1846 StartLoc = getLoc();
1847 RegNo = tryParseRegister();
1848 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1849 return (RegNo == (unsigned)-1);
1852 // Matches a register name or register alias previously defined by '.req'
1853 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1855 unsigned RegNum = isVector ? matchVectorRegName(Name)
1856 : MatchRegisterName(Name);
1859 // Check for aliases registered via .req. Canonicalize to lower case.
1860 // That's more consistent since register names are case insensitive, and
1861 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1862 auto Entry = RegisterReqs.find(Name.lower());
1863 if (Entry == RegisterReqs.end())
1865 // set RegNum if the match is the right kind of register
1866 if (isVector == Entry->getValue().first)
1867 RegNum = Entry->getValue().second;
1872 /// tryParseRegister - Try to parse a register name. The token must be an
1873 /// Identifier when called, and if it is a register name the token is eaten and
1874 /// the register is added to the operand list.
1875 int AArch64AsmParser::tryParseRegister() {
1876 MCAsmParser &Parser = getParser();
1877 const AsmToken &Tok = Parser.getTok();
1878 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1880 std::string lowerCase = Tok.getString().lower();
1881 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1882 // Also handle a few aliases of registers.
1884 RegNum = StringSwitch<unsigned>(lowerCase)
1885 .Case("fp", AArch64::FP)
1886 .Case("lr", AArch64::LR)
1887 .Case("x31", AArch64::XZR)
1888 .Case("w31", AArch64::WZR)
1894 Parser.Lex(); // Eat identifier token.
1898 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1899 /// kind specifier. If it is a register specifier, eat the token and return it.
1900 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1901 MCAsmParser &Parser = getParser();
1902 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1903 TokError("vector register expected");
1907 StringRef Name = Parser.getTok().getString();
1908 // If there is a kind specifier, it's separated from the register name by
1910 size_t Start = 0, Next = Name.find('.');
1911 StringRef Head = Name.slice(Start, Next);
1912 unsigned RegNum = matchRegisterNameAlias(Head, true);
1915 if (Next != StringRef::npos) {
1916 Kind = Name.slice(Next, StringRef::npos);
1917 if (!isValidVectorKind(Kind)) {
1918 TokError("invalid vector kind qualifier");
1922 Parser.Lex(); // Eat the register token.
1927 TokError("vector register expected");
1931 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1932 AArch64AsmParser::OperandMatchResultTy
1933 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1934 MCAsmParser &Parser = getParser();
1937 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1938 Error(S, "Expected cN operand where 0 <= N <= 15");
1939 return MatchOperand_ParseFail;
1942 StringRef Tok = Parser.getTok().getIdentifier();
1943 if (Tok[0] != 'c' && Tok[0] != 'C') {
1944 Error(S, "Expected cN operand where 0 <= N <= 15");
1945 return MatchOperand_ParseFail;
1949 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1950 if (BadNum || CRNum > 15) {
1951 Error(S, "Expected cN operand where 0 <= N <= 15");
1952 return MatchOperand_ParseFail;
1955 Parser.Lex(); // Eat identifier token.
1957 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1958 return MatchOperand_Success;
1961 /// tryParsePrefetch - Try to parse a prefetch operand.
1962 AArch64AsmParser::OperandMatchResultTy
1963 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1964 MCAsmParser &Parser = getParser();
1966 const AsmToken &Tok = Parser.getTok();
1967 // Either an identifier for named values or a 5-bit immediate.
1968 bool Hash = Tok.is(AsmToken::Hash);
1969 if (Hash || Tok.is(AsmToken::Integer)) {
1971 Parser.Lex(); // Eat hash token.
1972 const MCExpr *ImmVal;
1973 if (getParser().parseExpression(ImmVal))
1974 return MatchOperand_ParseFail;
1976 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1978 TokError("immediate value expected for prefetch operand");
1979 return MatchOperand_ParseFail;
1981 unsigned prfop = MCE->getValue();
1983 TokError("prefetch operand out of range, [0,31] expected");
1984 return MatchOperand_ParseFail;
1988 auto Mapper = AArch64PRFM::PRFMMapper();
1990 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
1991 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
1993 return MatchOperand_Success;
1996 if (Tok.isNot(AsmToken::Identifier)) {
1997 TokError("pre-fetch hint expected");
1998 return MatchOperand_ParseFail;
2002 auto Mapper = AArch64PRFM::PRFMMapper();
2004 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2006 TokError("pre-fetch hint expected");
2007 return MatchOperand_ParseFail;
2010 Parser.Lex(); // Eat identifier token.
2011 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2013 return MatchOperand_Success;
2016 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2018 AArch64AsmParser::OperandMatchResultTy
2019 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2020 MCAsmParser &Parser = getParser();
2024 if (Parser.getTok().is(AsmToken::Hash)) {
2025 Parser.Lex(); // Eat hash token.
2028 if (parseSymbolicImmVal(Expr))
2029 return MatchOperand_ParseFail;
2031 AArch64MCExpr::VariantKind ELFRefKind;
2032 MCSymbolRefExpr::VariantKind DarwinRefKind;
2034 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2035 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2036 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2037 // No modifier was specified at all; this is the syntax for an ELF basic
2038 // ADRP relocation (unfortunately).
2040 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2041 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2042 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2044 Error(S, "gotpage label reference not allowed an addend");
2045 return MatchOperand_ParseFail;
2046 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2047 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2048 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2049 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2050 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2051 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2052 // The operand must be an @page or @gotpage qualified symbolref.
2053 Error(S, "page or gotpage label reference expected");
2054 return MatchOperand_ParseFail;
2058 // We have either a label reference possibly with addend or an immediate. The
2059 // addend is a raw value here. The linker will adjust it to only reference the
2061 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2062 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2064 return MatchOperand_Success;
2067 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2069 AArch64AsmParser::OperandMatchResultTy
2070 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2071 MCAsmParser &Parser = getParser();
2075 if (Parser.getTok().is(AsmToken::Hash)) {
2076 Parser.Lex(); // Eat hash token.
2079 if (getParser().parseExpression(Expr))
2080 return MatchOperand_ParseFail;
2082 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2083 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2085 return MatchOperand_Success;
2088 /// tryParseFPImm - A floating point immediate expression operand.
2089 AArch64AsmParser::OperandMatchResultTy
2090 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2091 MCAsmParser &Parser = getParser();
2095 if (Parser.getTok().is(AsmToken::Hash)) {
2096 Parser.Lex(); // Eat '#'
2100 // Handle negation, as that still comes through as a separate token.
2101 bool isNegative = false;
2102 if (Parser.getTok().is(AsmToken::Minus)) {
2106 const AsmToken &Tok = Parser.getTok();
2107 if (Tok.is(AsmToken::Real)) {
2108 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2110 RealVal.changeSign();
2112 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2113 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2114 Parser.Lex(); // Eat the token.
2115 // Check for out of range values. As an exception, we let Zero through,
2116 // as we handle that special case in post-processing before matching in
2117 // order to use the zero register for it.
2118 if (Val == -1 && !RealVal.isPosZero()) {
2119 TokError("expected compatible register or floating-point constant");
2120 return MatchOperand_ParseFail;
2122 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2123 return MatchOperand_Success;
2125 if (Tok.is(AsmToken::Integer)) {
2127 if (!isNegative && Tok.getString().startswith("0x")) {
2128 Val = Tok.getIntVal();
2129 if (Val > 255 || Val < 0) {
2130 TokError("encoded floating point value out of range");
2131 return MatchOperand_ParseFail;
2134 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2135 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2136 // If we had a '-' in front, toggle the sign bit.
2137 IntVal ^= (uint64_t)isNegative << 63;
2138 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2140 Parser.Lex(); // Eat the token.
2141 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2142 return MatchOperand_Success;
2146 return MatchOperand_NoMatch;
2148 TokError("invalid floating point immediate");
2149 return MatchOperand_ParseFail;
2152 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2153 AArch64AsmParser::OperandMatchResultTy
2154 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2155 MCAsmParser &Parser = getParser();
2158 if (Parser.getTok().is(AsmToken::Hash))
2159 Parser.Lex(); // Eat '#'
2160 else if (Parser.getTok().isNot(AsmToken::Integer))
2161 // Operand should start from # or should be integer, emit error otherwise.
2162 return MatchOperand_NoMatch;
2165 if (parseSymbolicImmVal(Imm))
2166 return MatchOperand_ParseFail;
2167 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2168 uint64_t ShiftAmount = 0;
2169 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2171 int64_t Val = MCE->getValue();
2172 if (Val > 0xfff && (Val & 0xfff) == 0) {
2173 Imm = MCConstantExpr::create(Val >> 12, getContext());
2177 SMLoc E = Parser.getTok().getLoc();
2178 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2180 return MatchOperand_Success;
2186 // The optional operand must be "lsl #N" where N is non-negative.
2187 if (!Parser.getTok().is(AsmToken::Identifier) ||
2188 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2189 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2190 return MatchOperand_ParseFail;
2196 if (Parser.getTok().is(AsmToken::Hash)) {
2200 if (Parser.getTok().isNot(AsmToken::Integer)) {
2201 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2202 return MatchOperand_ParseFail;
2205 int64_t ShiftAmount = Parser.getTok().getIntVal();
2207 if (ShiftAmount < 0) {
2208 Error(Parser.getTok().getLoc(), "positive shift amount required");
2209 return MatchOperand_ParseFail;
2211 Parser.Lex(); // Eat the number
2213 SMLoc E = Parser.getTok().getLoc();
2214 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2215 S, E, getContext()));
2216 return MatchOperand_Success;
2219 /// parseCondCodeString - Parse a Condition Code string.
2220 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2221 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2222 .Case("eq", AArch64CC::EQ)
2223 .Case("ne", AArch64CC::NE)
2224 .Case("cs", AArch64CC::HS)
2225 .Case("hs", AArch64CC::HS)
2226 .Case("cc", AArch64CC::LO)
2227 .Case("lo", AArch64CC::LO)
2228 .Case("mi", AArch64CC::MI)
2229 .Case("pl", AArch64CC::PL)
2230 .Case("vs", AArch64CC::VS)
2231 .Case("vc", AArch64CC::VC)
2232 .Case("hi", AArch64CC::HI)
2233 .Case("ls", AArch64CC::LS)
2234 .Case("ge", AArch64CC::GE)
2235 .Case("lt", AArch64CC::LT)
2236 .Case("gt", AArch64CC::GT)
2237 .Case("le", AArch64CC::LE)
2238 .Case("al", AArch64CC::AL)
2239 .Case("nv", AArch64CC::NV)
2240 .Default(AArch64CC::Invalid);
2244 /// parseCondCode - Parse a Condition Code operand.
2245 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2246 bool invertCondCode) {
2247 MCAsmParser &Parser = getParser();
2249 const AsmToken &Tok = Parser.getTok();
2250 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2252 StringRef Cond = Tok.getString();
2253 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2254 if (CC == AArch64CC::Invalid)
2255 return TokError("invalid condition code");
2256 Parser.Lex(); // Eat identifier token.
2258 if (invertCondCode) {
2259 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2260 return TokError("condition codes AL and NV are invalid for this instruction");
2261 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2265 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2269 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2270 /// them if present.
2271 AArch64AsmParser::OperandMatchResultTy
2272 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2273 MCAsmParser &Parser = getParser();
2274 const AsmToken &Tok = Parser.getTok();
2275 std::string LowerID = Tok.getString().lower();
2276 AArch64_AM::ShiftExtendType ShOp =
2277 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2278 .Case("lsl", AArch64_AM::LSL)
2279 .Case("lsr", AArch64_AM::LSR)
2280 .Case("asr", AArch64_AM::ASR)
2281 .Case("ror", AArch64_AM::ROR)
2282 .Case("msl", AArch64_AM::MSL)
2283 .Case("uxtb", AArch64_AM::UXTB)
2284 .Case("uxth", AArch64_AM::UXTH)
2285 .Case("uxtw", AArch64_AM::UXTW)
2286 .Case("uxtx", AArch64_AM::UXTX)
2287 .Case("sxtb", AArch64_AM::SXTB)
2288 .Case("sxth", AArch64_AM::SXTH)
2289 .Case("sxtw", AArch64_AM::SXTW)
2290 .Case("sxtx", AArch64_AM::SXTX)
2291 .Default(AArch64_AM::InvalidShiftExtend);
2293 if (ShOp == AArch64_AM::InvalidShiftExtend)
2294 return MatchOperand_NoMatch;
2296 SMLoc S = Tok.getLoc();
2299 bool Hash = getLexer().is(AsmToken::Hash);
2300 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2301 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2302 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2303 ShOp == AArch64_AM::MSL) {
2304 // We expect a number here.
2305 TokError("expected #imm after shift specifier");
2306 return MatchOperand_ParseFail;
2309 // "extend" type operatoins don't need an immediate, #0 is implicit.
2310 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2312 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2313 return MatchOperand_Success;
2317 Parser.Lex(); // Eat the '#'.
2319 // Make sure we do actually have a number or a parenthesized expression.
2320 SMLoc E = Parser.getTok().getLoc();
2321 if (!Parser.getTok().is(AsmToken::Integer) &&
2322 !Parser.getTok().is(AsmToken::LParen)) {
2323 Error(E, "expected integer shift amount");
2324 return MatchOperand_ParseFail;
2327 const MCExpr *ImmVal;
2328 if (getParser().parseExpression(ImmVal))
2329 return MatchOperand_ParseFail;
2331 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2333 Error(E, "expected constant '#imm' after shift specifier");
2334 return MatchOperand_ParseFail;
2337 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2338 Operands.push_back(AArch64Operand::CreateShiftExtend(
2339 ShOp, MCE->getValue(), true, S, E, getContext()));
2340 return MatchOperand_Success;
2343 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2344 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2345 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2346 OperandVector &Operands) {
2347 if (Name.find('.') != StringRef::npos)
2348 return TokError("invalid operand");
2352 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2354 MCAsmParser &Parser = getParser();
2355 const AsmToken &Tok = Parser.getTok();
2356 StringRef Op = Tok.getString();
2357 SMLoc S = Tok.getLoc();
2359 const MCExpr *Expr = nullptr;
2361 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2363 Expr = MCConstantExpr::create(op1, getContext()); \
2364 Operands.push_back( \
2365 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2366 Operands.push_back( \
2367 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2368 Operands.push_back( \
2369 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2370 Expr = MCConstantExpr::create(op2, getContext()); \
2371 Operands.push_back( \
2372 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2375 if (Mnemonic == "ic") {
2376 if (!Op.compare_lower("ialluis")) {
2377 // SYS #0, C7, C1, #0
2378 SYS_ALIAS(0, 7, 1, 0);
2379 } else if (!Op.compare_lower("iallu")) {
2380 // SYS #0, C7, C5, #0
2381 SYS_ALIAS(0, 7, 5, 0);
2382 } else if (!Op.compare_lower("ivau")) {
2383 // SYS #3, C7, C5, #1
2384 SYS_ALIAS(3, 7, 5, 1);
2386 return TokError("invalid operand for IC instruction");
2388 } else if (Mnemonic == "dc") {
2389 if (!Op.compare_lower("zva")) {
2390 // SYS #3, C7, C4, #1
2391 SYS_ALIAS(3, 7, 4, 1);
2392 } else if (!Op.compare_lower("ivac")) {
2393 // SYS #3, C7, C6, #1
2394 SYS_ALIAS(0, 7, 6, 1);
2395 } else if (!Op.compare_lower("isw")) {
2396 // SYS #0, C7, C6, #2
2397 SYS_ALIAS(0, 7, 6, 2);
2398 } else if (!Op.compare_lower("cvac")) {
2399 // SYS #3, C7, C10, #1
2400 SYS_ALIAS(3, 7, 10, 1);
2401 } else if (!Op.compare_lower("csw")) {
2402 // SYS #0, C7, C10, #2
2403 SYS_ALIAS(0, 7, 10, 2);
2404 } else if (!Op.compare_lower("cvau")) {
2405 // SYS #3, C7, C11, #1
2406 SYS_ALIAS(3, 7, 11, 1);
2407 } else if (!Op.compare_lower("civac")) {
2408 // SYS #3, C7, C14, #1
2409 SYS_ALIAS(3, 7, 14, 1);
2410 } else if (!Op.compare_lower("cisw")) {
2411 // SYS #0, C7, C14, #2
2412 SYS_ALIAS(0, 7, 14, 2);
2414 return TokError("invalid operand for DC instruction");
2416 } else if (Mnemonic == "at") {
2417 if (!Op.compare_lower("s1e1r")) {
2418 // SYS #0, C7, C8, #0
2419 SYS_ALIAS(0, 7, 8, 0);
2420 } else if (!Op.compare_lower("s1e2r")) {
2421 // SYS #4, C7, C8, #0
2422 SYS_ALIAS(4, 7, 8, 0);
2423 } else if (!Op.compare_lower("s1e3r")) {
2424 // SYS #6, C7, C8, #0
2425 SYS_ALIAS(6, 7, 8, 0);
2426 } else if (!Op.compare_lower("s1e1w")) {
2427 // SYS #0, C7, C8, #1
2428 SYS_ALIAS(0, 7, 8, 1);
2429 } else if (!Op.compare_lower("s1e2w")) {
2430 // SYS #4, C7, C8, #1
2431 SYS_ALIAS(4, 7, 8, 1);
2432 } else if (!Op.compare_lower("s1e3w")) {
2433 // SYS #6, C7, C8, #1
2434 SYS_ALIAS(6, 7, 8, 1);
2435 } else if (!Op.compare_lower("s1e0r")) {
2436 // SYS #0, C7, C8, #3
2437 SYS_ALIAS(0, 7, 8, 2);
2438 } else if (!Op.compare_lower("s1e0w")) {
2439 // SYS #0, C7, C8, #3
2440 SYS_ALIAS(0, 7, 8, 3);
2441 } else if (!Op.compare_lower("s12e1r")) {
2442 // SYS #4, C7, C8, #4
2443 SYS_ALIAS(4, 7, 8, 4);
2444 } else if (!Op.compare_lower("s12e1w")) {
2445 // SYS #4, C7, C8, #5
2446 SYS_ALIAS(4, 7, 8, 5);
2447 } else if (!Op.compare_lower("s12e0r")) {
2448 // SYS #4, C7, C8, #6
2449 SYS_ALIAS(4, 7, 8, 6);
2450 } else if (!Op.compare_lower("s12e0w")) {
2451 // SYS #4, C7, C8, #7
2452 SYS_ALIAS(4, 7, 8, 7);
2454 return TokError("invalid operand for AT instruction");
2456 } else if (Mnemonic == "tlbi") {
2457 if (!Op.compare_lower("vmalle1is")) {
2458 // SYS #0, C8, C3, #0
2459 SYS_ALIAS(0, 8, 3, 0);
2460 } else if (!Op.compare_lower("alle2is")) {
2461 // SYS #4, C8, C3, #0
2462 SYS_ALIAS(4, 8, 3, 0);
2463 } else if (!Op.compare_lower("alle3is")) {
2464 // SYS #6, C8, C3, #0
2465 SYS_ALIAS(6, 8, 3, 0);
2466 } else if (!Op.compare_lower("vae1is")) {
2467 // SYS #0, C8, C3, #1
2468 SYS_ALIAS(0, 8, 3, 1);
2469 } else if (!Op.compare_lower("vae2is")) {
2470 // SYS #4, C8, C3, #1
2471 SYS_ALIAS(4, 8, 3, 1);
2472 } else if (!Op.compare_lower("vae3is")) {
2473 // SYS #6, C8, C3, #1
2474 SYS_ALIAS(6, 8, 3, 1);
2475 } else if (!Op.compare_lower("aside1is")) {
2476 // SYS #0, C8, C3, #2
2477 SYS_ALIAS(0, 8, 3, 2);
2478 } else if (!Op.compare_lower("vaae1is")) {
2479 // SYS #0, C8, C3, #3
2480 SYS_ALIAS(0, 8, 3, 3);
2481 } else if (!Op.compare_lower("alle1is")) {
2482 // SYS #4, C8, C3, #4
2483 SYS_ALIAS(4, 8, 3, 4);
2484 } else if (!Op.compare_lower("vale1is")) {
2485 // SYS #0, C8, C3, #5
2486 SYS_ALIAS(0, 8, 3, 5);
2487 } else if (!Op.compare_lower("vaale1is")) {
2488 // SYS #0, C8, C3, #7
2489 SYS_ALIAS(0, 8, 3, 7);
2490 } else if (!Op.compare_lower("vmalle1")) {
2491 // SYS #0, C8, C7, #0
2492 SYS_ALIAS(0, 8, 7, 0);
2493 } else if (!Op.compare_lower("alle2")) {
2494 // SYS #4, C8, C7, #0
2495 SYS_ALIAS(4, 8, 7, 0);
2496 } else if (!Op.compare_lower("vale2is")) {
2497 // SYS #4, C8, C3, #5
2498 SYS_ALIAS(4, 8, 3, 5);
2499 } else if (!Op.compare_lower("vale3is")) {
2500 // SYS #6, C8, C3, #5
2501 SYS_ALIAS(6, 8, 3, 5);
2502 } else if (!Op.compare_lower("alle3")) {
2503 // SYS #6, C8, C7, #0
2504 SYS_ALIAS(6, 8, 7, 0);
2505 } else if (!Op.compare_lower("vae1")) {
2506 // SYS #0, C8, C7, #1
2507 SYS_ALIAS(0, 8, 7, 1);
2508 } else if (!Op.compare_lower("vae2")) {
2509 // SYS #4, C8, C7, #1
2510 SYS_ALIAS(4, 8, 7, 1);
2511 } else if (!Op.compare_lower("vae3")) {
2512 // SYS #6, C8, C7, #1
2513 SYS_ALIAS(6, 8, 7, 1);
2514 } else if (!Op.compare_lower("aside1")) {
2515 // SYS #0, C8, C7, #2
2516 SYS_ALIAS(0, 8, 7, 2);
2517 } else if (!Op.compare_lower("vaae1")) {
2518 // SYS #0, C8, C7, #3
2519 SYS_ALIAS(0, 8, 7, 3);
2520 } else if (!Op.compare_lower("alle1")) {
2521 // SYS #4, C8, C7, #4
2522 SYS_ALIAS(4, 8, 7, 4);
2523 } else if (!Op.compare_lower("vale1")) {
2524 // SYS #0, C8, C7, #5
2525 SYS_ALIAS(0, 8, 7, 5);
2526 } else if (!Op.compare_lower("vale2")) {
2527 // SYS #4, C8, C7, #5
2528 SYS_ALIAS(4, 8, 7, 5);
2529 } else if (!Op.compare_lower("vale3")) {
2530 // SYS #6, C8, C7, #5
2531 SYS_ALIAS(6, 8, 7, 5);
2532 } else if (!Op.compare_lower("vaale1")) {
2533 // SYS #0, C8, C7, #7
2534 SYS_ALIAS(0, 8, 7, 7);
2535 } else if (!Op.compare_lower("ipas2e1")) {
2536 // SYS #4, C8, C4, #1
2537 SYS_ALIAS(4, 8, 4, 1);
2538 } else if (!Op.compare_lower("ipas2le1")) {
2539 // SYS #4, C8, C4, #5
2540 SYS_ALIAS(4, 8, 4, 5);
2541 } else if (!Op.compare_lower("ipas2e1is")) {
2542 // SYS #4, C8, C4, #1
2543 SYS_ALIAS(4, 8, 0, 1);
2544 } else if (!Op.compare_lower("ipas2le1is")) {
2545 // SYS #4, C8, C4, #5
2546 SYS_ALIAS(4, 8, 0, 5);
2547 } else if (!Op.compare_lower("vmalls12e1")) {
2548 // SYS #4, C8, C7, #6
2549 SYS_ALIAS(4, 8, 7, 6);
2550 } else if (!Op.compare_lower("vmalls12e1is")) {
2551 // SYS #4, C8, C3, #6
2552 SYS_ALIAS(4, 8, 3, 6);
2554 return TokError("invalid operand for TLBI instruction");
2560 Parser.Lex(); // Eat operand.
2562 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2563 bool HasRegister = false;
2565 // Check for the optional register operand.
2566 if (getLexer().is(AsmToken::Comma)) {
2567 Parser.Lex(); // Eat comma.
2569 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2570 return TokError("expected register operand");
2575 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2576 Parser.eatToEndOfStatement();
2577 return TokError("unexpected token in argument list");
2580 if (ExpectRegister && !HasRegister) {
2581 return TokError("specified " + Mnemonic + " op requires a register");
2583 else if (!ExpectRegister && HasRegister) {
2584 return TokError("specified " + Mnemonic + " op does not use a register");
2587 Parser.Lex(); // Consume the EndOfStatement
2591 AArch64AsmParser::OperandMatchResultTy
2592 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2593 MCAsmParser &Parser = getParser();
2594 const AsmToken &Tok = Parser.getTok();
2596 // Can be either a #imm style literal or an option name
2597 bool Hash = Tok.is(AsmToken::Hash);
2598 if (Hash || Tok.is(AsmToken::Integer)) {
2599 // Immediate operand.
2601 Parser.Lex(); // Eat the '#'
2602 const MCExpr *ImmVal;
2603 SMLoc ExprLoc = getLoc();
2604 if (getParser().parseExpression(ImmVal))
2605 return MatchOperand_ParseFail;
2606 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2608 Error(ExprLoc, "immediate value expected for barrier operand");
2609 return MatchOperand_ParseFail;
2611 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2612 Error(ExprLoc, "barrier operand out of range");
2613 return MatchOperand_ParseFail;
2616 auto Mapper = AArch64DB::DBarrierMapper();
2618 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2619 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2620 ExprLoc, getContext()));
2621 return MatchOperand_Success;
2624 if (Tok.isNot(AsmToken::Identifier)) {
2625 TokError("invalid operand for instruction");
2626 return MatchOperand_ParseFail;
2630 auto Mapper = AArch64DB::DBarrierMapper();
2632 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2634 TokError("invalid barrier option name");
2635 return MatchOperand_ParseFail;
2638 // The only valid named option for ISB is 'sy'
2639 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2640 TokError("'sy' or #imm operand expected");
2641 return MatchOperand_ParseFail;
2644 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2645 getLoc(), getContext()));
2646 Parser.Lex(); // Consume the option
2648 return MatchOperand_Success;
2651 AArch64AsmParser::OperandMatchResultTy
2652 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2653 MCAsmParser &Parser = getParser();
2654 const AsmToken &Tok = Parser.getTok();
2656 if (Tok.isNot(AsmToken::Identifier))
2657 return MatchOperand_NoMatch;
2660 auto MRSMapper = AArch64SysReg::MRSMapper();
2661 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2663 assert(IsKnown == (MRSReg != -1U) &&
2664 "register should be -1 if and only if it's unknown");
2666 auto MSRMapper = AArch64SysReg::MSRMapper();
2667 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2669 assert(IsKnown == (MSRReg != -1U) &&
2670 "register should be -1 if and only if it's unknown");
2672 auto PStateMapper = AArch64PState::PStateMapper();
2673 uint32_t PStateField =
2674 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2675 assert(IsKnown == (PStateField != -1U) &&
2676 "register should be -1 if and only if it's unknown");
2678 Operands.push_back(AArch64Operand::CreateSysReg(
2679 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2680 Parser.Lex(); // Eat identifier
2682 return MatchOperand_Success;
2685 /// tryParseVectorRegister - Parse a vector register operand.
2686 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2687 MCAsmParser &Parser = getParser();
2688 if (Parser.getTok().isNot(AsmToken::Identifier))
2692 // Check for a vector register specifier first.
2694 int64_t Reg = tryMatchVectorRegister(Kind, false);
2698 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2699 // If there was an explicit qualifier, that goes on as a literal text
2703 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2705 // If there is an index specifier following the register, parse that too.
2706 if (Parser.getTok().is(AsmToken::LBrac)) {
2707 SMLoc SIdx = getLoc();
2708 Parser.Lex(); // Eat left bracket token.
2710 const MCExpr *ImmVal;
2711 if (getParser().parseExpression(ImmVal))
2713 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2715 TokError("immediate value expected for vector index");
2720 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2721 Error(E, "']' expected");
2725 Parser.Lex(); // Eat right bracket token.
2727 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2734 /// parseRegister - Parse a non-vector register operand.
2735 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2736 MCAsmParser &Parser = getParser();
2738 // Try for a vector register.
2739 if (!tryParseVectorRegister(Operands))
2742 // Try for a scalar register.
2743 int64_t Reg = tryParseRegister();
2747 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2749 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2750 // as a string token in the instruction itself.
2751 if (getLexer().getKind() == AsmToken::LBrac) {
2752 SMLoc LBracS = getLoc();
2754 const AsmToken &Tok = Parser.getTok();
2755 if (Tok.is(AsmToken::Integer)) {
2756 SMLoc IntS = getLoc();
2757 int64_t Val = Tok.getIntVal();
2760 if (getLexer().getKind() == AsmToken::RBrac) {
2761 SMLoc RBracS = getLoc();
2764 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2766 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2768 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2778 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2779 MCAsmParser &Parser = getParser();
2780 bool HasELFModifier = false;
2781 AArch64MCExpr::VariantKind RefKind;
2783 if (Parser.getTok().is(AsmToken::Colon)) {
2784 Parser.Lex(); // Eat ':"
2785 HasELFModifier = true;
2787 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2788 Error(Parser.getTok().getLoc(),
2789 "expect relocation specifier in operand after ':'");
2793 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2794 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2795 .Case("lo12", AArch64MCExpr::VK_LO12)
2796 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2797 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2798 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2799 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2800 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2801 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2802 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2803 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2804 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2805 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2806 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2807 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2808 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2809 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2810 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2811 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2812 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2813 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2814 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2815 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2816 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2817 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2818 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2819 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2820 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2821 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2822 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2823 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2824 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2825 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2826 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2827 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2828 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2829 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2830 .Default(AArch64MCExpr::VK_INVALID);
2832 if (RefKind == AArch64MCExpr::VK_INVALID) {
2833 Error(Parser.getTok().getLoc(),
2834 "expect relocation specifier in operand after ':'");
2838 Parser.Lex(); // Eat identifier
2840 if (Parser.getTok().isNot(AsmToken::Colon)) {
2841 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2844 Parser.Lex(); // Eat ':'
2847 if (getParser().parseExpression(ImmVal))
2851 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2856 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2857 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2858 MCAsmParser &Parser = getParser();
2859 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2861 Parser.Lex(); // Eat left bracket token.
2863 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2866 int64_t PrevReg = FirstReg;
2869 if (Parser.getTok().is(AsmToken::Minus)) {
2870 Parser.Lex(); // Eat the minus.
2872 SMLoc Loc = getLoc();
2874 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2877 // Any Kind suffices must match on all regs in the list.
2878 if (Kind != NextKind)
2879 return Error(Loc, "mismatched register size suffix");
2881 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2883 if (Space == 0 || Space > 3) {
2884 return Error(Loc, "invalid number of vectors");
2890 while (Parser.getTok().is(AsmToken::Comma)) {
2891 Parser.Lex(); // Eat the comma token.
2893 SMLoc Loc = getLoc();
2895 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2898 // Any Kind suffices must match on all regs in the list.
2899 if (Kind != NextKind)
2900 return Error(Loc, "mismatched register size suffix");
2902 // Registers must be incremental (with wraparound at 31)
2903 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2904 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2905 return Error(Loc, "registers must be sequential");
2912 if (Parser.getTok().isNot(AsmToken::RCurly))
2913 return Error(getLoc(), "'}' expected");
2914 Parser.Lex(); // Eat the '}' token.
2917 return Error(S, "invalid number of vectors");
2919 unsigned NumElements = 0;
2920 char ElementKind = 0;
2922 parseValidVectorKind(Kind, NumElements, ElementKind);
2924 Operands.push_back(AArch64Operand::CreateVectorList(
2925 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2927 // If there is an index specifier following the list, parse that too.
2928 if (Parser.getTok().is(AsmToken::LBrac)) {
2929 SMLoc SIdx = getLoc();
2930 Parser.Lex(); // Eat left bracket token.
2932 const MCExpr *ImmVal;
2933 if (getParser().parseExpression(ImmVal))
2935 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2937 TokError("immediate value expected for vector index");
2942 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2943 Error(E, "']' expected");
2947 Parser.Lex(); // Eat right bracket token.
2949 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2955 AArch64AsmParser::OperandMatchResultTy
2956 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2957 MCAsmParser &Parser = getParser();
2958 const AsmToken &Tok = Parser.getTok();
2959 if (!Tok.is(AsmToken::Identifier))
2960 return MatchOperand_NoMatch;
2962 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2964 MCContext &Ctx = getContext();
2965 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2966 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2967 return MatchOperand_NoMatch;
2970 Parser.Lex(); // Eat register
2972 if (Parser.getTok().isNot(AsmToken::Comma)) {
2974 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2975 return MatchOperand_Success;
2977 Parser.Lex(); // Eat comma.
2979 if (Parser.getTok().is(AsmToken::Hash))
2980 Parser.Lex(); // Eat hash
2982 if (Parser.getTok().isNot(AsmToken::Integer)) {
2983 Error(getLoc(), "index must be absent or #0");
2984 return MatchOperand_ParseFail;
2987 const MCExpr *ImmVal;
2988 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2989 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2990 Error(getLoc(), "index must be absent or #0");
2991 return MatchOperand_ParseFail;
2995 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2996 return MatchOperand_Success;
2999 /// parseOperand - Parse a arm instruction operand. For now this parses the
3000 /// operand regardless of the mnemonic.
3001 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3002 bool invertCondCode) {
3003 MCAsmParser &Parser = getParser();
3004 // Check if the current operand has a custom associated parser, if so, try to
3005 // custom parse the operand, or fallback to the general approach.
3006 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3007 if (ResTy == MatchOperand_Success)
3009 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3010 // there was a match, but an error occurred, in which case, just return that
3011 // the operand parsing failed.
3012 if (ResTy == MatchOperand_ParseFail)
3015 // Nothing custom, so do general case parsing.
3017 switch (getLexer().getKind()) {
3021 if (parseSymbolicImmVal(Expr))
3022 return Error(S, "invalid operand");
3024 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3025 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3028 case AsmToken::LBrac: {
3029 SMLoc Loc = Parser.getTok().getLoc();
3030 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3032 Parser.Lex(); // Eat '['
3034 // There's no comma after a '[', so we can parse the next operand
3036 return parseOperand(Operands, false, false);
3038 case AsmToken::LCurly:
3039 return parseVectorList(Operands);
3040 case AsmToken::Identifier: {
3041 // If we're expecting a Condition Code operand, then just parse that.
3043 return parseCondCode(Operands, invertCondCode);
3045 // If it's a register name, parse it.
3046 if (!parseRegister(Operands))
3049 // This could be an optional "shift" or "extend" operand.
3050 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3051 // We can only continue if no tokens were eaten.
3052 if (GotShift != MatchOperand_NoMatch)
3055 // This was not a register so parse other operands that start with an
3056 // identifier (like labels) as expressions and create them as immediates.
3057 const MCExpr *IdVal;
3059 if (getParser().parseExpression(IdVal))
3062 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3063 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3066 case AsmToken::Integer:
3067 case AsmToken::Real:
3068 case AsmToken::Hash: {
3069 // #42 -> immediate.
3071 if (getLexer().is(AsmToken::Hash))
3074 // Parse a negative sign
3075 bool isNegative = false;
3076 if (Parser.getTok().is(AsmToken::Minus)) {
3078 // We need to consume this token only when we have a Real, otherwise
3079 // we let parseSymbolicImmVal take care of it
3080 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3084 // The only Real that should come through here is a literal #0.0 for
3085 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3086 // so convert the value.
3087 const AsmToken &Tok = Parser.getTok();
3088 if (Tok.is(AsmToken::Real)) {
3089 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3090 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3091 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3092 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3093 Mnemonic != "fcmlt")
3094 return TokError("unexpected floating point literal");
3095 else if (IntVal != 0 || isNegative)
3096 return TokError("expected floating-point constant #0.0");
3097 Parser.Lex(); // Eat the token.
3100 AArch64Operand::CreateToken("#0", false, S, getContext()));
3102 AArch64Operand::CreateToken(".0", false, S, getContext()));
3106 const MCExpr *ImmVal;
3107 if (parseSymbolicImmVal(ImmVal))
3110 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3111 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3114 case AsmToken::Equal: {
3115 SMLoc Loc = Parser.getTok().getLoc();
3116 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3117 return Error(Loc, "unexpected token in operand");
3118 Parser.Lex(); // Eat '='
3119 const MCExpr *SubExprVal;
3120 if (getParser().parseExpression(SubExprVal))
3123 if (Operands.size() < 2 ||
3124 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3128 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3129 Operands[1]->getReg());
3131 MCContext& Ctx = getContext();
3132 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3133 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3134 if (isa<MCConstantExpr>(SubExprVal)) {
3135 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3136 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3137 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3141 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3142 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3143 Operands.push_back(AArch64Operand::CreateImm(
3144 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3146 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3147 ShiftAmt, true, S, E, Ctx));
3150 APInt Simm = APInt(64, Imm << ShiftAmt);
3151 // check if the immediate is an unsigned or signed 32-bit int for W regs
3152 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3153 return Error(Loc, "Immediate too large for register");
3155 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3156 const MCExpr *CPLoc =
3157 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3158 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3164 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3166 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3167 StringRef Name, SMLoc NameLoc,
3168 OperandVector &Operands) {
3169 MCAsmParser &Parser = getParser();
3170 Name = StringSwitch<StringRef>(Name.lower())
3171 .Case("beq", "b.eq")
3172 .Case("bne", "b.ne")
3173 .Case("bhs", "b.hs")
3174 .Case("bcs", "b.cs")
3175 .Case("blo", "b.lo")
3176 .Case("bcc", "b.cc")
3177 .Case("bmi", "b.mi")
3178 .Case("bpl", "b.pl")
3179 .Case("bvs", "b.vs")
3180 .Case("bvc", "b.vc")
3181 .Case("bhi", "b.hi")
3182 .Case("bls", "b.ls")
3183 .Case("bge", "b.ge")
3184 .Case("blt", "b.lt")
3185 .Case("bgt", "b.gt")
3186 .Case("ble", "b.le")
3187 .Case("bal", "b.al")
3188 .Case("bnv", "b.nv")
3191 // First check for the AArch64-specific .req directive.
3192 if (Parser.getTok().is(AsmToken::Identifier) &&
3193 Parser.getTok().getIdentifier() == ".req") {
3194 parseDirectiveReq(Name, NameLoc);
3195 // We always return 'error' for this, as we're done with this
3196 // statement and don't need to match the 'instruction."
3200 // Create the leading tokens for the mnemonic, split by '.' characters.
3201 size_t Start = 0, Next = Name.find('.');
3202 StringRef Head = Name.slice(Start, Next);
3204 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3205 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3206 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3207 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3208 Parser.eatToEndOfStatement();
3213 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3216 // Handle condition codes for a branch mnemonic
3217 if (Head == "b" && Next != StringRef::npos) {
3219 Next = Name.find('.', Start + 1);
3220 Head = Name.slice(Start + 1, Next);
3222 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3223 (Head.data() - Name.data()));
3224 AArch64CC::CondCode CC = parseCondCodeString(Head);
3225 if (CC == AArch64CC::Invalid)
3226 return Error(SuffixLoc, "invalid condition code");
3228 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3230 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3233 // Add the remaining tokens in the mnemonic.
3234 while (Next != StringRef::npos) {
3236 Next = Name.find('.', Start + 1);
3237 Head = Name.slice(Start, Next);
3238 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3239 (Head.data() - Name.data()) + 1);
3241 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3244 // Conditional compare instructions have a Condition Code operand, which needs
3245 // to be parsed and an immediate operand created.
3246 bool condCodeFourthOperand =
3247 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3248 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3249 Head == "csinc" || Head == "csinv" || Head == "csneg");
3251 // These instructions are aliases to some of the conditional select
3252 // instructions. However, the condition code is inverted in the aliased
3255 // FIXME: Is this the correct way to handle these? Or should the parser
3256 // generate the aliased instructions directly?
3257 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3258 bool condCodeThirdOperand =
3259 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3261 // Read the remaining operands.
3262 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3263 // Read the first operand.
3264 if (parseOperand(Operands, false, false)) {
3265 Parser.eatToEndOfStatement();
3270 while (getLexer().is(AsmToken::Comma)) {
3271 Parser.Lex(); // Eat the comma.
3273 // Parse and remember the operand.
3274 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3275 (N == 3 && condCodeThirdOperand) ||
3276 (N == 2 && condCodeSecondOperand),
3277 condCodeSecondOperand || condCodeThirdOperand)) {
3278 Parser.eatToEndOfStatement();
3282 // After successfully parsing some operands there are two special cases to
3283 // consider (i.e. notional operands not separated by commas). Both are due
3284 // to memory specifiers:
3285 // + An RBrac will end an address for load/store/prefetch
3286 // + An '!' will indicate a pre-indexed operation.
3288 // It's someone else's responsibility to make sure these tokens are sane
3289 // in the given context!
3290 if (Parser.getTok().is(AsmToken::RBrac)) {
3291 SMLoc Loc = Parser.getTok().getLoc();
3292 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3297 if (Parser.getTok().is(AsmToken::Exclaim)) {
3298 SMLoc Loc = Parser.getTok().getLoc();
3299 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3308 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3309 SMLoc Loc = Parser.getTok().getLoc();
3310 Parser.eatToEndOfStatement();
3311 return Error(Loc, "unexpected token in argument list");
3314 Parser.Lex(); // Consume the EndOfStatement
3318 // FIXME: This entire function is a giant hack to provide us with decent
3319 // operand range validation/diagnostics until TableGen/MC can be extended
3320 // to support autogeneration of this kind of validation.
3321 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3322 SmallVectorImpl<SMLoc> &Loc) {
3323 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3324 // Check for indexed addressing modes w/ the base register being the
3325 // same as a destination/source register or pair load where
3326 // the Rt == Rt2. All of those are undefined behaviour.
3327 switch (Inst.getOpcode()) {
3328 case AArch64::LDPSWpre:
3329 case AArch64::LDPWpost:
3330 case AArch64::LDPWpre:
3331 case AArch64::LDPXpost:
3332 case AArch64::LDPXpre: {
3333 unsigned Rt = Inst.getOperand(1).getReg();
3334 unsigned Rt2 = Inst.getOperand(2).getReg();
3335 unsigned Rn = Inst.getOperand(3).getReg();
3336 if (RI->isSubRegisterEq(Rn, Rt))
3337 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3338 "is also a destination");
3339 if (RI->isSubRegisterEq(Rn, Rt2))
3340 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3341 "is also a destination");
3344 case AArch64::LDPDi:
3345 case AArch64::LDPQi:
3346 case AArch64::LDPSi:
3347 case AArch64::LDPSWi:
3348 case AArch64::LDPWi:
3349 case AArch64::LDPXi: {
3350 unsigned Rt = Inst.getOperand(0).getReg();
3351 unsigned Rt2 = Inst.getOperand(1).getReg();
3353 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3356 case AArch64::LDPDpost:
3357 case AArch64::LDPDpre:
3358 case AArch64::LDPQpost:
3359 case AArch64::LDPQpre:
3360 case AArch64::LDPSpost:
3361 case AArch64::LDPSpre:
3362 case AArch64::LDPSWpost: {
3363 unsigned Rt = Inst.getOperand(1).getReg();
3364 unsigned Rt2 = Inst.getOperand(2).getReg();
3366 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3369 case AArch64::STPDpost:
3370 case AArch64::STPDpre:
3371 case AArch64::STPQpost:
3372 case AArch64::STPQpre:
3373 case AArch64::STPSpost:
3374 case AArch64::STPSpre:
3375 case AArch64::STPWpost:
3376 case AArch64::STPWpre:
3377 case AArch64::STPXpost:
3378 case AArch64::STPXpre: {
3379 unsigned Rt = Inst.getOperand(1).getReg();
3380 unsigned Rt2 = Inst.getOperand(2).getReg();
3381 unsigned Rn = Inst.getOperand(3).getReg();
3382 if (RI->isSubRegisterEq(Rn, Rt))
3383 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3384 "is also a source");
3385 if (RI->isSubRegisterEq(Rn, Rt2))
3386 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3387 "is also a source");
3390 case AArch64::LDRBBpre:
3391 case AArch64::LDRBpre:
3392 case AArch64::LDRHHpre:
3393 case AArch64::LDRHpre:
3394 case AArch64::LDRSBWpre:
3395 case AArch64::LDRSBXpre:
3396 case AArch64::LDRSHWpre:
3397 case AArch64::LDRSHXpre:
3398 case AArch64::LDRSWpre:
3399 case AArch64::LDRWpre:
3400 case AArch64::LDRXpre:
3401 case AArch64::LDRBBpost:
3402 case AArch64::LDRBpost:
3403 case AArch64::LDRHHpost:
3404 case AArch64::LDRHpost:
3405 case AArch64::LDRSBWpost:
3406 case AArch64::LDRSBXpost:
3407 case AArch64::LDRSHWpost:
3408 case AArch64::LDRSHXpost:
3409 case AArch64::LDRSWpost:
3410 case AArch64::LDRWpost:
3411 case AArch64::LDRXpost: {
3412 unsigned Rt = Inst.getOperand(1).getReg();
3413 unsigned Rn = Inst.getOperand(2).getReg();
3414 if (RI->isSubRegisterEq(Rn, Rt))
3415 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3416 "is also a source");
3419 case AArch64::STRBBpost:
3420 case AArch64::STRBpost:
3421 case AArch64::STRHHpost:
3422 case AArch64::STRHpost:
3423 case AArch64::STRWpost:
3424 case AArch64::STRXpost:
3425 case AArch64::STRBBpre:
3426 case AArch64::STRBpre:
3427 case AArch64::STRHHpre:
3428 case AArch64::STRHpre:
3429 case AArch64::STRWpre:
3430 case AArch64::STRXpre: {
3431 unsigned Rt = Inst.getOperand(1).getReg();
3432 unsigned Rn = Inst.getOperand(2).getReg();
3433 if (RI->isSubRegisterEq(Rn, Rt))
3434 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3435 "is also a source");
3440 // Now check immediate ranges. Separate from the above as there is overlap
3441 // in the instructions being checked and this keeps the nested conditionals
3443 switch (Inst.getOpcode()) {
3444 case AArch64::ADDSWri:
3445 case AArch64::ADDSXri:
3446 case AArch64::ADDWri:
3447 case AArch64::ADDXri:
3448 case AArch64::SUBSWri:
3449 case AArch64::SUBSXri:
3450 case AArch64::SUBWri:
3451 case AArch64::SUBXri: {
3452 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3453 // some slight duplication here.
3454 if (Inst.getOperand(2).isExpr()) {
3455 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3456 AArch64MCExpr::VariantKind ELFRefKind;
3457 MCSymbolRefExpr::VariantKind DarwinRefKind;
3459 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3460 return Error(Loc[2], "invalid immediate expression");
3463 // Only allow these with ADDXri.
3464 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3465 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3466 Inst.getOpcode() == AArch64::ADDXri)
3469 // Only allow these with ADDXri/ADDWri
3470 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3471 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3472 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3473 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3474 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3475 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3476 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3477 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3478 (Inst.getOpcode() == AArch64::ADDXri ||
3479 Inst.getOpcode() == AArch64::ADDWri))
3482 // Don't allow expressions in the immediate field otherwise
3483 return Error(Loc[2], "invalid immediate expression");
3492 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3494 case Match_MissingFeature:
3496 "instruction requires a CPU feature not currently enabled");
3497 case Match_InvalidOperand:
3498 return Error(Loc, "invalid operand for instruction");
3499 case Match_InvalidSuffix:
3500 return Error(Loc, "invalid type suffix for instruction");
3501 case Match_InvalidCondCode:
3502 return Error(Loc, "expected AArch64 condition code");
3503 case Match_AddSubRegExtendSmall:
3505 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3506 case Match_AddSubRegExtendLarge:
3508 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3509 case Match_AddSubSecondSource:
3511 "expected compatible register, symbol or integer in range [0, 4095]");
3512 case Match_LogicalSecondSource:
3513 return Error(Loc, "expected compatible register or logical immediate");
3514 case Match_InvalidMovImm32Shift:
3515 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3516 case Match_InvalidMovImm64Shift:
3517 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3518 case Match_AddSubRegShift32:
3520 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3521 case Match_AddSubRegShift64:
3523 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3524 case Match_InvalidFPImm:
3526 "expected compatible register or floating-point constant");
3527 case Match_InvalidMemoryIndexedSImm9:
3528 return Error(Loc, "index must be an integer in range [-256, 255].");
3529 case Match_InvalidMemoryIndexed4SImm7:
3530 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3531 case Match_InvalidMemoryIndexed8SImm7:
3532 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3533 case Match_InvalidMemoryIndexed16SImm7:
3534 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3535 case Match_InvalidMemoryWExtend8:
3537 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3538 case Match_InvalidMemoryWExtend16:
3540 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3541 case Match_InvalidMemoryWExtend32:
3543 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3544 case Match_InvalidMemoryWExtend64:
3546 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3547 case Match_InvalidMemoryWExtend128:
3549 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3550 case Match_InvalidMemoryXExtend8:
3552 "expected 'lsl' or 'sxtx' with optional shift of #0");
3553 case Match_InvalidMemoryXExtend16:
3555 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3556 case Match_InvalidMemoryXExtend32:
3558 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3559 case Match_InvalidMemoryXExtend64:
3561 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3562 case Match_InvalidMemoryXExtend128:
3564 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3565 case Match_InvalidMemoryIndexed1:
3566 return Error(Loc, "index must be an integer in range [0, 4095].");
3567 case Match_InvalidMemoryIndexed2:
3568 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3569 case Match_InvalidMemoryIndexed4:
3570 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3571 case Match_InvalidMemoryIndexed8:
3572 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3573 case Match_InvalidMemoryIndexed16:
3574 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3575 case Match_InvalidImm0_7:
3576 return Error(Loc, "immediate must be an integer in range [0, 7].");
3577 case Match_InvalidImm0_15:
3578 return Error(Loc, "immediate must be an integer in range [0, 15].");
3579 case Match_InvalidImm0_31:
3580 return Error(Loc, "immediate must be an integer in range [0, 31].");
3581 case Match_InvalidImm0_63:
3582 return Error(Loc, "immediate must be an integer in range [0, 63].");
3583 case Match_InvalidImm0_127:
3584 return Error(Loc, "immediate must be an integer in range [0, 127].");
3585 case Match_InvalidImm0_65535:
3586 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3587 case Match_InvalidImm1_8:
3588 return Error(Loc, "immediate must be an integer in range [1, 8].");
3589 case Match_InvalidImm1_16:
3590 return Error(Loc, "immediate must be an integer in range [1, 16].");
3591 case Match_InvalidImm1_32:
3592 return Error(Loc, "immediate must be an integer in range [1, 32].");
3593 case Match_InvalidImm1_64:
3594 return Error(Loc, "immediate must be an integer in range [1, 64].");
3595 case Match_InvalidIndex1:
3596 return Error(Loc, "expected lane specifier '[1]'");
3597 case Match_InvalidIndexB:
3598 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3599 case Match_InvalidIndexH:
3600 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3601 case Match_InvalidIndexS:
3602 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3603 case Match_InvalidIndexD:
3604 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3605 case Match_InvalidLabel:
3606 return Error(Loc, "expected label or encodable integer pc offset");
3608 return Error(Loc, "expected readable system register");
3610 return Error(Loc, "expected writable system register or pstate");
3611 case Match_MnemonicFail:
3612 return Error(Loc, "unrecognized instruction mnemonic");
3614 llvm_unreachable("unexpected error code!");
3618 static const char *getSubtargetFeatureName(uint64_t Feature);
3620 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3621 OperandVector &Operands,
3623 uint64_t &ErrorInfo,
3624 FeatureBitset &ErrorMissingFeature,
3625 bool MatchingInlineAsm) {
3626 assert(!Operands.empty() && "Unexpect empty operand list!");
3627 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3628 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3630 StringRef Tok = Op.getToken();
3631 unsigned NumOperands = Operands.size();
3633 if (NumOperands == 4 && Tok == "lsl") {
3634 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3635 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3636 if (Op2.isReg() && Op3.isImm()) {
3637 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3639 uint64_t Op3Val = Op3CE->getValue();
3640 uint64_t NewOp3Val = 0;
3641 uint64_t NewOp4Val = 0;
3642 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3644 NewOp3Val = (32 - Op3Val) & 0x1f;
3645 NewOp4Val = 31 - Op3Val;
3647 NewOp3Val = (64 - Op3Val) & 0x3f;
3648 NewOp4Val = 63 - Op3Val;
3651 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3652 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3654 Operands[0] = AArch64Operand::CreateToken(
3655 "ubfm", false, Op.getStartLoc(), getContext());
3656 Operands.push_back(AArch64Operand::CreateImm(
3657 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3658 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3659 Op3.getEndLoc(), getContext());
3662 } else if (NumOperands == 4 && Tok == "bfc") {
3663 // FIXME: Horrible hack to handle BFC->BFM alias.
3664 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3665 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3666 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3668 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3669 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3670 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3672 if (LSBCE && WidthCE) {
3673 uint64_t LSB = LSBCE->getValue();
3674 uint64_t Width = WidthCE->getValue();
3676 uint64_t RegWidth = 0;
3677 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3683 if (LSB >= RegWidth)
3684 return Error(LSBOp.getStartLoc(),
3685 "expected integer in range [0, 31]");
3686 if (Width < 1 || Width > RegWidth)
3687 return Error(WidthOp.getStartLoc(),
3688 "expected integer in range [1, 32]");
3692 ImmR = (32 - LSB) & 0x1f;
3694 ImmR = (64 - LSB) & 0x3f;
3696 uint64_t ImmS = Width - 1;
3698 if (ImmR != 0 && ImmS >= ImmR)
3699 return Error(WidthOp.getStartLoc(),
3700 "requested insert overflows register");
3702 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3703 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3704 Operands[0] = AArch64Operand::CreateToken(
3705 "bfm", false, Op.getStartLoc(), getContext());
3706 Operands[2] = AArch64Operand::CreateReg(
3707 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3708 SMLoc(), getContext());
3709 Operands[3] = AArch64Operand::CreateImm(
3710 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3711 Operands.emplace_back(
3712 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3713 WidthOp.getEndLoc(), getContext()));
3716 } else if (NumOperands == 5) {
3717 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3718 // UBFIZ -> UBFM aliases.
3719 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3720 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3721 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3722 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3724 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3725 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3726 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3728 if (Op3CE && Op4CE) {
3729 uint64_t Op3Val = Op3CE->getValue();
3730 uint64_t Op4Val = Op4CE->getValue();
3732 uint64_t RegWidth = 0;
3733 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3739 if (Op3Val >= RegWidth)
3740 return Error(Op3.getStartLoc(),
3741 "expected integer in range [0, 31]");
3742 if (Op4Val < 1 || Op4Val > RegWidth)
3743 return Error(Op4.getStartLoc(),
3744 "expected integer in range [1, 32]");
3746 uint64_t NewOp3Val = 0;
3748 NewOp3Val = (32 - Op3Val) & 0x1f;
3750 NewOp3Val = (64 - Op3Val) & 0x3f;
3752 uint64_t NewOp4Val = Op4Val - 1;
3754 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3755 return Error(Op4.getStartLoc(),
3756 "requested insert overflows register");
3758 const MCExpr *NewOp3 =
3759 MCConstantExpr::create(NewOp3Val, getContext());
3760 const MCExpr *NewOp4 =
3761 MCConstantExpr::create(NewOp4Val, getContext());
3762 Operands[3] = AArch64Operand::CreateImm(
3763 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3764 Operands[4] = AArch64Operand::CreateImm(
3765 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3767 Operands[0] = AArch64Operand::CreateToken(
3768 "bfm", false, Op.getStartLoc(), getContext());
3769 else if (Tok == "sbfiz")
3770 Operands[0] = AArch64Operand::CreateToken(
3771 "sbfm", false, Op.getStartLoc(), getContext());
3772 else if (Tok == "ubfiz")
3773 Operands[0] = AArch64Operand::CreateToken(
3774 "ubfm", false, Op.getStartLoc(), getContext());
3776 llvm_unreachable("No valid mnemonic for alias?");
3780 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3781 // UBFX -> UBFM aliases.
3782 } else if (NumOperands == 5 &&
3783 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3784 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3785 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3786 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3788 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3789 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3790 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3792 if (Op3CE && Op4CE) {
3793 uint64_t Op3Val = Op3CE->getValue();
3794 uint64_t Op4Val = Op4CE->getValue();
3796 uint64_t RegWidth = 0;
3797 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3803 if (Op3Val >= RegWidth)
3804 return Error(Op3.getStartLoc(),
3805 "expected integer in range [0, 31]");
3806 if (Op4Val < 1 || Op4Val > RegWidth)
3807 return Error(Op4.getStartLoc(),
3808 "expected integer in range [1, 32]");
3810 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3812 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3813 return Error(Op4.getStartLoc(),
3814 "requested extract overflows register");
3816 const MCExpr *NewOp4 =
3817 MCConstantExpr::create(NewOp4Val, getContext());
3818 Operands[4] = AArch64Operand::CreateImm(
3819 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3821 Operands[0] = AArch64Operand::CreateToken(
3822 "bfm", false, Op.getStartLoc(), getContext());
3823 else if (Tok == "sbfx")
3824 Operands[0] = AArch64Operand::CreateToken(
3825 "sbfm", false, Op.getStartLoc(), getContext());
3826 else if (Tok == "ubfx")
3827 Operands[0] = AArch64Operand::CreateToken(
3828 "ubfm", false, Op.getStartLoc(), getContext());
3830 llvm_unreachable("No valid mnemonic for alias?");
3835 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3836 // InstAlias can't quite handle this since the reg classes aren't
3838 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3839 // The source register can be Wn here, but the matcher expects a
3840 // GPR64. Twiddle it here if necessary.
3841 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3843 unsigned Reg = getXRegFromWReg(Op.getReg());
3844 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3845 Op.getEndLoc(), getContext());
3848 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3849 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3850 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3852 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3854 // The source register can be Wn here, but the matcher expects a
3855 // GPR64. Twiddle it here if necessary.
3856 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3858 unsigned Reg = getXRegFromWReg(Op.getReg());
3859 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3860 Op.getEndLoc(), getContext());
3864 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3865 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3866 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3868 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3870 // The source register can be Wn here, but the matcher expects a
3871 // GPR32. Twiddle it here if necessary.
3872 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3874 unsigned Reg = getWRegFromXReg(Op.getReg());
3875 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3876 Op.getEndLoc(), getContext());
3881 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3882 if (NumOperands == 3 && Tok == "fmov") {
3883 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3884 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3885 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3887 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3891 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3892 Op.getEndLoc(), getContext());
3897 // First try to match against the secondary set of tables containing the
3898 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3899 unsigned MatchResult =
3900 MatchInstructionImpl(Operands, Inst, ErrorInfo, ErrorMissingFeature, MatchingInlineAsm, 1);
3902 // If that fails, try against the alternate table containing long-form NEON:
3903 // "fadd v0.2s, v1.2s, v2.2s"
3904 if (MatchResult != Match_Success)
3906 MatchInstructionImpl(Operands, Inst, ErrorInfo, ErrorMissingFeature, MatchingInlineAsm, 0);
3908 switch (MatchResult) {
3909 case Match_Success: {
3910 // Perform range checking and other semantic validations
3911 SmallVector<SMLoc, 8> OperandLocs;
3912 NumOperands = Operands.size();
3913 for (unsigned i = 1; i < NumOperands; ++i)
3914 OperandLocs.push_back(Operands[i]->getStartLoc());
3915 if (validateInstruction(Inst, OperandLocs))
3919 Out.EmitInstruction(Inst, STI);
3922 case Match_MissingFeature: {
3923 assert(ErrorMissingFeature.any() && "Unknown missing feature!");
3924 // Special case the error message for the very common case where only
3925 // a single subtarget feature is missing (neon, e.g.).
3926 std::string Msg = "instruction requires:";
3927 for (unsigned i = 0; i < ErrorMissingFeature.size(); ++i) {
3928 if (ErrorMissingFeature[i]) {
3930 Msg += getSubtargetFeatureName(i);
3933 return Error(IDLoc, Msg);
3935 case Match_MnemonicFail:
3936 return showMatchError(IDLoc, MatchResult);
3937 case Match_InvalidOperand: {
3938 SMLoc ErrorLoc = IDLoc;
3939 if (ErrorInfo != ~0ULL) {
3940 if (ErrorInfo >= Operands.size())
3941 return Error(IDLoc, "too few operands for instruction");
3943 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3944 if (ErrorLoc == SMLoc())
3947 // If the match failed on a suffix token operand, tweak the diagnostic
3949 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3950 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3951 MatchResult = Match_InvalidSuffix;
3953 return showMatchError(ErrorLoc, MatchResult);
3955 case Match_InvalidMemoryIndexed1:
3956 case Match_InvalidMemoryIndexed2:
3957 case Match_InvalidMemoryIndexed4:
3958 case Match_InvalidMemoryIndexed8:
3959 case Match_InvalidMemoryIndexed16:
3960 case Match_InvalidCondCode:
3961 case Match_AddSubRegExtendSmall:
3962 case Match_AddSubRegExtendLarge:
3963 case Match_AddSubSecondSource:
3964 case Match_LogicalSecondSource:
3965 case Match_AddSubRegShift32:
3966 case Match_AddSubRegShift64:
3967 case Match_InvalidMovImm32Shift:
3968 case Match_InvalidMovImm64Shift:
3969 case Match_InvalidFPImm:
3970 case Match_InvalidMemoryWExtend8:
3971 case Match_InvalidMemoryWExtend16:
3972 case Match_InvalidMemoryWExtend32:
3973 case Match_InvalidMemoryWExtend64:
3974 case Match_InvalidMemoryWExtend128:
3975 case Match_InvalidMemoryXExtend8:
3976 case Match_InvalidMemoryXExtend16:
3977 case Match_InvalidMemoryXExtend32:
3978 case Match_InvalidMemoryXExtend64:
3979 case Match_InvalidMemoryXExtend128:
3980 case Match_InvalidMemoryIndexed4SImm7:
3981 case Match_InvalidMemoryIndexed8SImm7:
3982 case Match_InvalidMemoryIndexed16SImm7:
3983 case Match_InvalidMemoryIndexedSImm9:
3984 case Match_InvalidImm0_7:
3985 case Match_InvalidImm0_15:
3986 case Match_InvalidImm0_31:
3987 case Match_InvalidImm0_63:
3988 case Match_InvalidImm0_127:
3989 case Match_InvalidImm0_65535:
3990 case Match_InvalidImm1_8:
3991 case Match_InvalidImm1_16:
3992 case Match_InvalidImm1_32:
3993 case Match_InvalidImm1_64:
3994 case Match_InvalidIndex1:
3995 case Match_InvalidIndexB:
3996 case Match_InvalidIndexH:
3997 case Match_InvalidIndexS:
3998 case Match_InvalidIndexD:
3999 case Match_InvalidLabel:
4002 if (ErrorInfo >= Operands.size())
4003 return Error(IDLoc, "too few operands for instruction");
4004 // Any time we get here, there's nothing fancy to do. Just get the
4005 // operand SMLoc and display the diagnostic.
4006 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4007 if (ErrorLoc == SMLoc())
4009 return showMatchError(ErrorLoc, MatchResult);
4013 llvm_unreachable("Implement any new match types added!");
4016 /// ParseDirective parses the arm specific directives
4017 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4018 const MCObjectFileInfo::Environment Format =
4019 getContext().getObjectFileInfo()->getObjectFileType();
4020 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4021 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4023 StringRef IDVal = DirectiveID.getIdentifier();
4024 SMLoc Loc = DirectiveID.getLoc();
4025 if (IDVal == ".hword")
4026 return parseDirectiveWord(2, Loc);
4027 if (IDVal == ".word")
4028 return parseDirectiveWord(4, Loc);
4029 if (IDVal == ".xword")
4030 return parseDirectiveWord(8, Loc);
4031 if (IDVal == ".tlsdesccall")
4032 return parseDirectiveTLSDescCall(Loc);
4033 if (IDVal == ".ltorg" || IDVal == ".pool")
4034 return parseDirectiveLtorg(Loc);
4035 if (IDVal == ".unreq")
4036 return parseDirectiveUnreq(Loc);
4038 if (!IsMachO && !IsCOFF) {
4039 if (IDVal == ".inst")
4040 return parseDirectiveInst(Loc);
4043 return parseDirectiveLOH(IDVal, Loc);
4046 /// parseDirectiveWord
4047 /// ::= .word [ expression (, expression)* ]
4048 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4049 MCAsmParser &Parser = getParser();
4050 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4052 const MCExpr *Value;
4053 if (getParser().parseExpression(Value))
4056 getParser().getStreamer().EmitValue(Value, Size);
4058 if (getLexer().is(AsmToken::EndOfStatement))
4061 // FIXME: Improve diagnostic.
4062 if (getLexer().isNot(AsmToken::Comma))
4063 return Error(L, "unexpected token in directive");
4072 /// parseDirectiveInst
4073 /// ::= .inst opcode [, ...]
4074 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4075 MCAsmParser &Parser = getParser();
4076 if (getLexer().is(AsmToken::EndOfStatement)) {
4077 Parser.eatToEndOfStatement();
4078 Error(Loc, "expected expression following directive");
4085 if (getParser().parseExpression(Expr)) {
4086 Error(Loc, "expected expression");
4090 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4092 Error(Loc, "expected constant expression");
4096 getTargetStreamer().emitInst(Value->getValue());
4098 if (getLexer().is(AsmToken::EndOfStatement))
4101 if (getLexer().isNot(AsmToken::Comma)) {
4102 Error(Loc, "unexpected token in directive");
4106 Parser.Lex(); // Eat comma.
4113 // parseDirectiveTLSDescCall:
4114 // ::= .tlsdesccall symbol
4115 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4117 if (getParser().parseIdentifier(Name))
4118 return Error(L, "expected symbol after directive");
4120 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4121 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4122 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4125 Inst.setOpcode(AArch64::TLSDESCCALL);
4126 Inst.addOperand(MCOperand::createExpr(Expr));
4128 getParser().getStreamer().EmitInstruction(Inst, STI);
4132 /// ::= .loh <lohName | lohId> label1, ..., labelN
4133 /// The number of arguments depends on the loh identifier.
4134 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4135 if (IDVal != MCLOHDirectiveName())
4138 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4139 if (getParser().getTok().isNot(AsmToken::Integer))
4140 return TokError("expected an identifier or a number in directive");
4141 // We successfully get a numeric value for the identifier.
4142 // Check if it is valid.
4143 int64_t Id = getParser().getTok().getIntVal();
4144 if (Id <= -1U && !isValidMCLOHType(Id))
4145 return TokError("invalid numeric identifier in directive");
4146 Kind = (MCLOHType)Id;
4148 StringRef Name = getTok().getIdentifier();
4149 // We successfully parse an identifier.
4150 // Check if it is a recognized one.
4151 int Id = MCLOHNameToId(Name);
4154 return TokError("invalid identifier in directive");
4155 Kind = (MCLOHType)Id;
4157 // Consume the identifier.
4159 // Get the number of arguments of this LOH.
4160 int NbArgs = MCLOHIdToNbArgs(Kind);
4162 assert(NbArgs != -1 && "Invalid number of arguments");
4164 SmallVector<MCSymbol *, 3> Args;
4165 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4167 if (getParser().parseIdentifier(Name))
4168 return TokError("expected identifier in directive");
4169 Args.push_back(getContext().getOrCreateSymbol(Name));
4171 if (Idx + 1 == NbArgs)
4173 if (getLexer().isNot(AsmToken::Comma))
4174 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4177 if (getLexer().isNot(AsmToken::EndOfStatement))
4178 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4180 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4184 /// parseDirectiveLtorg
4185 /// ::= .ltorg | .pool
4186 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4187 getTargetStreamer().emitCurrentConstantPool();
4191 /// parseDirectiveReq
4192 /// ::= name .req registername
4193 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4194 MCAsmParser &Parser = getParser();
4195 Parser.Lex(); // Eat the '.req' token.
4196 SMLoc SRegLoc = getLoc();
4197 unsigned RegNum = tryParseRegister();
4198 bool IsVector = false;
4200 if (RegNum == static_cast<unsigned>(-1)) {
4202 RegNum = tryMatchVectorRegister(Kind, false);
4203 if (!Kind.empty()) {
4204 Error(SRegLoc, "vector register without type specifier expected");
4210 if (RegNum == static_cast<unsigned>(-1)) {
4211 Parser.eatToEndOfStatement();
4212 Error(SRegLoc, "register name or alias expected");
4216 // Shouldn't be anything else.
4217 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4218 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4219 Parser.eatToEndOfStatement();
4223 Parser.Lex(); // Consume the EndOfStatement
4225 auto pair = std::make_pair(IsVector, RegNum);
4226 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4227 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4232 /// parseDirectiveUneq
4233 /// ::= .unreq registername
4234 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4235 MCAsmParser &Parser = getParser();
4236 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4237 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4238 Parser.eatToEndOfStatement();
4241 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4242 Parser.Lex(); // Eat the identifier.
4247 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4248 AArch64MCExpr::VariantKind &ELFRefKind,
4249 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4251 ELFRefKind = AArch64MCExpr::VK_INVALID;
4252 DarwinRefKind = MCSymbolRefExpr::VK_None;
4255 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4256 ELFRefKind = AE->getKind();
4257 Expr = AE->getSubExpr();
4260 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4262 // It's a simple symbol reference with no addend.
4263 DarwinRefKind = SE->getKind();
4267 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4271 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4274 DarwinRefKind = SE->getKind();
4276 if (BE->getOpcode() != MCBinaryExpr::Add &&
4277 BE->getOpcode() != MCBinaryExpr::Sub)
4280 // See if the addend is is a constant, otherwise there's more going
4281 // on here than we can deal with.
4282 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4286 Addend = AddendExpr->getValue();
4287 if (BE->getOpcode() == MCBinaryExpr::Sub)
4290 // It's some symbol reference + a constant addend, but really
4291 // shouldn't use both Darwin and ELF syntax.
4292 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4293 DarwinRefKind == MCSymbolRefExpr::VK_None;
4296 /// Force static initialization.
4297 extern "C" void LLVMInitializeAArch64AsmParser() {
4298 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4299 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4300 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4303 #define GET_REGISTER_MATCHER
4304 #define GET_SUBTARGET_FEATURE_NAME
4305 #define GET_MATCHER_IMPLEMENTATION
4306 #include "AArch64GenAsmMatcher.inc"
4308 // Define this matcher function after the auto-generated include so we
4309 // have the match class enum definitions.
4310 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4312 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4313 // If the kind is a token for a literal immediate, check if our asm
4314 // operand matches. This is for InstAliases which have a fixed-value
4315 // immediate in the syntax.
4316 int64_t ExpectedVal;
4319 return Match_InvalidOperand;
4361 return Match_InvalidOperand;
4362 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4364 return Match_InvalidOperand;
4365 if (CE->getValue() == ExpectedVal)
4366 return Match_Success;
4367 return Match_InvalidOperand;
4371 AArch64AsmParser::OperandMatchResultTy
4372 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4376 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4377 Error(S, "expected register");
4378 return MatchOperand_ParseFail;
4381 int FirstReg = tryParseRegister();
4382 if (FirstReg == -1) {
4383 return MatchOperand_ParseFail;
4385 const MCRegisterClass &WRegClass =
4386 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4387 const MCRegisterClass &XRegClass =
4388 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4390 bool isXReg = XRegClass.contains(FirstReg),
4391 isWReg = WRegClass.contains(FirstReg);
4392 if (!isXReg && !isWReg) {
4393 Error(S, "expected first even register of a "
4394 "consecutive same-size even/odd register pair");
4395 return MatchOperand_ParseFail;
4398 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4399 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4401 if (FirstEncoding & 0x1) {
4402 Error(S, "expected first even register of a "
4403 "consecutive same-size even/odd register pair");
4404 return MatchOperand_ParseFail;
4408 if (getParser().getTok().isNot(AsmToken::Comma)) {
4409 Error(M, "expected comma");
4410 return MatchOperand_ParseFail;
4416 int SecondReg = tryParseRegister();
4417 if (SecondReg ==-1) {
4418 return MatchOperand_ParseFail;
4421 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4422 (isXReg && !XRegClass.contains(SecondReg)) ||
4423 (isWReg && !WRegClass.contains(SecondReg))) {
4424 Error(E,"expected second odd register of a "
4425 "consecutive same-size even/odd register pair");
4426 return MatchOperand_ParseFail;
4431 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4432 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4434 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4435 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4438 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4441 return MatchOperand_Success;