1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
// AArch64AsmParser - target-specific assembly parser: turns AArch64 assembly
// text into MCInsts via the auto-generated matcher plus the hand-written
// operand/directive parsers declared below.
// NOTE(review): this extract has dropped lines (the embedded original line
// numbers are non-contiguous), so some members, guards, and closing braces
// are missing from the visible text — confirm against the upstream file.
42 class AArch64AsmParser : public MCTargetAsmParser {
44 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases created via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
// Fetch the target streamer, which is guaranteed to exist: the constructor
// below installs one if the MCStreamer doesn't already have it.
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
// Hand-written parsers for operands that the generated matcher can't handle.
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Thin diagnostics helpers forwarding to the underlying MCAsmParser.
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Target-specific directive handlers (.word, .inst, .tlsdesccall, .loh,
// .ltorg, .req/.unreq).
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
// Pulls in the TableGen-generated matcher declarations.
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers invoked by the generated matcher's
// MatchOperandParserImpl dispatch.
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
// Extends the generated match-result enum with target diagnostics.
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
117 const MCInstrInfo &MII, const MCTargetOptions &Options)
118 : MCTargetAsmParser(), STI(STI) {
119 MCAsmParserExtension::Initialize(Parser);
// Install a target streamer if the streamer doesn't have one yet; the
// streamer takes ownership of the allocation.
120 MCStreamer &S = getParser().getStreamer();
121 if (S.getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(S);
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface overrides.
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
// Classify a symbolic expression into its ELF/Darwin relocation variant
// plus constant addend; shared with AArch64Operand's predicates.
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
// Discriminated-union operand: `Kind` selects which of the per-kind structs
// below is active, and every accessor asserts the matching kind.
// NOTE(review): lines are elided in this extract (embedded original line
// numbers skip), so several struct fields and the union itself are not
// visible here.
146 class AArch64Operand : public MCParsedAsmOperand {
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// A register list like {v0.4s, v1.4s}: first register, count, and element
// type information.
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
184 struct VectorIndexOp {
// Immediate with an explicit shift, e.g. "#1, lsl #12" on ADD/SUB.
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
216 uint32_t PStateField;
229 struct ShiftExtendOp {
230 AArch64_AM::ShiftExtendType Type;
232 bool HasExplicitAmount;
// Per-kind payloads (union members in the full source).
242 struct VectorListOp VectorList;
243 struct VectorIndexOp VectorIndex;
245 struct ShiftedImmOp ShiftedImm;
246 struct CondCodeOp CondCode;
247 struct FPImmOp FPImm;
248 struct BarrierOp Barrier;
249 struct SysRegOp SysReg;
250 struct SysCRImmOp SysCRImm;
251 struct PrefetchOp Prefetch;
252 struct ShiftExtendOp ShiftExtend;
255 // Keep the MCContext around as the MCExprs may need manipulated during
256 // the add<>Operands() calls.
260 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
// Copy constructor: copies only the payload that matches the source's Kind
// (the elided switch selects which assignment runs).
262 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
264 StartLoc = o.StartLoc;
274 ShiftedImm = o.ShiftedImm;
277 CondCode = o.CondCode;
289 VectorList = o.VectorList;
292 VectorIndex = o.VectorIndex;
298 SysCRImm = o.SysCRImm;
301 Prefetch = o.Prefetch;
304 ShiftExtend = o.ShiftExtend;
309 /// getStartLoc - Get the location of the first token of this operand.
310 SMLoc getStartLoc() const override { return StartLoc; }
311 /// getEndLoc - Get the location of the last token of this operand.
312 SMLoc getEndLoc() const override { return EndLoc; }
// Kind-checked accessors: each asserts that the operand's Kind matches
// before reading the corresponding payload.
314 StringRef getToken() const {
315 assert(Kind == k_Token && "Invalid access!");
316 return StringRef(Tok.Data, Tok.Length);
319 bool isTokenSuffix() const {
320 assert(Kind == k_Token && "Invalid access!");
324 const MCExpr *getImm() const {
325 assert(Kind == k_Immediate && "Invalid access!");
329 const MCExpr *getShiftedImmVal() const {
330 assert(Kind == k_ShiftedImm && "Invalid access!");
331 return ShiftedImm.Val;
334 unsigned getShiftedImmShift() const {
335 assert(Kind == k_ShiftedImm && "Invalid access!");
336 return ShiftedImm.ShiftAmount;
339 AArch64CC::CondCode getCondCode() const {
340 assert(Kind == k_CondCode && "Invalid access!");
341 return CondCode.Code;
344 unsigned getFPImm() const {
345 assert(Kind == k_FPImm && "Invalid access!");
349 unsigned getBarrier() const {
350 assert(Kind == k_Barrier && "Invalid access!");
354 StringRef getBarrierName() const {
355 assert(Kind == k_Barrier && "Invalid access!");
356 return StringRef(Barrier.Data, Barrier.Length);
359 unsigned getReg() const override {
360 assert(Kind == k_Register && "Invalid access!");
364 unsigned getVectorListStart() const {
365 assert(Kind == k_VectorList && "Invalid access!");
366 return VectorList.RegNum;
369 unsigned getVectorListCount() const {
370 assert(Kind == k_VectorList && "Invalid access!");
371 return VectorList.Count;
374 unsigned getVectorIndex() const {
375 assert(Kind == k_VectorIndex && "Invalid access!");
376 return VectorIndex.Val;
379 StringRef getSysReg() const {
380 assert(Kind == k_SysReg && "Invalid access!");
381 return StringRef(SysReg.Data, SysReg.Length);
384 unsigned getSysCR() const {
385 assert(Kind == k_SysCR && "Invalid access!");
389 unsigned getPrefetch() const {
390 assert(Kind == k_Prefetch && "Invalid access!");
394 StringRef getPrefetchName() const {
395 assert(Kind == k_Prefetch && "Invalid access!");
396 return StringRef(Prefetch.Data, Prefetch.Length);
399 AArch64_AM::ShiftExtendType getShiftExtendType() const {
400 assert(Kind == k_ShiftExtend && "Invalid access!");
401 return ShiftExtend.Type;
404 unsigned getShiftExtendAmount() const {
405 assert(Kind == k_ShiftExtend && "Invalid access!");
406 return ShiftExtend.Amount;
409 bool hasShiftExtendAmount() const {
410 assert(Kind == k_ShiftExtend && "Invalid access!");
411 return ShiftExtend.HasExplicitAmount;
// Immediate-class predicates used by the auto-generated matcher: each
// requires the operand to be a constant expression and range-checks it.
// SImm7sN predicates additionally require N-byte alignment (load/store pair
// offsets are scaled). NOTE(review): this extract drops lines, so the usual
// `if (!isImm()) return false;` / `if (!MCE) return false;` guards and the
// closing braces are not visible — confirm against the upstream file.
414 bool isImm() const override { return Kind == k_Immediate; }
415 bool isMem() const override { return false; }
416 bool isSImm9() const {
419 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
422 int64_t Val = MCE->getValue();
423 return (Val >= -256 && Val < 256);
// Signed 7-bit immediate scaled by 4 (word pair offset).
425 bool isSImm7s4() const {
428 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
431 int64_t Val = MCE->getValue();
432 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
// Signed 7-bit immediate scaled by 8 (doubleword pair offset).
434 bool isSImm7s8() const {
437 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
440 int64_t Val = MCE->getValue();
441 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
// Signed 7-bit immediate scaled by 16 (quadword pair offset).
443 bool isSImm7s16() const {
446 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
449 int64_t Val = MCE->getValue();
450 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Accept a symbolic expression as a scaled unsigned 12-bit offset when its
// relocation variant is a page-offset style modifier.
453 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
454 AArch64MCExpr::VariantKind ELFRefKind;
455 MCSymbolRefExpr::VariantKind DarwinRefKind;
457 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
459 // If we don't understand the expression, assume the best and
460 // let the fixup and relocation code deal with it.
464 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
465 ELFRefKind == AArch64MCExpr::VK_LO12 ||
466 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
469 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
473 // Note that we don't range-check the addend. It's adjusted modulo page
474 // size when converted, so there is no "out of range" condition when using
476 return Addend >= 0 && (Addend % Scale) == 0;
477 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
478 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
479 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset scaled by Scale bytes; non-constant expressions
// defer to the symbolic check above.
486 template <int Scale> bool isUImm12Offset() const {
490 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 return isSymbolicUImm12Offset(getImm(), Scale);
494 int64_t Val = MCE->getValue();
495 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Plain half-open / closed integer range predicates, named after the
// inclusive range they accept.
498 bool isImm0_7() const {
501 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
504 int64_t Val = MCE->getValue();
505 return (Val >= 0 && Val < 8);
507 bool isImm1_8() const {
510 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
513 int64_t Val = MCE->getValue();
514 return (Val > 0 && Val < 9);
516 bool isImm0_15() const {
519 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
522 int64_t Val = MCE->getValue();
523 return (Val >= 0 && Val < 16);
525 bool isImm1_16() const {
528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
531 int64_t Val = MCE->getValue();
532 return (Val > 0 && Val < 17);
534 bool isImm0_31() const {
537 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540 int64_t Val = MCE->getValue();
541 return (Val >= 0 && Val < 32);
543 bool isImm1_31() const {
546 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
549 int64_t Val = MCE->getValue();
550 return (Val >= 1 && Val < 32);
552 bool isImm1_32() const {
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558 int64_t Val = MCE->getValue();
559 return (Val >= 1 && Val < 33);
561 bool isImm0_63() const {
564 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567 int64_t Val = MCE->getValue();
568 return (Val >= 0 && Val < 64);
570 bool isImm1_63() const {
573 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
576 int64_t Val = MCE->getValue();
577 return (Val >= 1 && Val < 64);
579 bool isImm1_64() const {
582 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
585 int64_t Val = MCE->getValue();
586 return (Val >= 1 && Val < 65);
588 bool isImm0_127() const {
591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
594 int64_t Val = MCE->getValue();
595 return (Val >= 0 && Val < 128);
597 bool isImm0_255() const {
600 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
603 int64_t Val = MCE->getValue();
604 return (Val >= 0 && Val < 256);
606 bool isImm0_65535() const {
609 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
612 int64_t Val = MCE->getValue();
613 return (Val >= 0 && Val < 65536);
615 bool isImm32_63() const {
618 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
621 int64_t Val = MCE->getValue();
622 return (Val >= 32 && Val < 64);
// Bitmask ("logical") immediate predicates: defer the actual encodability
// check to AArch64_AM::isLogicalImmediate for the given register width.
624 bool isLogicalImm32() const {
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 int64_t Val = MCE->getValue();
// Reject values whose upper 32 bits are neither all-zero nor all-one,
// i.e. values not representable as a sign-/zero-extended 32-bit quantity.
631 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
634 return AArch64_AM::isLogicalImmediate(Val, 32);
636 bool isLogicalImm64() const {
639 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
642 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
// "Not" variants: check the bitwise complement instead (for BIC-style
// aliases of the logical instructions).
644 bool isLogicalImm32Not() const {
647 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
650 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
651 return AArch64_AM::isLogicalImmediate(Val, 32);
653 bool isLogicalImm64Not() const {
656 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
659 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
661 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// ADD/SUB immediate: either a plain 0..0xfff constant, a "#imm, lsl #12"
// shifted immediate, or a page-offset style symbolic reference.
662 bool isAddSubImm() const {
663 if (!isShiftedImm() && !isImm())
668 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
669 if (isShiftedImm()) {
670 unsigned Shift = ShiftedImm.ShiftAmount;
671 Expr = ShiftedImm.Val;
672 if (Shift != 0 && Shift != 12)
678 AArch64MCExpr::VariantKind ELFRefKind;
679 MCSymbolRefExpr::VariantKind DarwinRefKind;
681 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
682 DarwinRefKind, Addend)) {
683 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
684 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
685 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
686 || ELFRefKind == AArch64MCExpr::VK_LO12
687 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
688 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
690 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
691 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
693 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
696 // Otherwise it should be a real immediate in range:
697 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
698 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
700 bool isCondCode() const { return Kind == k_CondCode; }
// FMOV-style 8-bit encoded floating-point immediate (type 10 modified imm).
701 bool isSIMDImmType10() const {
704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
707 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: signed, word-aligned ranges of 26/19/14 bits
// (already scaled by 4 here; the elided lines presumably check alignment).
709 bool isBranchTarget26() const {
712 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
715 int64_t Val = MCE->getValue();
718 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
720 bool isPCRelLabel19() const {
723 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726 int64_t Val = MCE->getValue();
729 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
731 bool isBranchTarget14() const {
734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 int64_t Val = MCE->getValue();
740 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// Shared helper for the MOVZ/MOVK symbol predicates: true iff the immediate
// is a symbolic reference whose ELF modifier is in AllowedModifiers (Darwin
// modifiers are rejected outright).
744 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
748 AArch64MCExpr::VariantKind ELFRefKind;
749 MCSymbolRefExpr::VariantKind DarwinRefKind;
751 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
752 DarwinRefKind, Addend)) {
755 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
758 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
759 if (ELFRefKind == AllowedModifiers[i])
// One predicate per MOVZ/MOVK halfword slot (G0..G3); each lists the
// modifiers legal for that slot (_S = signed, _NC = no overflow check).
766 bool isMovZSymbolG3() const {
767 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
770 bool isMovZSymbolG2() const {
771 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
772 AArch64MCExpr::VK_TPREL_G2,
773 AArch64MCExpr::VK_DTPREL_G2});
776 bool isMovZSymbolG1() const {
777 return isMovWSymbol({
778 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
779 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
780 AArch64MCExpr::VK_DTPREL_G1,
784 bool isMovZSymbolG0() const {
785 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
786 AArch64MCExpr::VK_TPREL_G0,
787 AArch64MCExpr::VK_DTPREL_G0});
790 bool isMovKSymbolG3() const {
791 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
794 bool isMovKSymbolG2() const {
795 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
798 bool isMovKSymbolG1() const {
799 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
800 AArch64MCExpr::VK_TPREL_G1_NC,
801 AArch64MCExpr::VK_DTPREL_G1_NC});
804 bool isMovKSymbolG0() const {
806 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
807 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
// True iff the constant can be materialized by a single MOVZ with the given
// halfword shift (for the "mov Rd, #imm" alias).
810 template<int RegWidth, int Shift>
811 bool isMOVZMovAlias() const {
812 if (!isImm()) return false;
814 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815 if (!CE) return false;
816 uint64_t Value = CE->getValue();
// For 32-bit registers only the low 32 bits matter (elided width check).
819 Value &= 0xffffffffULL;
821 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
822 if (Value == 0 && Shift != 0)
825 return (Value & ~(0xffffULL << Shift)) == 0;
// MOVN counterpart of the alias check; MOVZ-expressible values are excluded
// so the alias always picks MOVZ first.
828 template<int RegWidth, int Shift>
829 bool isMOVNMovAlias() const {
830 if (!isImm()) return false;
832 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
833 if (!CE) return false;
834 uint64_t Value = CE->getValue();
836 // MOVZ takes precedence over MOVN.
837 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
838 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
843 Value &= 0xffffffffULL;
845 return (Value & ~(0xffffULL << Shift)) == 0;
// Simple kind predicates plus register-class and vector refinements.
848 bool isFPImm() const { return Kind == k_FPImm; }
849 bool isBarrier() const { return Kind == k_Barrier; }
850 bool isSysReg() const { return Kind == k_SysReg; }
// System-register operands are valid for MRS/MSR/MSR-pstate only when the
// corresponding lookup produced a real encoding (-1U means "not found").
851 bool isMRSSystemRegister() const {
852 if (!isSysReg()) return false;
854 return SysReg.MRSReg != -1U;
856 bool isMSRSystemRegister() const {
857 if (!isSysReg()) return false;
859 return SysReg.MSRReg != -1U;
861 bool isSystemPStateField() const {
862 if (!isSysReg()) return false;
864 return SysReg.PStateField != -1U;
// Scalar vs vector registers are distinguished by the isVector flag.
866 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
867 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
// Vector register restricted to the low half of the register file
// (V0-V15), as required by some indexed-element instructions.
868 bool isVectorRegLo() const {
869 return Kind == k_Register && Reg.isVector &&
870 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
// A 64-bit GPR that will be rewritten to its 32-bit sub-register
// (see addGPR32as64Operands).
873 bool isGPR32as64() const {
874 return Kind == k_Register && !Reg.isVector &&
875 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
878 bool isGPR64sp0() const {
879 return Kind == k_Register && !Reg.isVector &&
880 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
883 /// Is this a vector list with the type implicit (presumably attached to the
884 /// instruction itself)?
885 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
886 return Kind == k_VectorList && VectorList.Count == NumRegs &&
887 !VectorList.ElementKind;
// Vector list with an explicit type suffix, e.g. {v0.4s-v3.4s}: register
// count, element count, and element kind must all match.
890 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
891 bool isTypedVectorList() const {
892 if (Kind != k_VectorList)
894 if (VectorList.Count != NumRegs)
896 if (VectorList.ElementKind != ElementKind)
898 return VectorList.NumElements == NumElements;
// Lane-index predicates: the upper bound is the lane count for each
// element size (B=16, H=8, S=4, D=2 lanes in a 128-bit register).
901 bool isVectorIndex1() const {
902 return Kind == k_VectorIndex && VectorIndex.Val == 1;
904 bool isVectorIndexB() const {
905 return Kind == k_VectorIndex && VectorIndex.Val < 16;
907 bool isVectorIndexH() const {
908 return Kind == k_VectorIndex && VectorIndex.Val < 8;
910 bool isVectorIndexS() const {
911 return Kind == k_VectorIndex && VectorIndex.Val < 4;
913 bool isVectorIndexD() const {
914 return Kind == k_VectorIndex && VectorIndex.Val < 2;
916 bool isToken() const override { return Kind == k_Token; }
917 bool isTokenEqual(StringRef Str) const {
918 return Kind == k_Token && getToken() == Str;
920 bool isSysCR() const { return Kind == k_SysCR; }
921 bool isPrefetch() const { return Kind == k_Prefetch; }
922 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// Shift/extend refinements of a k_ShiftExtend operand. Each predicate
// narrows by shift type and, where relevant, by allowed amount.
923 bool isShifter() const {
924 if (!isShiftExtend())
927 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
928 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
929 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
930 ST == AArch64_AM::MSL);
932 bool isExtend() const {
936 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
// LSL counts as an extend here (it is the UXTX/SXTX alias in register
// extend position); amounts are limited to 0-4.
937 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
938 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
939 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
940 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
941 ET == AArch64_AM::LSL) &&
942 getShiftExtendAmount() <= 4;
945 bool isExtend64() const {
948 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
949 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
950 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
952 bool isExtendLSL64() const {
955 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
956 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
957 ET == AArch64_AM::LSL) &&
958 getShiftExtendAmount() <= 4;
// Memory-operand extends: the amount must be 0 or log2 of the access size
// in bytes (i.e. the scaled-index shift for a Width-bit access).
961 template<int Width> bool isMemXExtend() const {
964 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
965 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
966 (getShiftExtendAmount() == Log2_32(Width / 8) ||
967 getShiftExtendAmount() == 0);
970 template<int Width> bool isMemWExtend() const {
973 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
974 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
975 (getShiftExtendAmount() == Log2_32(Width / 8) ||
976 getShiftExtendAmount() == 0);
979 template <unsigned width>
980 bool isArithmeticShifter() const {
984 // An arithmetic shifter is LSL, LSR, or ASR.
985 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
986 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
987 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
990 template <unsigned width>
991 bool isLogicalShifter() const {
995 // A logical shifter is LSL, LSR, ASR or ROR.
996 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
997 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
998 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
999 getShiftExtendAmount() < width;
1002 bool isMovImm32Shifter() const {
1006 // A 32-bit MOVi shifter is LSL of 0 or 16.
1007 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1008 if (ST != AArch64_AM::LSL)
1010 uint64_t Val = getShiftExtendAmount();
1011 return (Val == 0 || Val == 16);
1014 bool isMovImm64Shifter() const {
1018 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1019 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1020 if (ST != AArch64_AM::LSL)
1022 uint64_t Val = getShiftExtendAmount();
1023 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1026 bool isLogicalVecShifter() const {
1030 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1031 unsigned Shift = getShiftExtendAmount();
1032 return getShiftExtendType() == AArch64_AM::LSL &&
1033 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1036 bool isLogicalVecHalfWordShifter() const {
1037 if (!isLogicalVecShifter())
1040 // A logical vector halfword shifter is a left shift by 0 or 8.
1041 unsigned Shift = getShiftExtendAmount();
1042 return getShiftExtendType() == AArch64_AM::LSL &&
1043 (Shift == 0 || Shift == 8);
1046 bool isMoveVecShifter() const {
1047 if (!isShiftExtend())
1050 // A move vector shifter is an MSL left shift by 8 or 16.
1051 unsigned Shift = getShiftExtendAmount();
1052 return getShiftExtendType() == AArch64_AM::MSL &&
1053 (Shift == 8 || Shift == 16);
1056 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1057 // to LDUR/STUR when the offset is not legal for the former but is for
1058 // the latter. As such, in addition to checking for being a legal unscaled
1059 // address, also check that it is not a legal scaled address. This avoids
1060 // ambiguity in the matcher.
// (Width is a template parameter on the elided enclosing declaration.)
1062 bool isSImm9OffsetFB() const {
1063 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: +/-4GiB in 4KiB pages, so a constant must be page-aligned
// and fit in a signed 21-bit page count.
1066 bool isAdrpLabel() const {
1067 // Validation was handled during parsing, so we just sanity check that
1068 // something didn't go haywire.
1072 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1073 int64_t Val = CE->getValue();
1074 int64_t Min = - (4096 * (1LL << (21 - 1)));
1075 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1076 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: signed 21-bit byte offset.
1082 bool isAdrLabel() const {
1083 // Validation was handled during parsing, so we just sanity check that
1084 // something didn't go haywire.
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
1090 int64_t Min = - (1LL << (21 - 1));
1091 int64_t Max = ((1LL << (21 - 1)) - 1);
1092 return Val >= Min && Val <= Max;
// Helper: add an expression to the MCInst, folding constants to plain
// immediates (a null MCExpr is treated as immediate 0).
1098 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1099 // Add as immediates when possible. Null MCExpr = 0.
1101 Inst.addOperand(MCOperand::CreateImm(0));
1102 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1103 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1105 Inst.addOperand(MCOperand::CreateExpr(Expr));
1108 void addRegOperands(MCInst &Inst, unsigned N) const {
1109 assert(N == 1 && "Invalid number of operands!");
1110 Inst.addOperand(MCOperand::CreateReg(getReg()));
// Emit the W-register with the same encoding as the parsed X-register
// (used where the assembly mnemonic names the 64-bit form).
1113 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1114 assert(N == 1 && "Invalid number of operands!");
1116 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1118 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1119 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1120 RI->getEncodingValue(getReg()));
1122 Inst.addOperand(MCOperand::CreateReg(Reg));
// Vector registers parse as Q-registers; remap Qn -> Dn for 64-bit uses.
1125 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1128 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1129 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1132 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1133 assert(N == 1 && "Invalid number of operands!");
1135 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1136 Inst.addOperand(MCOperand::CreateReg(getReg()));
1139 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1141 Inst.addOperand(MCOperand::CreateReg(getReg()));
// Remap a Q-register list to the D/Q tuple super-register, indexed by the
// list length (1-4 registers).
1144 template <unsigned NumRegs>
1145 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1146 assert(N == 1 && "Invalid number of operands!");
1147 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1148 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1149 unsigned FirstReg = FirstRegs[NumRegs - 1];
1152 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1155 template <unsigned NumRegs>
1156 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1158 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1159 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1160 unsigned FirstReg = FirstRegs[NumRegs - 1];
1163 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
// Lane indices are emitted as plain immediates.
1166 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1168 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1171 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1172 assert(N == 1 && "Invalid number of operands!");
1173 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1176 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1177 assert(N == 1 && "Invalid number of operands!");
1178 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1181 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1186 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1191 void addImmOperands(MCInst &Inst, unsigned N) const {
1192 assert(N == 1 && "Invalid number of operands!");
1193 // If this is a pageoff symrefexpr with an addend, adjust the addend
1194 // to be only the page-offset portion. Otherwise, just add the expr
1196 addExpr(Inst, getImm());
1199 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1200 assert(N == 2 && "Invalid number of operands!");
1201 if (isShiftedImm()) {
1202 addExpr(Inst, getShiftedImmVal());
1203 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1205 addExpr(Inst, getImm());
1206 Inst.addOperand(MCOperand::CreateImm(0));
1210 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1211 assert(N == 1 && "Invalid number of operands!");
1212 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1215 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1216 assert(N == 1 && "Invalid number of operands!");
1217 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1219 addExpr(Inst, getImm());
1221 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1224 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1225 addImmOperands(Inst, N);
1229 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1234 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1237 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
// Signed-immediate emitters. Each expects an MCConstantExpr (enforced by the
// `cast<>`, which asserts on mismatch) and emits the raw value, scaled down
// by the instruction's access granule where applicable (4/8/16 bytes for the
// load/store-pair SImm7 forms). Range checking is presumably done earlier by
// the matcher's isSImm* predicates — not visible in this chunk.
1240 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1243 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// 7-bit signed offset scaled by 4 (32-bit register pairs).
1246 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1249 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
// 7-bit signed offset scaled by 8 (64-bit register pairs).
1252 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1255 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
// 7-bit signed offset scaled by 16 (128-bit register pairs).
1258 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1261 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// Ranged unsigned-immediate emitters (range is encoded in the method name,
// e.g. addImm0_7 => [0,7]). All are identical in body: cast to
// MCConstantExpr and emit the raw value. The range itself is presumably
// validated by the corresponding isImmX_Y matcher predicates elsewhere in
// the file — not visible in this chunk.
1264 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1265 assert(N == 1 && "Invalid number of operands!");
1266 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1267 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1270 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1271 assert(N == 1 && "Invalid number of operands!");
1272 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1273 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1276 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1277 assert(N == 1 && "Invalid number of operands!");
1278 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1279 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1282 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// This extra assert is redundant after cast<> (which already asserts), but
// is kept byte-identical here; the siblings omit it.
1285 assert(MCE && "Invalid constant immediate operand!");
1286 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1295 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1301 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1307 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1313 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1319 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1325 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1331 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1337 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1340 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1343 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1346 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Logical-immediate emitters: the source constant is converted into the
// AArch64 N:immr:imms bitmask-immediate encoding before being attached.
// The "Not" variants encode the bitwise complement (used for alias forms
// like BIC-from-AND). The 32-bit variants mask/complement within 32 bits.
1349 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1353 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1354 Inst.addOperand(MCOperand::CreateImm(encoding));
1357 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1361 Inst.addOperand(MCOperand::CreateImm(encoding));
1364 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1368 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1369 Inst.addOperand(MCOperand::CreateImm(encoding));
1372 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1376 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1377 Inst.addOperand(MCOperand::CreateImm(encoding));
// AdvSIMD modified-immediate "type 10" (MOVI 64-bit byte-mask form).
1380 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1383 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1384 Inst.addOperand(MCOperand::CreateImm(encoding));
// Branch/PC-relative target emitters. A constant target has its low two bits
// shifted off (A64 branch offsets are word-scaled); a symbolic target is
// emitted as a raw expression for later fixup. The early-return branch
// guarding the constant case falls in the numbering gaps (e.g. 1392->1394)
// and is not visible in this listing.
1387 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1388 // Branch operands don't encode the low bits, so shift them off
1389 // here. If it's a label, however, just put it on directly as there's
1390 // not enough information now to do anything.
1391 assert(N == 1 && "Invalid number of operands!");
1392 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1394 addExpr(Inst, getImm());
1397 assert(MCE && "Invalid constant immediate operand!");
1398 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1401 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 addExpr(Inst, getImm());
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1415 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1416 // Branch operands don't encode the low bits, so shift them off
1417 // here. If it's a label, however, just put it on directly as there's
1418 // not enough information now to do anything.
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 addExpr(Inst, getImm());
1425 assert(MCE && "Invalid constant immediate operand!");
1426 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Simple single-immediate emitters for FP immediates, barrier options,
// system registers (MRS/MSR encodings differ, hence separate methods),
// MSR pstate fields, SYS CRn/CRm fields, and prefetch hints. Each forwards
// the already-decoded value stored on the operand.
1429 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1430 assert(N == 1 && "Invalid number of operands!");
1431 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1434 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1435 assert(N == 1 && "Invalid number of operands!");
1436 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1439 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1440 assert(N == 1 && "Invalid number of operands!");
1442 Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
1445 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1446 assert(N == 1 && "Invalid number of operands!");
1448 Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
1451 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1452 assert(N == 1 && "Invalid number of operands!");
1454 Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField))
;
1457 void addSysCROperands(MCInst &Inst, unsigned N) const {
1458 assert(N == 1 && "Invalid number of operands!");
1459 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1462 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1463 assert(N == 1 && "Invalid number of operands!");
1464 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Shift/extend emitters. addShifterOperands packs type+amount into the
// single shifter-imm field. The Extend variants canonicalize a bare "lsl"
// to UXTW (32-bit) / UXTX (64-bit), which encode identically for
// arithmetic-extend operands. addMemExtendOperands emits the two mem-offset
// fields: sign-extension flag and whether a (nonzero) shift applies.
1467 void addShifterOperands(MCInst &Inst, unsigned N) const {
1468 assert(N == 1 && "Invalid number of operands!");
1470 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1471 Inst.addOperand(MCOperand::CreateImm(Imm));
1474 void addExtendOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1476 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1477 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1478 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1479 Inst.addOperand(MCOperand::CreateImm(Imm));
1482 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1483 assert(N == 1 && "Invalid number of operands!");
1484 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1485 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1486 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1487 Inst.addOperand(MCOperand::CreateImm(Imm));
1490 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1491 assert(N == 2 && "Invalid number of operands!");
1492 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1493 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1494 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1495 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1498 // For 8-bit load/store instructions with a register offset, both the
1499 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1500 // they're disambiguated by whether the shift was explicit or implicit rather
// ... than by its value (continuation of this comment falls in the
// numbering gap 1500->1502).
1502 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1503 assert(N == 2 && "Invalid number of operands!");
1504 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1505 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1506 Inst.addOperand(MCOperand::CreateImm(IsSigned));
// Unlike addMemExtendOperands, the second field reflects whether the user
// WROTE a shift amount, not whether it is nonzero (see comment above).
1507 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
// Emitters for the "mov" aliases of MOVZ/MOVN: extract the 16-bit chunk at
// `Shift` from the constant (complemented for MOVN). The line computing
// `Shift` falls in the numbering gaps (1512->1514, 1521->1523) and is not
// visible in this listing.
1511 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1512 assert(N == 1 && "Invalid number of operands!");
1514 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1515 uint64_t Value = CE->getValue();
1516 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1520 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1521 assert(N == 1 && "Invalid number of operands!");
1523 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1524 uint64_t Value = CE->getValue();
1525 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
// Debug-dump; definition appears later in the file.
1528 void print(raw_ostream &OS) const override;
// Static factory helpers: allocate an AArch64Operand of the given kind and
// fill in the kind-specific union fields. The trailing lines setting
// StartLoc/EndLoc and returning Op fall in the numbering gaps (e.g.
// 1535->1541) and are omitted from this listing.
1530 static std::unique_ptr<AArch64Operand>
1531 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1532 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
// Stores pointer+length into the token rather than copying; Str must
// outlive the operand.
1533 Op->Tok.Data = Str.data();
1534 Op->Tok.Length = Str.size();
1535 Op->Tok.IsSuffix = IsSuffix;
1541 static std::unique_ptr<AArch64Operand>
1542 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1543 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1544 Op->Reg.RegNum = RegNum;
1545 Op->Reg.isVector = isVector;
1551 static std::unique_ptr<AArch64Operand>
1552 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1553 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1554 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1555 Op->VectorList.RegNum = RegNum;
1556 Op->VectorList.Count = Count;
1557 Op->VectorList.NumElements = NumElements;
1558 Op->VectorList.ElementKind = ElementKind;
1564 static std::unique_ptr<AArch64Operand>
1565 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1566 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1567 Op->VectorIndex.Val = Idx;
// Remaining factory helpers (immediate, shifted immediate, condition code,
// FP immediate, barrier, system register, SYS CR field, prefetch,
// shift/extend). As above, the field assignments for S/E locations and the
// `return Op;` lines fall in numbering gaps and are not shown.
1573 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1574 SMLoc E, MCContext &Ctx) {
1575 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1582 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1583 unsigned ShiftAmount,
1586 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
// NOTE(review): stray space before ".Val" preserved from the original.
1587 Op->ShiftedImm .Val = Val;
1588 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1594 static std::unique_ptr<AArch64Operand>
1595 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1596 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1597 Op->CondCode.Code = Code;
1603 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1605 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1606 Op->FPImm.Val = Val;
1612 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1616 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1617 Op->Barrier.Val = Val;
1618 Op->Barrier.Data = Str.data();
1619 Op->Barrier.Length = Str.size();
1625 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1628 uint32_t PStateField,
1630 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1631 Op->SysReg.Data = Str.data();
1632 Op->SysReg.Length = Str.size();
1633 Op->SysReg.MRSReg = MRSReg;
1634 Op->SysReg.MSRReg = MSRReg;
1635 Op->SysReg.PStateField = PStateField;
1641 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1642 SMLoc E, MCContext &Ctx) {
1643 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1644 Op->SysCRImm.Val = Val;
1650 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1654 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1655 Op->Prefetch.Val = Val;
// NOTE(review): a k_Prefetch operand stores its name via Barrier.Data/
// Barrier.Length rather than a Prefetch field — this relies on the
// kind-specific storage layout; verify against the struct/union
// declarations earlier in the file (getPrefetchName() does retrieve a name).
1656 Op->Barrier.Data = Str.data();
1657 Op->Barrier.Length = Str.size();
1663 static std::unique_ptr<AArch64Operand>
1664 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1665 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1667 Op->ShiftExtend.Type = ShOp;
1668 Op->ShiftExtend.Amount = Val;
1669 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1676 } // end anonymous namespace.
// Debug-print an operand in a human-readable "<kind ...>" form, dispatching
// on the operand kind. The enclosing `switch (Kind)` line, most `case`
// labels, and `break`s fall in the numbering gaps of this listing — the
// visible lines are the per-kind bodies only.
1678 void AArch64Operand::print(raw_ostream &OS) const {
1681 OS << "<fpimm " << getFPImm() << "("
1682 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1685 StringRef Name = getBarrierName();
1687 OS << "<barrier " << Name << ">";
1689 OS << "<barrier invalid #" << getBarrier() << ">";
1693 getImm()->print(OS);
1695 case k_ShiftedImm: {
1696 unsigned Shift = getShiftedImmShift();
1697 OS << "<shiftedimm ";
1698 getShiftedImmVal()->print(OS);
1699 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1703 OS << "<condcode " << getCondCode() << ">";
1706 OS << "<register " << getReg() << ">";
1708 case k_VectorList: {
1709 OS << "<vectorlist ";
1710 unsigned Reg = getVectorListStart();
1711 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1712 OS << Reg + i << " ";
1717 OS << "<vectorindex " << getVectorIndex() << ">";
1720 OS << "<sysreg: " << getSysReg() << '>';
1723 OS << "'" << getToken() << "'";
1726 OS << "c" << getSysCR();
1729 StringRef Name = getPrefetchName();
1731 OS << "<prfop " << Name << ">";
1733 OS << "<prfop invalid #" << getPrefetch() << ">";
1736 case k_ShiftExtend: {
1737 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1738 << getShiftExtendAmount();
1739 if (!hasShiftExtendAmount())
1747 /// @name Auto-generated Match Functions
// Generated by TableGen from the register definitions; declared here,
// defined at the bottom of the file.
1750 static unsigned MatchRegisterName(StringRef Name);
// Map a lower-case "vN" vector register name to the corresponding Q-register
// enum value. The .Default(...) terminator and closing brace fall in the
// numbering gap after 1787 (presumably returning 0 for no match, given
// callers test the result for truth — see matchRegisterNameAlias).
1754 static unsigned matchVectorRegName(StringRef Name) {
1755 return StringSwitch<unsigned>(Name)
1756 .Case("v0", AArch64::Q0)
1757 .Case("v1", AArch64::Q1)
1758 .Case("v2", AArch64::Q2)
1759 .Case("v3", AArch64::Q3)
1760 .Case("v4", AArch64::Q4)
1761 .Case("v5", AArch64::Q5)
1762 .Case("v6", AArch64::Q6)
1763 .Case("v7", AArch64::Q7)
1764 .Case("v8", AArch64::Q8)
1765 .Case("v9", AArch64::Q9)
1766 .Case("v10", AArch64::Q10)
1767 .Case("v11", AArch64::Q11)
1768 .Case("v12", AArch64::Q12)
1769 .Case("v13", AArch64::Q13)
1770 .Case("v14", AArch64::Q14)
1771 .Case("v15", AArch64::Q15)
1772 .Case("v16", AArch64::Q16)
1773 .Case("v17", AArch64::Q17)
1774 .Case("v18", AArch64::Q18)
1775 .Case("v19", AArch64::Q19)
1776 .Case("v20", AArch64::Q20)
1777 .Case("v21", AArch64::Q21)
1778 .Case("v22", AArch64::Q22)
1779 .Case("v23", AArch64::Q23)
1780 .Case("v24", AArch64::Q24)
1781 .Case("v25", AArch64::Q25)
1782 .Case("v26", AArch64::Q26)
1783 .Case("v27", AArch64::Q27)
1784 .Case("v28", AArch64::Q28)
1785 .Case("v29", AArch64::Q29)
1786 .Case("v30", AArch64::Q30)
1787 .Case("v31", AArch64::Q31)
// Check whether a vector-kind suffix (e.g. ".8b", ".4s") is recognized;
// comparison is case-insensitive via Name.lower(). The actual .Case entries
// (lines 1793-1801, 1805-1811) are omitted from this numbered listing.
1791 static bool isValidVectorKind(StringRef Name) {
1792 return StringSwitch<bool>(Name.lower())
1802 // Accept the width neutral ones, too, for verbose syntax. If those
1803 // aren't used in the right places, the token operand won't match so
1804 // all will work out.
// Decompose a (pre-validated) vector kind like ".16b" into its element
// count (NumElements) and element-kind character ('b' here, taken from the
// last character, lowered). A two-character kind (e.g. ".b") has no lane
// count; the early return for that case sits in the gap 1819->1822.
1812 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1813 char &ElementKind) {
1814 assert(isValidVectorKind(Name));
1816 ElementKind = Name.lower()[Name.size() - 1];
1819 if (Name.size() == 2)
1822 // Parse the lane count
// Drop the leading '.' then accumulate decimal digits.
1823 Name = Name.drop_front();
1824 while (isdigit(Name.front())) {
1825 NumElements = 10 * NumElements + (Name.front() - '0');
1826 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true (failure) when tryParseRegister yields the -1 sentinel.
1830 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1832 StartLoc = getLoc();
1833 RegNo = tryParseRegister();
// EndLoc points at the last character of the consumed token.
1834 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1835 return (RegNo == (unsigned)-1);
1838 // Matches a register name or register alias previously defined by '.req'
1839 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
// First try the direct (TableGen / vector) name match, then fall back to
// the .req alias table. The guard between 1842 and 1845 (presumably
// returning early when RegNum matched) and the final return are in
// numbering gaps of this listing.
1841 unsigned RegNum = isVector ? matchVectorRegName(Name)
1842 : MatchRegisterName(Name);
1845 // Check for aliases registered via .req. Canonicalize to lower case.
1846 // That's more consistent since register names are case insensitive, and
1847 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1848 auto Entry = RegisterReqs.find(Name.lower());
1849 if (Entry == RegisterReqs.end())
1851 // set RegNum if the match is the right kind of register
1852 if (isVector == Entry->getValue().first)
1853 RegNum = Entry->getValue().second;
1858 /// tryParseRegister - Try to parse a register name. The token must be an
1859 /// Identifier when called, and if it is a register name the token is eaten and
1860 /// the register is added to the operand list.
// Returns the register number on success; the failure path (returning -1
// without consuming the token) and the final return are in numbering gaps.
1861 int AArch64AsmParser::tryParseRegister() {
1862 MCAsmParser &Parser = getParser();
1863 const AsmToken &Tok = Parser.getTok();
1864 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Register names are case-insensitive; canonicalize before matching.
1866 std::string lowerCase = Tok.getString().lower();
1867 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1868 // Also handle a few aliases of registers.
1870 RegNum = StringSwitch<unsigned>(lowerCase)
1871 .Case("fp", AArch64::FP)
1872 .Case("lr", AArch64::LR)
// x31/w31 read as the zero registers in this (non-SP) context.
1873 .Case("x31", AArch64::XZR)
1874 .Case("w31", AArch64::WZR)
1880 Parser.Lex(); // Eat identifier token.
1884 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1885 /// kind specifier. If it is a register specifier, eat the token and return it.
// On success, Kind receives the ".<kind>" suffix (including the dot) if one
// was written. Error paths return through lines omitted from this listing.
1886 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1887 MCAsmParser &Parser = getParser();
1888 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1889 TokError("vector register expected");
1893 StringRef Name = Parser.getTok().getString();
1894 // If there is a kind specifier, it's separated from the register name by
// ... a '.' (comment continuation falls in the gap 1894->1896).
1896 size_t Start = 0, Next = Name.find('.');
1897 StringRef Head = Name.slice(Start, Next);
1898 unsigned RegNum = matchRegisterNameAlias(Head, true);
1901 if (Next != StringRef::npos) {
1902 Kind = Name.slice(Next, StringRef::npos);
1903 if (!isValidVectorKind(Kind)) {
1904 TokError("invalid vector kind qualifier");
1908 Parser.Lex(); // Eat the register token.
// Reached when no register matched and `expected` demands one.
1913 TokError("vector register expected");
1917 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Accepts "cN"/"CN" with 0 <= N <= 15, producing a SysCR operand.
1918 AArch64AsmParser::OperandMatchResultTy
1919 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1920 MCAsmParser &Parser = getParser();
1923 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1924 Error(S, "Expected cN operand where 0 <= N <= 15");
1925 return MatchOperand_ParseFail;
1928 StringRef Tok = Parser.getTok().getIdentifier();
1929 if (Tok[0] != 'c' && Tok[0] != 'C') {
1930 Error(S, "Expected cN operand where 0 <= N <= 15");
1931 return MatchOperand_ParseFail;
// Parse the decimal number after the leading 'c'/'C'.
1935 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1936 if (BadNum || CRNum > 15) {
1937 Error(S, "Expected cN operand where 0 <= N <= 15");
1938 return MatchOperand_ParseFail;
1941 Parser.Lex(); // Eat identifier token.
1943 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1944 return MatchOperand_Success;
1947 /// tryParsePrefetch - Try to parse a prefetch operand.
// Accepts either an immediate in [0,31] (optionally '#'-prefixed) or a named
// PRFM hint (e.g. "pldl1keep"), mapped via AArch64PRFM::PRFMMapper.
1948 AArch64AsmParser::OperandMatchResultTy
1949 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1950 MCAsmParser &Parser = getParser();
1952 const AsmToken &Tok = Parser.getTok();
1953 // Either an identifier for named values or a 5-bit immediate.
1954 bool Hash = Tok.is(AsmToken::Hash);
1955 if (Hash || Tok.is(AsmToken::Integer)) {
1957 Parser.Lex(); // Eat hash token.
1958 const MCExpr *ImmVal;
1959 if (getParser().parseExpression(ImmVal))
1960 return MatchOperand_ParseFail;
1962 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1964 TokError("immediate value expected for prefetch operand");
1965 return MatchOperand_ParseFail;
1967 unsigned prfop = MCE->getValue();
// Range check (the guarding condition line sits in the 1967->1969 gap).
1969 TokError("prefetch operand out of range, [0,31] expected");
1970 return MatchOperand_ParseFail;
// Recover the canonical name for the numeric hint, if any, for printing.
1974 auto Mapper = AArch64PRFM::PRFMMapper();
1975 StringRef Name = Mapper.toString(MCE->getValue(), Valid);
1976 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
1978 return MatchOperand_Success;
1981 if (Tok.isNot(AsmToken::Identifier)) {
1982 TokError("pre-fetch hint expected");
1983 return MatchOperand_ParseFail;
1987 auto Mapper = AArch64PRFM::PRFMMapper();
1988 unsigned prfop = Mapper.fromString(Tok.getString(), Valid);
1990 TokError("pre-fetch hint expected");
1991 return MatchOperand_ParseFail;
1994 Parser.Lex(); // Eat identifier token.
1995 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
1997 return MatchOperand_Success;
2000 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// ... instruction (continuation in the 2000->2002 gap). Classifies any
// symbol modifier and rejects ones that make no sense on ADRP.
2002 AArch64AsmParser::OperandMatchResultTy
2003 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2004 MCAsmParser &Parser = getParser();
2008 if (Parser.getTok().is(AsmToken::Hash)) {
2009 Parser.Lex(); // Eat hash token.
2012 if (parseSymbolicImmVal(Expr))
2013 return MatchOperand_ParseFail;
2015 AArch64MCExpr::VariantKind ELFRefKind;
2016 MCSymbolRefExpr::VariantKind DarwinRefKind;
2018 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2019 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2020 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2021 // No modifier was specified at all; this is the syntax for an ELF basic
2022 // ADRP relocation (unfortunately).
2024 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2025 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2026 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// The Addend != 0 condition completing this clause sits in the
// 2026->2028 gap.
2028 Error(S, "gotpage label reference not allowed an addend");
2029 return MatchOperand_ParseFail;
2030 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2031 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2032 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2033 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2034 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2035 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2036 // The operand must be an @page or @gotpage qualified symbolref.
2037 Error(S, "page or gotpage label reference expected")
;
2038 return MatchOperand_ParseFail;
2042 // We have either a label reference possibly with addend or an immediate. The
2043 // addend is a raw value here. The linker will adjust it to only reference the
// ... page portion (continuation in the 2043->2045 gap).
2045 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2046 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2048 return MatchOperand_Success;
2051 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// ... instruction (continuation in the 2051->2053 gap). Unlike ADRP, no
// modifier classification is done — the expression is taken as-is.
2053 AArch64AsmParser::OperandMatchResultTy
2054 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2055 MCAsmParser &Parser = getParser();
2059 if (Parser.getTok().is(AsmToken::Hash)) {
2060 Parser.Lex(); // Eat hash token.
2063 if (getParser().parseExpression(Expr))
2064 return MatchOperand_ParseFail;
2066 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2067 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2069 return MatchOperand_Success;
2072 /// tryParseFPImm - A floating point immediate expression operand.
// Handles "#1.5"-style reals (encoded via getFP64Imm, -1 meaning
// unencodable) and integer forms, including a raw pre-encoded 0x byte.
2073 AArch64AsmParser::OperandMatchResultTy
2074 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2075 MCAsmParser &Parser = getParser();
2079 if (Parser.getTok().is(AsmToken::Hash)) {
2080 Parser.Lex(); // Eat '#'
2084 // Handle negation, as that still comes through as a separate token.
2085 bool isNegative = false;
2086 if (Parser.getTok().is(AsmToken::Minus)) {
2090 const AsmToken &Tok = Parser.getTok();
2091 if (Tok.is(AsmToken::Real)) {
2092 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2094 RealVal.changeSign();
2096 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2097 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2098 Parser.Lex(); // Eat the token.
2099 // Check for out of range values. As an exception, we let Zero through,
2100 // as we handle that special case in post-processing before matching in
2101 // order to use the zero register for it.
2102 if (Val == -1 && !RealVal.isPosZero()) {
2103 TokError("expected compatible register or floating-point constant");
2104 return MatchOperand_ParseFail;
2106 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2107 return MatchOperand_Success;
2109 if (Tok.is(AsmToken::Integer)) {
// A non-negative hex literal is taken as the already-encoded 8-bit value.
2111 if (!isNegative && Tok.getString().startswith("0x")) {
2112 Val = Tok.getIntVal();
2113 if (Val > 255 || Val < 0) {
2114 TokError("encoded floating point value out of range");
2115 return MatchOperand_ParseFail;
// The `else` branch (decimal integer reinterpreted as a real) begins in
// the 2115->2118 gap.
2118 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2119 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2120 // If we had a '-' in front, toggle the sign bit.
2121 IntVal ^= (uint64_t)isNegative << 63;
2122 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2124 Parser.Lex(); // Eat the token.
2125 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2126 return MatchOperand_Success;
// NoMatch when no '#' was consumed; otherwise a hard error (per the two
// tails below, whose guard lies in the 2126->2130 gap).
2130 return MatchOperand_NoMatch;
2132 TokError("invalid floating point immediate");
2133 return MatchOperand_ParseFail;
2136 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
// Parses "#imm" or "#imm, lsl #N". A constant with all-zero low 12 bits and
// value > 0xfff is canonicalized to (imm >> 12, shift 12).
2137 AArch64AsmParser::OperandMatchResultTy
2138 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2139 MCAsmParser &Parser = getParser();
2142 if (Parser.getTok().is(AsmToken::Hash))
2143 Parser.Lex(); // Eat '#'
2144 else if (Parser.getTok().isNot(AsmToken::Integer))
2145 // Operand should start from # or should be integer, emit error otherwise.
2146 return MatchOperand_NoMatch;
2149 if (parseSymbolicImmVal(Imm))
2150 return MatchOperand_ParseFail;
2151 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2152 uint64_t ShiftAmount = 0;
2153 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2155 int64_t Val = MCE->getValue();
2156 if (Val > 0xfff && (Val & 0xfff) == 0) {
2157 Imm = MCConstantExpr::Create(Val >> 12, getContext());
// ShiftAmount assignment (=12) sits in the 2157->2161 gap.
2161 SMLoc E = Parser.getTok().getLoc();
2162 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2164 return MatchOperand_Success;
2170 // The optional operand must be "lsl #N" where N is non-negative.
2171 if (!Parser.getTok().is(AsmToken::Identifier) ||
2172 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2173 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2174 return MatchOperand_ParseFail;
// Eat "lsl" (the Lex call sits in the 2174->2180 gap).
2180 if (Parser.getTok().is(AsmToken::Hash)) {
2184 if (Parser.getTok().isNot(AsmToken::Integer)) {
2185 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2186 return MatchOperand_ParseFail;
2189 int64_t ShiftAmount = Parser.getTok().getIntVal();
2191 if (ShiftAmount < 0) {
2192 Error(Parser.getTok().getLoc(), "positive shift amount required");
2193 return MatchOperand_ParseFail;
2195 Parser.Lex(); // Eat the number
2197 SMLoc E = Parser.getTok().getLoc();
2198 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2199 S, E, getContext()));
2200 return MatchOperand_Success;
2203 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of condition mnemonics to AArch64CC values.
// Note "cs"/"hs" and "cc"/"lo" are aliases. The `return CC;` and closing
// brace fall in the gap after 2224.
2204 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2205 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2206 .Case("eq", AArch64CC::EQ)
2207 .Case("ne", AArch64CC::NE)
2208 .Case("cs", AArch64CC::HS)
2209 .Case("hs", AArch64CC::HS)
2210 .Case("cc", AArch64CC::LO)
2211 .Case("lo", AArch64CC::LO)
2212 .Case("mi", AArch64CC::MI)
2213 .Case("pl", AArch64CC::PL)
2214 .Case("vs", AArch64CC::VS)
2215 .Case("vc", AArch64CC::VC)
2216 .Case("hi", AArch64CC::HI)
2217 .Case("ls", AArch64CC::LS)
2218 .Case("ge", AArch64CC::GE)
2219 .Case("lt", AArch64CC::LT)
2220 .Case("gt", AArch64CC::GT)
2221 .Case("le", AArch64CC::LE)
2222 .Case("al", AArch64CC::AL)
2223 .Case("nv", AArch64CC::NV)
2224 .Default(AArch64CC::Invalid);
2228 /// parseCondCode - Parse a Condition Code operand.
// Consumes the identifier token, optionally inverting the code (used by
// instructions such as CSINC aliases that take the inverted condition);
// AL/NV cannot be inverted. Returns true on error (TokError convention).
2229 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2230 bool invertCondCode) {
2231 MCAsmParser &Parser = getParser();
2233 const AsmToken &Tok = Parser.getTok();
2234 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2236 StringRef Cond = Tok.getString();
2237 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2238 if (CC == AArch64CC::Invalid)
2239 return TokError("invalid condition code");
2240 Parser.Lex(); // Eat identifier token.
2242 if (invertCondCode) {
2243 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2244 return TokError("condition codes AL and NV are invalid for this instruction");
2245 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2249 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2253 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2254 /// them if present.
// Recognizes a shift (lsl/lsr/asr/ror/msl) or extend (uxtb..sxtx) keyword,
// optionally followed by "#imm". Pure shifts require the immediate; extends
// default to an implicit amount of 0.
2255 AArch64AsmParser::OperandMatchResultTy
2256 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2257 MCAsmParser &Parser = getParser();
2258 const AsmToken &Tok = Parser.getTok();
2259 std::string LowerID = Tok.getString().lower();
2260 AArch64_AM::ShiftExtendType ShOp =
2261 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2262 .Case("lsl", AArch64_AM::LSL)
2263 .Case("lsr", AArch64_AM::LSR)
2264 .Case("asr", AArch64_AM::ASR)
2265 .Case("ror", AArch64_AM::ROR)
2266 .Case("msl", AArch64_AM::MSL)
2267 .Case("uxtb", AArch64_AM::UXTB)
2268 .Case("uxth", AArch64_AM::UXTH)
2269 .Case("uxtw", AArch64_AM::UXTW)
2270 .Case("uxtx", AArch64_AM::UXTX)
2271 .Case("sxtb", AArch64_AM::SXTB)
2272 .Case("sxth", AArch64_AM::SXTH)
2273 .Case("sxtw", AArch64_AM::SXTW)
2274 .Case("sxtx", AArch64_AM::SXTX)
2275 .Default(AArch64_AM::InvalidShiftExtend);
2277 if (ShOp == AArch64_AM::InvalidShiftExtend)
2278 return MatchOperand_NoMatch;
2280 SMLoc S = Tok.getLoc();
// The Lex eating the keyword sits in the 2280->2283 gap.
2283 bool Hash = getLexer().is(AsmToken::Hash);
2284 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2285 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2286 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2287 ShOp == AArch64_AM::MSL) {
2288 // We expect a number here.
2289 TokError("expected #imm after shift specifier");
2290 return MatchOperand_ParseFail;
2293 // "extend" type operatoins don't need an immediate, #0 is implicit.
2294 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2296 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2297 return MatchOperand_Success;
2301 Parser.Lex(); // Eat the '#'.
2303 // Make sure we do actually have a number or a parenthesized expression.
2304 SMLoc E = Parser.getTok().getLoc();
2305 if (!Parser.getTok().is(AsmToken::Integer) &&
2306 !Parser.getTok().is(AsmToken::LParen)) {
2307 Error(E, "expected integer shift amount");
2308 return MatchOperand_ParseFail;
2311 const MCExpr *ImmVal;
2312 if (getParser().parseExpression(ImmVal))
2313 return MatchOperand_ParseFail;
2315 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2317 Error(E, "expected constant '#imm' after shift specifier");
2318 return MatchOperand_ParseFail;
2321 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2322 Operands.push_back(AArch64Operand::CreateShiftExtend(
2323 ShOp, MCE->getValue(), true, S, E, getContext()));
2324 return MatchOperand_Success;
2327 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2328 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
// Emits "sys" plus the op1/Cn/Cm/op2 operands selected by the alias name,
// then parses the optional register operand. Aliases containing "all" take
// no register; all others require one.
// NOTE(review): extraction dropped some lines in this block (the SYS_ALIAS
// macro's do/while wrapper, closing braces, HasRegister assignment) -- verify
// against the upstream file.
2329 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2330 OperandVector &Operands) {
2331 if (Name.find('.') != StringRef::npos)
2332 return TokError("invalid operand");
2336 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2338 MCAsmParser &Parser = getParser();
2339 const AsmToken &Tok = Parser.getTok();
2340 StringRef Op = Tok.getString();
2341 SMLoc S = Tok.getLoc();
2343 const MCExpr *Expr = nullptr;
// Builds the four SYS operands: #op1, Cn, Cm, #op2.
2345 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2347 Expr = MCConstantExpr::Create(op1, getContext()); \
2348 Operands.push_back( \
2349 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2350 Operands.push_back( \
2351 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2352 Operands.push_back( \
2353 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2354 Expr = MCConstantExpr::Create(op2, getContext()); \
2355 Operands.push_back( \
2356 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2359 if (Mnemonic == "ic") {
2360 if (!Op.compare_lower("ialluis")) {
2361 // SYS #0, C7, C1, #0
2362 SYS_ALIAS(0, 7, 1, 0);
2363 } else if (!Op.compare_lower("iallu")) {
2364 // SYS #0, C7, C5, #0
2365 SYS_ALIAS(0, 7, 5, 0);
2366 } else if (!Op.compare_lower("ivau")) {
2367 // SYS #3, C7, C5, #1
2368 SYS_ALIAS(3, 7, 5, 1);
2370 return TokError("invalid operand for IC instruction");
2372 } else if (Mnemonic == "dc") {
2373 if (!Op.compare_lower("zva")) {
2374 // SYS #3, C7, C4, #1
2375 SYS_ALIAS(3, 7, 4, 1);
2376 } else if (!Op.compare_lower("ivac")) {
2377 // SYS #0, C7, C6, #1
2378 SYS_ALIAS(0, 7, 6, 1);
2379 } else if (!Op.compare_lower("isw")) {
2380 // SYS #0, C7, C6, #2
2381 SYS_ALIAS(0, 7, 6, 2);
2382 } else if (!Op.compare_lower("cvac")) {
2383 // SYS #3, C7, C10, #1
2384 SYS_ALIAS(3, 7, 10, 1);
2385 } else if (!Op.compare_lower("csw")) {
2386 // SYS #0, C7, C10, #2
2387 SYS_ALIAS(0, 7, 10, 2);
2388 } else if (!Op.compare_lower("cvau")) {
2389 // SYS #3, C7, C11, #1
2390 SYS_ALIAS(3, 7, 11, 1);
2391 } else if (!Op.compare_lower("civac")) {
2392 // SYS #3, C7, C14, #1
2393 SYS_ALIAS(3, 7, 14, 1);
2394 } else if (!Op.compare_lower("cisw")) {
2395 // SYS #0, C7, C14, #2
2396 SYS_ALIAS(0, 7, 14, 2);
2398 return TokError("invalid operand for DC instruction");
2400 } else if (Mnemonic == "at") {
2401 if (!Op.compare_lower("s1e1r")) {
2402 // SYS #0, C7, C8, #0
2403 SYS_ALIAS(0, 7, 8, 0);
2404 } else if (!Op.compare_lower("s1e2r")) {
2405 // SYS #4, C7, C8, #0
2406 SYS_ALIAS(4, 7, 8, 0);
2407 } else if (!Op.compare_lower("s1e3r")) {
2408 // SYS #6, C7, C8, #0
2409 SYS_ALIAS(6, 7, 8, 0);
2410 } else if (!Op.compare_lower("s1e1w")) {
2411 // SYS #0, C7, C8, #1
2412 SYS_ALIAS(0, 7, 8, 1);
2413 } else if (!Op.compare_lower("s1e2w")) {
2414 // SYS #4, C7, C8, #1
2415 SYS_ALIAS(4, 7, 8, 1);
2416 } else if (!Op.compare_lower("s1e3w")) {
2417 // SYS #6, C7, C8, #1
2418 SYS_ALIAS(6, 7, 8, 1);
2419 } else if (!Op.compare_lower("s1e0r")) {
2420 // SYS #0, C7, C8, #2
2421 SYS_ALIAS(0, 7, 8, 2);
2422 } else if (!Op.compare_lower("s1e0w")) {
2423 // SYS #0, C7, C8, #3
2424 SYS_ALIAS(0, 7, 8, 3);
2425 } else if (!Op.compare_lower("s12e1r")) {
2426 // SYS #4, C7, C8, #4
2427 SYS_ALIAS(4, 7, 8, 4);
2428 } else if (!Op.compare_lower("s12e1w")) {
2429 // SYS #4, C7, C8, #5
2430 SYS_ALIAS(4, 7, 8, 5);
2431 } else if (!Op.compare_lower("s12e0r")) {
2432 // SYS #4, C7, C8, #6
2433 SYS_ALIAS(4, 7, 8, 6);
2434 } else if (!Op.compare_lower("s12e0w")) {
2435 // SYS #4, C7, C8, #7
2436 SYS_ALIAS(4, 7, 8, 7);
2438 return TokError("invalid operand for AT instruction");
2440 } else if (Mnemonic == "tlbi") {
2441 if (!Op.compare_lower("vmalle1is")) {
2442 // SYS #0, C8, C3, #0
2443 SYS_ALIAS(0, 8, 3, 0);
2444 } else if (!Op.compare_lower("alle2is")) {
2445 // SYS #4, C8, C3, #0
2446 SYS_ALIAS(4, 8, 3, 0);
2447 } else if (!Op.compare_lower("alle3is")) {
2448 // SYS #6, C8, C3, #0
2449 SYS_ALIAS(6, 8, 3, 0);
2450 } else if (!Op.compare_lower("vae1is")) {
2451 // SYS #0, C8, C3, #1
2452 SYS_ALIAS(0, 8, 3, 1);
2453 } else if (!Op.compare_lower("vae2is")) {
2454 // SYS #4, C8, C3, #1
2455 SYS_ALIAS(4, 8, 3, 1);
2456 } else if (!Op.compare_lower("vae3is")) {
2457 // SYS #6, C8, C3, #1
2458 SYS_ALIAS(6, 8, 3, 1);
2459 } else if (!Op.compare_lower("aside1is")) {
2460 // SYS #0, C8, C3, #2
2461 SYS_ALIAS(0, 8, 3, 2);
2462 } else if (!Op.compare_lower("vaae1is")) {
2463 // SYS #0, C8, C3, #3
2464 SYS_ALIAS(0, 8, 3, 3);
2465 } else if (!Op.compare_lower("alle1is")) {
2466 // SYS #4, C8, C3, #4
2467 SYS_ALIAS(4, 8, 3, 4);
2468 } else if (!Op.compare_lower("vale1is")) {
2469 // SYS #0, C8, C3, #5
2470 SYS_ALIAS(0, 8, 3, 5);
2471 } else if (!Op.compare_lower("vaale1is")) {
2472 // SYS #0, C8, C3, #7
2473 SYS_ALIAS(0, 8, 3, 7);
2474 } else if (!Op.compare_lower("vmalle1")) {
2475 // SYS #0, C8, C7, #0
2476 SYS_ALIAS(0, 8, 7, 0);
2477 } else if (!Op.compare_lower("alle2")) {
2478 // SYS #4, C8, C7, #0
2479 SYS_ALIAS(4, 8, 7, 0);
2480 } else if (!Op.compare_lower("vale2is")) {
2481 // SYS #4, C8, C3, #5
2482 SYS_ALIAS(4, 8, 3, 5);
2483 } else if (!Op.compare_lower("vale3is")) {
2484 // SYS #6, C8, C3, #5
2485 SYS_ALIAS(6, 8, 3, 5);
2486 } else if (!Op.compare_lower("alle3")) {
2487 // SYS #6, C8, C7, #0
2488 SYS_ALIAS(6, 8, 7, 0);
2489 } else if (!Op.compare_lower("vae1")) {
2490 // SYS #0, C8, C7, #1
2491 SYS_ALIAS(0, 8, 7, 1);
2492 } else if (!Op.compare_lower("vae2")) {
2493 // SYS #4, C8, C7, #1
2494 SYS_ALIAS(4, 8, 7, 1);
2495 } else if (!Op.compare_lower("vae3")) {
2496 // SYS #6, C8, C7, #1
2497 SYS_ALIAS(6, 8, 7, 1);
2498 } else if (!Op.compare_lower("aside1")) {
2499 // SYS #0, C8, C7, #2
2500 SYS_ALIAS(0, 8, 7, 2);
2501 } else if (!Op.compare_lower("vaae1")) {
2502 // SYS #0, C8, C7, #3
2503 SYS_ALIAS(0, 8, 7, 3);
2504 } else if (!Op.compare_lower("alle1")) {
2505 // SYS #4, C8, C7, #4
2506 SYS_ALIAS(4, 8, 7, 4);
2507 } else if (!Op.compare_lower("vale1")) {
2508 // SYS #0, C8, C7, #5
2509 SYS_ALIAS(0, 8, 7, 5);
2510 } else if (!Op.compare_lower("vale2")) {
2511 // SYS #4, C8, C7, #5
2512 SYS_ALIAS(4, 8, 7, 5);
2513 } else if (!Op.compare_lower("vale3")) {
2514 // SYS #6, C8, C7, #5
2515 SYS_ALIAS(6, 8, 7, 5);
2516 } else if (!Op.compare_lower("vaale1")) {
2517 // SYS #0, C8, C7, #7
2518 SYS_ALIAS(0, 8, 7, 7);
2519 } else if (!Op.compare_lower("ipas2e1")) {
2520 // SYS #4, C8, C4, #1
2521 SYS_ALIAS(4, 8, 4, 1);
2522 } else if (!Op.compare_lower("ipas2le1")) {
2523 // SYS #4, C8, C4, #5
2524 SYS_ALIAS(4, 8, 4, 5);
2525 } else if (!Op.compare_lower("ipas2e1is")) {
2526 // SYS #4, C8, C0, #1
2527 SYS_ALIAS(4, 8, 0, 1);
2528 } else if (!Op.compare_lower("ipas2le1is")) {
2529 // SYS #4, C8, C0, #5
2530 SYS_ALIAS(4, 8, 0, 5);
2531 } else if (!Op.compare_lower("vmalls12e1")) {
2532 // SYS #4, C8, C7, #6
2533 SYS_ALIAS(4, 8, 7, 6);
2534 } else if (!Op.compare_lower("vmalls12e1is")) {
2535 // SYS #4, C8, C3, #6
2536 SYS_ALIAS(4, 8, 3, 6);
2538 return TokError("invalid operand for TLBI instruction");
2544 Parser.Lex(); // Eat operand.
// "all"-style ops (e.g. alle1, vmalle1is) operate on everything and thus
// take no register operand; every other op requires one.
2546 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2547 bool HasRegister = false;
2549 // Check for the optional register operand.
2550 if (getLexer().is(AsmToken::Comma)) {
2551 Parser.Lex(); // Eat comma.
2553 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2554 return TokError("expected register operand");
2559 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2560 Parser.eatToEndOfStatement();
2561 return TokError("unexpected token in argument list");
2564 if (ExpectRegister && !HasRegister) {
2565 return TokError("specified " + Mnemonic + " op requires a register");
2567 else if (!ExpectRegister && HasRegister) {
2568 return TokError("specified " + Mnemonic + " op does not use a register");
2571 Parser.Lex(); // Consume the EndOfStatement
// tryParseBarrierOperand - Parse a barrier operand for DSB/DMB/ISB: either a
// #imm in [0, 15] or a named option (e.g. "sy"); ISB accepts only "sy" by name.
// NOTE(review): extraction dropped some lines in this block (null-check of
// MCE, `bool Valid;` declarations, closing braces) -- verify against upstream.
2575 AArch64AsmParser::OperandMatchResultTy
2576 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2577 MCAsmParser &Parser = getParser();
2578 const AsmToken &Tok = Parser.getTok();
2580 // Can be either a #imm style literal or an option name
2581 bool Hash = Tok.is(AsmToken::Hash);
2582 if (Hash || Tok.is(AsmToken::Integer)) {
2583 // Immediate operand.
2585 Parser.Lex(); // Eat the '#'
2586 const MCExpr *ImmVal;
2587 SMLoc ExprLoc = getLoc();
2588 if (getParser().parseExpression(ImmVal))
2589 return MatchOperand_ParseFail;
2590 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2592 Error(ExprLoc, "immediate value expected for barrier operand");
2593 return MatchOperand_ParseFail;
// Barrier immediates are 4-bit fields.
2595 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2596 Error(ExprLoc, "barrier operand out of range");
2597 return MatchOperand_ParseFail;
2600 auto Mapper = AArch64DB::DBarrierMapper();
2601 StringRef Name = Mapper.toString(MCE->getValue(), Valid);
2602 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2603 ExprLoc, getContext()));
2604 return MatchOperand_Success;
2607 if (Tok.isNot(AsmToken::Identifier)) {
2608 TokError("invalid operand for instruction");
2609 return MatchOperand_ParseFail;
2613 auto Mapper = AArch64DB::DBarrierMapper();
2614 unsigned Opt = Mapper.fromString(Tok.getString(), Valid);
2616 TokError("invalid barrier option name");
2617 return MatchOperand_ParseFail;
2620 // The only valid named option for ISB is 'sy'
2621 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2622 TokError("'sy' or #imm operand expected");
2623 return MatchOperand_ParseFail;
2626 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2627 getLoc(), getContext()));
2628 Parser.Lex(); // Consume the option
2630 return MatchOperand_Success;
// tryParseSysReg - Parse a system-register operand (MRS/MSR/MSR-pstate). The
// identifier is looked up in all three namespaces; unknown names map to -1U
// and the match errors are diagnosed later during instruction matching.
// NOTE(review): extraction dropped some lines in this block (the `bool
// IsKnown;` declaration and continuation lines of the fromString calls) --
// verify against the upstream file.
2633 AArch64AsmParser::OperandMatchResultTy
2634 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2635 MCAsmParser &Parser = getParser();
2636 const AsmToken &Tok = Parser.getTok();
2638 if (Tok.isNot(AsmToken::Identifier))
2639 return MatchOperand_NoMatch;
2642 auto MRSMapper = AArch64SysReg::MRSMapper();
2643 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2645 assert(IsKnown == (MRSReg != -1U) &&
2646 "register should be -1 if and only if it's unknown");
2648 auto MSRMapper = AArch64SysReg::MSRMapper();
2649 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2651 assert(IsKnown == (MSRReg != -1U) &&
2652 "register should be -1 if and only if it's unknown");
2654 auto PStateMapper = AArch64PState::PStateMapper();
2655 uint32_t PStateField = PStateMapper.fromString(Tok.getString(), IsKnown);
2656 assert(IsKnown == (PStateField != -1U) &&
2657 "register should be -1 if and only if it's unknown");
2659 Operands.push_back(AArch64Operand::CreateSysReg(
2660 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2661 Parser.Lex(); // Eat identifier
2663 return MatchOperand_Success;
2666 /// tryParseVectorRegister - Parse a vector register operand.
// Matches a vector register (e.g. "v0.8b"), emitting a register operand, an
// optional kind-suffix token, and an optional "[imm]" vector index operand.
// Returns true on failure (no match or parse error).
// NOTE(review): extraction dropped some lines in this block (StringRef Kind
// declaration, early-returns, push_back openers) -- verify against upstream.
2667 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2668 MCAsmParser &Parser = getParser();
2669 if (Parser.getTok().isNot(AsmToken::Identifier))
2673 // Check for a vector register specifier first.
2675 int64_t Reg = tryMatchVectorRegister(Kind, false);
2679 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2680 // If there was an explicit qualifier, that goes on as a literal text
2684 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2686 // If there is an index specifier following the register, parse that too.
2687 if (Parser.getTok().is(AsmToken::LBrac)) {
2688 SMLoc SIdx = getLoc();
2689 Parser.Lex(); // Eat left bracket token.
2691 const MCExpr *ImmVal;
2692 if (getParser().parseExpression(ImmVal))
// The index must be a compile-time constant.
2694 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2696 TokError("immediate value expected for vector index");
2701 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2702 Error(E, "']' expected");
2706 Parser.Lex(); // Eat right bracket token.
2708 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2715 /// parseRegister - Parse a non-vector register operand.
// Tries a vector register first, then a scalar register. Also handles the
// literal "[1]" token sequence used by a few instructions (e.g. FMOVXDhighr).
// Returns true on failure.
// NOTE(review): extraction dropped some lines in this block (SMLoc S,
// early-returns, Parser.Lex() calls inside the bracket handling, closing
// braces) -- verify against the upstream file.
2716 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2717 MCAsmParser &Parser = getParser();
2719 // Try for a vector register.
2720 if (!tryParseVectorRegister(Operands))
2723 // Try for a scalar register.
2724 int64_t Reg = tryParseRegister();
2728 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2730 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2731 // as a string token in the instruction itself.
2732 if (getLexer().getKind() == AsmToken::LBrac) {
2733 SMLoc LBracS = getLoc();
2735 const AsmToken &Tok = Parser.getTok();
2736 if (Tok.is(AsmToken::Integer)) {
2737 SMLoc IntS = getLoc();
2738 int64_t Val = Tok.getIntVal();
2741 if (getLexer().getKind() == AsmToken::RBrac) {
2742 SMLoc RBracS = getLoc();
2745 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2747 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2749 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
// parseSymbolicImmVal - Parse an immediate expression with an optional ELF
// relocation-specifier prefix of the form ":spec:expr" (e.g. ":lo12:sym").
// When a specifier is present the resulting expression is wrapped in an
// AArch64MCExpr carrying the variant kind. Returns true on failure.
// NOTE(review): extraction dropped some lines in this block (early-return
// `return true;` lines after the Error() calls, closing braces) -- verify
// against the upstream file.
2759 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2760 MCAsmParser &Parser = getParser();
2761 bool HasELFModifier = false;
2762 AArch64MCExpr::VariantKind RefKind;
2764 if (Parser.getTok().is(AsmToken::Colon)) {
2765 Parser.Lex(); // Eat ':"
2766 HasELFModifier = true;
2768 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2769 Error(Parser.getTok().getLoc(),
2770 "expect relocation specifier in operand after ':'");
2774 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2775 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2776 .Case("lo12", AArch64MCExpr::VK_LO12)
2777 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2778 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2779 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2780 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2781 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2782 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2783 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2784 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2785 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2786 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2787 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2788 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2789 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2790 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2791 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2792 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2793 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2794 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2795 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2796 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2797 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2798 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2799 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2800 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2801 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2802 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2803 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2804 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2805 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2806 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2807 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2808 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2809 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2810 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2811 .Default(AArch64MCExpr::VK_INVALID);
2813 if (RefKind == AArch64MCExpr::VK_INVALID) {
2814 Error(Parser.getTok().getLoc(),
2815 "expect relocation specifier in operand after ':'");
2819 Parser.Lex(); // Eat identifier
2821 if (Parser.getTok().isNot(AsmToken::Colon)) {
2822 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2825 Parser.Lex(); // Eat ':'
2828 if (getParser().parseExpression(ImmVal))
// Only wrap when a modifier was parsed; plain expressions pass through.
2832 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2837 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Accepts "{v0.8b - v3.8b}" range form or "{v0.8b, v1.8b, ...}" comma form;
// all registers must share the same kind suffix, lists are 1-4 registers with
// wraparound at 31, and an optional "[imm]" index may follow the list.
// NOTE(review): extraction dropped some lines in this block (StringRef Kind,
// Count tracking, early-returns, closing braces) -- verify against upstream.
2838 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2839 MCAsmParser &Parser = getParser();
2840 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2842 Parser.Lex(); // Eat left bracket token.
2844 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2847 int64_t PrevReg = FirstReg;
2850 if (Parser.getTok().is(AsmToken::Minus)) {
2851 Parser.Lex(); // Eat the minus.
2853 SMLoc Loc = getLoc();
2855 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2858 // Any Kind suffixes must match on all regs in the list.
2859 if (Kind != NextKind)
2860 return Error(Loc, "mismatched register size suffix");
// Register ranges wrap modulo 32 (e.g. {v30-v1} is a 4-register list).
2862 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2864 if (Space == 0 || Space > 3) {
2865 return Error(Loc, "invalid number of vectors");
2871 while (Parser.getTok().is(AsmToken::Comma)) {
2872 Parser.Lex(); // Eat the comma token.
2874 SMLoc Loc = getLoc();
2876 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2879 // Any Kind suffixes must match on all regs in the list.
2880 if (Kind != NextKind)
2881 return Error(Loc, "mismatched register size suffix");
2883 // Registers must be incremental (with wraparound at 31)
2884 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2885 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2886 return Error(Loc, "registers must be sequential");
2893 if (Parser.getTok().isNot(AsmToken::RCurly))
2894 return Error(getLoc(), "'}' expected");
2895 Parser.Lex(); // Eat the '}' token.
2898 return Error(S, "invalid number of vectors");
2900 unsigned NumElements = 0;
2901 char ElementKind = 0;
2903 parseValidVectorKind(Kind, NumElements, ElementKind);
2905 Operands.push_back(AArch64Operand::CreateVectorList(
2906 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2908 // If there is an index specifier following the list, parse that too.
2909 if (Parser.getTok().is(AsmToken::LBrac)) {
2910 SMLoc SIdx = getLoc();
2911 Parser.Lex(); // Eat left bracket token.
2913 const MCExpr *ImmVal;
2914 if (getParser().parseExpression(ImmVal))
2916 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2918 TokError("immediate value expected for vector index");
2923 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2924 Error(E, "']' expected");
2928 Parser.Lex(); // Eat right bracket token.
2930 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed by
// ", #0" (used by instructions that accept "xN" or "xN, #0" forms, e.g.
// certain LDRAA-style addressing). Any index other than an absent one or #0
// is rejected.
// NOTE(review): extraction dropped some lines in this block (early-returns,
// SMLoc S, push_back openers, closing braces) -- verify against upstream.
2936 AArch64AsmParser::OperandMatchResultTy
2937 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2938 MCAsmParser &Parser = getParser();
2939 const AsmToken &Tok = Parser.getTok();
2940 if (!Tok.is(AsmToken::Identifier))
2941 return MatchOperand_NoMatch;
2943 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2945 MCContext &Ctx = getContext();
2946 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2947 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2948 return MatchOperand_NoMatch;
2951 Parser.Lex(); // Eat register
2953 if (Parser.getTok().isNot(AsmToken::Comma)) {
2955 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2956 return MatchOperand_Success;
2958 Parser.Lex(); // Eat comma.
2960 if (Parser.getTok().is(AsmToken::Hash))
2961 Parser.Lex(); // Eat hash
2963 if (Parser.getTok().isNot(AsmToken::Integer)) {
2964 Error(getLoc(), "index must be absent or #0");
2965 return MatchOperand_ParseFail;
// The trailing immediate, when present, must be exactly the constant 0.
2968 const MCExpr *ImmVal;
2969 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2970 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2971 Error(getLoc(), "index must be absent or #0");
2972 return MatchOperand_ParseFail;
2976 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2977 return MatchOperand_Success;
2980 /// parseOperand - Parse a arm instruction operand. For now this parses the
2981 /// operand regardless of the mnemonic.
// Dispatches on the leading token: custom-matched operands, '[' memory
// operands, '{' vector lists, identifiers (cond codes / registers / labels),
// immediates (including the literal #0.0 for fcmp-family), and the ldr
// "=value" pseudo which is turned into movz or a constant-pool load.
// NOTE(review): extraction dropped some lines in this block (returns after
// parse calls, case labels, closing braces, some push_back openers) -- verify
// against the upstream file.
2982 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2983 bool invertCondCode) {
2984 MCAsmParser &Parser = getParser();
2985 // Check if the current operand has a custom associated parser, if so, try to
2986 // custom parse the operand, or fallback to the general approach.
2987 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2988 if (ResTy == MatchOperand_Success)
2990 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2991 // there was a match, but an error occurred, in which case, just return that
2992 // the operand parsing failed.
2993 if (ResTy == MatchOperand_ParseFail)
2996 // Nothing custom, so do general case parsing.
2998 switch (getLexer().getKind()) {
3002 if (parseSymbolicImmVal(Expr))
3003 return Error(S, "invalid operand");
3005 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3006 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3009 case AsmToken::LBrac: {
3010 SMLoc Loc = Parser.getTok().getLoc();
3011 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3013 Parser.Lex(); // Eat '['
3015 // There's no comma after a '[', so we can parse the next operand
3017 return parseOperand(Operands, false, false);
3019 case AsmToken::LCurly:
3020 return parseVectorList(Operands);
3021 case AsmToken::Identifier: {
3022 // If we're expecting a Condition Code operand, then just parse that.
3024 return parseCondCode(Operands, invertCondCode);
3026 // If it's a register name, parse it.
3027 if (!parseRegister(Operands))
3030 // This could be an optional "shift" or "extend" operand.
3031 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3032 // We can only continue if no tokens were eaten.
3033 if (GotShift != MatchOperand_NoMatch)
3036 // This was not a register so parse other operands that start with an
3037 // identifier (like labels) as expressions and create them as immediates.
3038 const MCExpr *IdVal;
3040 if (getParser().parseExpression(IdVal))
3043 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3044 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3047 case AsmToken::Integer:
3048 case AsmToken::Real:
3049 case AsmToken::Hash: {
3050 // #42 -> immediate.
3052 if (getLexer().is(AsmToken::Hash))
3055 // Parse a negative sign
3056 bool isNegative = false;
3057 if (Parser.getTok().is(AsmToken::Minus)) {
3059 // We need to consume this token only when we have a Real, otherwise
3060 // we let parseSymbolicImmVal take care of it
3061 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3065 // The only Real that should come through here is a literal #0.0 for
3066 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3067 // so convert the value.
3068 const AsmToken &Tok = Parser.getTok();
3069 if (Tok.is(AsmToken::Real)) {
3070 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3071 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3072 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3073 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3074 Mnemonic != "fcmlt")
3075 return TokError("unexpected floating point literal");
3076 else if (IntVal != 0 || isNegative)
3077 return TokError("expected floating-point constant #0.0")
3078 Parser.Lex(); // Eat the token.
// #0.0 is pushed as the two raw tokens "#0" and ".0".
3081 AArch64Operand::CreateToken("#0", false, S, getContext()));
3083 AArch64Operand::CreateToken(".0", false, S, getContext()));
3087 const MCExpr *ImmVal;
3088 if (parseSymbolicImmVal(ImmVal))
3091 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3092 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3095 case AsmToken::Equal: {
3096 SMLoc Loc = Parser.getTok().getLoc();
3097 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3098 return Error(Loc, "unexpected token in operand");
3099 Parser.Lex(); // Eat '='
3100 const MCExpr *SubExprVal;
3101 if (getParser().parseExpression(SubExprVal))
3104 if (Operands.size() < 2 ||
3105 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3109 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3110 Operands[1]->getReg());
3112 MCContext& Ctx = getContext();
3113 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3114 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3115 if (isa<MCConstantExpr>(SubExprVal)) {
3116 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// X registers allow movz shifts of 0/16/32/48; W registers only 0/16.
3117 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3118 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3122 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3123 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3124 Operands.push_back(AArch64Operand::CreateImm(
3125 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3127 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3128 ShiftAmt, true, S, E, Ctx));
3131 APInt Simm = APInt(64, Imm << ShiftAmt);
3132 // check if the immediate is an unsigned or signed 32-bit int for W regs
3133 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3134 return Error(Loc, "Immediate too large for register");
3136 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3137 const MCExpr *CPLoc =
3138 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3139 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3145 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
// operands. Normalizes "beq"-style short branches to "b.eq", handles the
// .req directive, dispatches IC/DC/AT/TLBI to parseSysAlias, splits the
// mnemonic at '.' into head + suffix tokens, and parses comma-separated
// operands -- telling parseOperand which operand position holds a condition
// code (and whether to invert it) for the ccmp/csel/cset/cinc families.
// NOTE(review): extraction dropped some lines in this block (returns,
// operand-count tracking `unsigned N`, closing braces, Parser.Lex() calls) --
// verify against the upstream file.
3147 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3148 StringRef Name, SMLoc NameLoc,
3149 OperandVector &Operands) {
3150 MCAsmParser &Parser = getParser();
3151 Name = StringSwitch<StringRef>(Name.lower())
3152 .Case("beq", "b.eq")
3153 .Case("bne", "b.ne")
3154 .Case("bhs", "b.hs")
3155 .Case("bcs", "b.cs")
3156 .Case("blo", "b.lo")
3157 .Case("bcc", "b.cc")
3158 .Case("bmi", "b.mi")
3159 .Case("bpl", "b.pl")
3160 .Case("bvs", "b.vs")
3161 .Case("bvc", "b.vc")
3162 .Case("bhi", "b.hi")
3163 .Case("bls", "b.ls")
3164 .Case("bge", "b.ge")
3165 .Case("blt", "b.lt")
3166 .Case("bgt", "b.gt")
3167 .Case("ble", "b.le")
3168 .Case("bal", "b.al")
3169 .Case("bnv", "b.nv")
3172 // First check for the AArch64-specific .req directive.
3173 if (Parser.getTok().is(AsmToken::Identifier) &&
3174 Parser.getTok().getIdentifier() == ".req") {
3175 parseDirectiveReq(Name, NameLoc);
3176 // We always return 'error' for this, as we're done with this
3177 // statement and don't need to match the 'instruction."
3181 // Create the leading tokens for the mnemonic, split by '.' characters.
3182 size_t Start = 0, Next = Name.find('.');
3183 StringRef Head = Name.slice(Start, Next);
3185 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3186 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3187 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3188 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3189 Parser.eatToEndOfStatement();
3194 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3197 // Handle condition codes for a branch mnemonic
3198 if (Head == "b" && Next != StringRef::npos) {
3200 Next = Name.find('.', Start + 1);
3201 Head = Name.slice(Start + 1, Next);
3203 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3204 (Head.data() - Name.data()));
3205 AArch64CC::CondCode CC = parseCondCodeString(Head);
3206 if (CC == AArch64CC::Invalid)
3207 return Error(SuffixLoc, "invalid condition code");
3209 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3211 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3214 // Add the remaining tokens in the mnemonic.
3215 while (Next != StringRef::npos) {
3217 Next = Name.find('.', Start + 1);
3218 Head = Name.slice(Start, Next);
3219 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3220 (Head.data() - Name.data()) + 1);
3222 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3225 // Conditional compare instructions have a Condition Code operand, which needs
3226 // to be parsed and an immediate operand created.
3227 bool condCodeFourthOperand =
3228 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3229 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3230 Head == "csinc" || Head == "csinv" || Head == "csneg");
3232 // These instructions are aliases to some of the conditional select
3233 // instructions. However, the condition code is inverted in the aliased
3236 // FIXME: Is this the correct way to handle these? Or should the parser
3237 // generate the aliased instructions directly?
3238 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3239 bool condCodeThirdOperand =
3240 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3242 // Read the remaining operands.
3243 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3244 // Read the first operand.
3245 if (parseOperand(Operands, false, false)) {
3246 Parser.eatToEndOfStatement();
3251 while (getLexer().is(AsmToken::Comma)) {
3252 Parser.Lex(); // Eat the comma.
3254 // Parse and remember the operand.
3255 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3256 (N == 3 && condCodeThirdOperand) ||
3257 (N == 2 && condCodeSecondOperand),
3258 condCodeSecondOperand || condCodeThirdOperand)) {
3259 Parser.eatToEndOfStatement();
3263 // After successfully parsing some operands there are two special cases to
3264 // consider (i.e. notional operands not separated by commas). Both are due
3265 // to memory specifiers:
3266 // + An RBrac will end an address for load/store/prefetch
3267 // + An '!' will indicate a pre-indexed operation.
3269 // It's someone else's responsibility to make sure these tokens are sane
3270 // in the given context!
3271 if (Parser.getTok().is(AsmToken::RBrac)) {
3272 SMLoc Loc = Parser.getTok().getLoc();
3273 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3278 if (Parser.getTok().is(AsmToken::Exclaim)) {
3279 SMLoc Loc = Parser.getTok().getLoc();
3280 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3289 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3290 SMLoc Loc = Parser.getTok().getLoc();
3291 Parser.eatToEndOfStatement();
3292 return Error(Loc, "unexpected token in argument list");
3295 Parser.Lex(); // Consume the EndOfStatement
// Post-match semantic validation of an MCInst. Rejects operand combinations
// the architecture treats as unpredictable (writeback base overlapping a
// transfer register, Rt == Rt2 in load pairs) and symbolic immediate
// expressions that are only legal on specific ADD/SUB opcodes. `Loc` maps
// parsed-operand index to its source location so diagnostics point at the
// offending operand. Returns true (the Error() convention) on failure.
3299 // FIXME: This entire function is a giant hack to provide us with decent
3300 // operand range validation/diagnostics until TableGen/MC can be extended
3301 // to support autogeneration of this kind of validation.
3302 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3303 SmallVectorImpl<SMLoc> &Loc) {
3304 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3305 // Check for indexed addressing modes w/ the base register being the
3306 // same as a destination/source register or pair load where
3307 // the Rt == Rt2. All of those are undefined behaviour.
3308 switch (Inst.getOpcode()) {
// Pre/post-indexed GPR LDP: operand 0 is the writeback result, so Rt, Rt2
// and the base Rn are operands 1-3. isSubRegisterEq also catches a W
// register overlapping its enclosing X register.
3309 case AArch64::LDPSWpre:
3310 case AArch64::LDPWpost:
3311 case AArch64::LDPWpre:
3312 case AArch64::LDPXpost:
3313 case AArch64::LDPXpre: {
3314 unsigned Rt = Inst.getOperand(1).getReg();
3315 unsigned Rt2 = Inst.getOperand(2).getReg();
3316 unsigned Rn = Inst.getOperand(3).getReg();
3317 if (RI->isSubRegisterEq(Rn, Rt))
3318 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3319 "is also a destination");
3320 if (RI->isSubRegisterEq(Rn, Rt2))
3321 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3322 "is also a destination");
// Non-writeback LDP: Rt/Rt2 are operands 0 and 1; only the Rt == Rt2
// hazard is diagnosed here (the comparison itself is elided in this
// excerpt, but the message below establishes the intent).
3325 case AArch64::LDPDi:
3326 case AArch64::LDPQi:
3327 case AArch64::LDPSi:
3328 case AArch64::LDPSWi:
3329 case AArch64::LDPWi:
3330 case AArch64::LDPXi: {
3331 unsigned Rt = Inst.getOperand(0).getReg();
3332 unsigned Rt2 = Inst.getOperand(1).getReg();
3334 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback LDP of FP/SIMD registers (plus LDPSWpost): only Rt == Rt2 is
// diagnosed in this group.
3337 case AArch64::LDPDpost:
3338 case AArch64::LDPDpre:
3339 case AArch64::LDPQpost:
3340 case AArch64::LDPQpre:
3341 case AArch64::LDPSpost:
3342 case AArch64::LDPSpre:
3343 case AArch64::LDPSWpost: {
3344 unsigned Rt = Inst.getOperand(1).getReg();
3345 unsigned Rt2 = Inst.getOperand(2).getReg();
3347 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback STP: same operand layout as writeback LDP; the base may not
// overlap either source register.
3350 case AArch64::STPDpost:
3351 case AArch64::STPDpre:
3352 case AArch64::STPQpost:
3353 case AArch64::STPQpre:
3354 case AArch64::STPSpost:
3355 case AArch64::STPSpre:
3356 case AArch64::STPWpost:
3357 case AArch64::STPWpre:
3358 case AArch64::STPXpost:
3359 case AArch64::STPXpre: {
3360 unsigned Rt = Inst.getOperand(1).getReg();
3361 unsigned Rt2 = Inst.getOperand(2).getReg();
3362 unsigned Rn = Inst.getOperand(3).getReg();
3363 if (RI->isSubRegisterEq(Rn, Rt))
3364 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3365 "is also a source");
3366 if (RI->isSubRegisterEq(Rn, Rt2))
3367 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3368 "is also a source");
// Writeback single-register loads: Rt is operand 1, base Rn operand 2.
// NOTE(review): the message says "is also a source", but Rt is the load
// *destination* here — wording looks copy-pasted from the STR case.
3371 case AArch64::LDRBBpre:
3372 case AArch64::LDRBpre:
3373 case AArch64::LDRHHpre:
3374 case AArch64::LDRHpre:
3375 case AArch64::LDRSBWpre:
3376 case AArch64::LDRSBXpre:
3377 case AArch64::LDRSHWpre:
3378 case AArch64::LDRSHXpre:
3379 case AArch64::LDRSWpre:
3380 case AArch64::LDRWpre:
3381 case AArch64::LDRXpre:
3382 case AArch64::LDRBBpost:
3383 case AArch64::LDRBpost:
3384 case AArch64::LDRHHpost:
3385 case AArch64::LDRHpost:
3386 case AArch64::LDRSBWpost:
3387 case AArch64::LDRSBXpost:
3388 case AArch64::LDRSHWpost:
3389 case AArch64::LDRSHXpost:
3390 case AArch64::LDRSWpost:
3391 case AArch64::LDRWpost:
3392 case AArch64::LDRXpost: {
3393 unsigned Rt = Inst.getOperand(1).getReg();
3394 unsigned Rn = Inst.getOperand(2).getReg();
3395 if (RI->isSubRegisterEq(Rn, Rt))
3396 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3397 "is also a source");
// Writeback single-register stores: the stored register may not overlap
// the writeback base.
3400 case AArch64::STRBBpost:
3401 case AArch64::STRBpost:
3402 case AArch64::STRHHpost:
3403 case AArch64::STRHpost:
3404 case AArch64::STRWpost:
3405 case AArch64::STRXpost:
3406 case AArch64::STRBBpre:
3407 case AArch64::STRBpre:
3408 case AArch64::STRHHpre:
3409 case AArch64::STRHpre:
3410 case AArch64::STRWpre:
3411 case AArch64::STRXpre: {
3412 unsigned Rt = Inst.getOperand(1).getReg();
3413 unsigned Rn = Inst.getOperand(2).getReg();
3414 if (RI->isSubRegisterEq(Rn, Rt))
3415 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3416 "is also a source");
3421 // Now check immediate ranges. Separate from the above as there is overlap
3422 // in the instructions being checked and this keeps the nested conditionals
3424 switch (Inst.getOpcode()) {
3425 case AArch64::ADDSWri:
3426 case AArch64::ADDSXri:
3427 case AArch64::ADDWri:
3428 case AArch64::ADDXri:
3429 case AArch64::SUBSWri:
3430 case AArch64::SUBSXri:
3431 case AArch64::SUBWri:
3432 case AArch64::SUBXri: {
3433 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3434 // some slight duplication here.
3435 if (Inst.getOperand(2).isExpr()) {
3436 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3437 AArch64MCExpr::VariantKind ELFRefKind;
3438 MCSymbolRefExpr::VariantKind DarwinRefKind;
// Decompose the expression; a non-classifiable one is rejected outright.
3440 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3441 return Error(Loc[2], "invalid immediate expression");
// Darwin @pageoff/@tlvppageoff relocations are only valid on ADDXri.
3444 // Only allow these with ADDXri.
3445 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3446 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3447 Inst.getOpcode() == AArch64::ADDXri)
// ELF :lo12:-family relocations are valid on ADDXri and ADDWri.
3450 // Only allow these with ADDXri/ADDWri
3451 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3452 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3453 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3454 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3455 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3456 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3457 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3458 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3459 (Inst.getOpcode() == AArch64::ADDXri ||
3460 Inst.getOpcode() == AArch64::ADDWri))
3463 // Don't allow expressions in the immediate field otherwise
3464 return Error(Loc[2], "invalid immediate expression")
// Translate a Match_* failure code from the generated matcher into a
// human-readable diagnostic anchored at `Loc`. Always reports via Error(),
// which returns true, so callers can `return showMatchError(...)`.
3473 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3475 case Match_MissingFeature:
3477 "instruction requires a CPU feature not currently enabled");
3478 case Match_InvalidOperand:
3479 return Error(Loc, "invalid operand for instruction");
3480 case Match_InvalidSuffix:
3481 return Error(Loc, "invalid type suffix for instruction");
3482 case Match_InvalidCondCode:
3483 return Error(Loc, "expected AArch64 condition code");
// Shift/extend-operand diagnostics.
3484 case Match_AddSubRegExtendSmall:
3486 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3487 case Match_AddSubRegExtendLarge:
3489 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3490 case Match_AddSubSecondSource:
3492 "expected compatible register, symbol or integer in range [0, 4095]");
3493 case Match_LogicalSecondSource:
3494 return Error(Loc, "expected compatible register or logical immediate");
3495 case Match_InvalidMovImm32Shift:
3496 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3497 case Match_InvalidMovImm64Shift:
3498 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3499 case Match_AddSubRegShift32:
3501 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3502 case Match_AddSubRegShift64:
3504 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3505 case Match_InvalidFPImm:
3507 "expected compatible register or floating-point constant");
// Memory-offset diagnostics: scaled signed 7-bit and signed 9-bit forms.
3508 case Match_InvalidMemoryIndexedSImm9:
3509 return Error(Loc, "index must be an integer in range [-256, 255].");
3510 case Match_InvalidMemoryIndexed4SImm7:
3511 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3512 case Match_InvalidMemoryIndexed8SImm7:
3513 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3514 case Match_InvalidMemoryIndexed16SImm7:
3515 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3516 case Match_InvalidMemoryWExtend8:
3518 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3519 case Match_InvalidMemoryWExtend16:
3521 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3522 case Match_InvalidMemoryWExtend32:
3524 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3525 case Match_InvalidMemoryWExtend64:
3527 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3528 case Match_InvalidMemoryWExtend128:
3530 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3531 case Match_InvalidMemoryXExtend8:
3533 "expected 'lsl' or 'sxtx' with optional shift of #0");
3534 case Match_InvalidMemoryXExtend16:
3536 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3537 case Match_InvalidMemoryXExtend32:
3539 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3540 case Match_InvalidMemoryXExtend64:
3542 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3543 case Match_InvalidMemoryXExtend128:
3545 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Unsigned scaled 12-bit offsets.
3546 case Match_InvalidMemoryIndexed1:
3547 return Error(Loc, "index must be an integer in range [0, 4095].");
3548 case Match_InvalidMemoryIndexed2:
3549 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3550 case Match_InvalidMemoryIndexed4:
3551 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3552 case Match_InvalidMemoryIndexed8:
3553 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3554 case Match_InvalidMemoryIndexed16:
3555 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate-range diagnostics.
3556 case Match_InvalidImm0_7:
3557 return Error(Loc, "immediate must be an integer in range [0, 7].");
3558 case Match_InvalidImm0_15:
3559 return Error(Loc, "immediate must be an integer in range [0, 15].");
3560 case Match_InvalidImm0_31:
3561 return Error(Loc, "immediate must be an integer in range [0, 31].");
3562 case Match_InvalidImm0_63:
3563 return Error(Loc, "immediate must be an integer in range [0, 63].");
3564 case Match_InvalidImm0_127:
3565 return Error(Loc, "immediate must be an integer in range [0, 127].");
3566 case Match_InvalidImm0_65535:
3567 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3568 case Match_InvalidImm1_8:
3569 return Error(Loc, "immediate must be an integer in range [1, 8].");
3570 case Match_InvalidImm1_16:
3571 return Error(Loc, "immediate must be an integer in range [1, 16].");
3572 case Match_InvalidImm1_32:
3573 return Error(Loc, "immediate must be an integer in range [1, 32].");
3574 case Match_InvalidImm1_64:
3575 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics.
3576 case Match_InvalidIndex1:
3577 return Error(Loc, "expected lane specifier '[1]'");
3578 case Match_InvalidIndexB:
3579 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3580 case Match_InvalidIndexH:
3581 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3582 case Match_InvalidIndexS:
3583 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3584 case Match_InvalidIndexD:
3585 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3586 case Match_InvalidLabel:
3587 return Error(Loc, "expected label or encodable integer pc offset");
3589 return Error(Loc, "expected readable system register");
3591 return Error(Loc, "expected writable system register or pstate");
3592 case Match_MnemonicFail:
3593 return Error(Loc, "unrecognized instruction mnemonic");
3595 llvm_unreachable("unexpected error code!");
// Provided by AArch64GenAsmMatcher.inc via GET_SUBTARGET_FEATURE_NAME
// (included at the bottom of this file).
3599 static const char *getSubtargetFeatureName(uint64_t Val);
// Main match-and-emit entry point. First applies a set of hand-written
// mnemonic rewrites (aliases TableGen's InstAlias cannot express), then runs
// the generated matcher twice — variant 1 (short-form NEON) before variant 0
// (long-form NEON) — validates the result, and either emits the instruction
// or produces a diagnostic.
3601 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3602 OperandVector &Operands,
3604 uint64_t &ErrorInfo,
3605 bool MatchingInlineAsm) {
3606 assert(!Operands.empty() && "Unexpect empty operand list!");
3607 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3608 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3610 StringRef Tok = Op.getToken();
3611 unsigned NumOperands = Operands.size();
// Rewrite "lsl Rd, Rn, #imm" into the underlying UBFM form:
// immr = (regwidth - imm) mod regwidth, imms = regwidth - 1 - imm.
3613 if (NumOperands == 4 && Tok == "lsl") {
3614 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3615 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3616 if (Op2.isReg() && Op3.isImm()) {
3617 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3619 uint64_t Op3Val = Op3CE->getValue();
3620 uint64_t NewOp3Val = 0;
3621 uint64_t NewOp4Val = 0;
3622 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3624 NewOp3Val = (32 - Op3Val) & 0x1f;
3625 NewOp4Val = 31 - Op3Val;
3627 NewOp3Val = (64 - Op3Val) & 0x3f;
3628 NewOp4Val = 63 - Op3Val;
3631 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3632 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3634 Operands[0] = AArch64Operand::CreateToken(
3635 "ubfm", false, Op.getStartLoc(), getContext());
3636 Operands.push_back(AArch64Operand::CreateImm(
3637 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3638 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3639 Op3.getEndLoc(), getContext());
3642 } else if (NumOperands == 5) {
3643 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3644 // UBFIZ -> UBFM aliases.
3645 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3646 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3647 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3648 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3650 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3651 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3652 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3654 if (Op3CE && Op4CE) {
3655 uint64_t Op3Val = Op3CE->getValue();
3656 uint64_t Op4Val = Op4CE->getValue();
3658 uint64_t RegWidth = 0;
3659 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb and width against the destination register width.
// NOTE(review): the messages hard-code the 32-bit ranges ("[0, 31]",
// "[1, 32]") even when RegWidth is 64 — misleading for X registers.
3665 if (Op3Val >= RegWidth)
3666 return Error(Op3.getStartLoc(),
3667 "expected integer in range [0, 31]");
3668 if (Op4Val < 1 || Op4Val > RegWidth)
3669 return Error(Op4.getStartLoc(),
3670 "expected integer in range [1, 32]");
// BFI-family rewrite: immr = (regwidth - lsb) mod regwidth,
// imms = width - 1.
3672 uint64_t NewOp3Val = 0;
3673 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3675 NewOp3Val = (32 - Op3Val) & 0x1f;
3677 NewOp3Val = (64 - Op3Val) & 0x3f;
3679 uint64_t NewOp4Val = Op4Val - 1;
3681 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3682 return Error(Op4.getStartLoc(),
3683 "requested insert overflows register");
3685 const MCExpr *NewOp3 =
3686 MCConstantExpr::Create(NewOp3Val, getContext());
3687 const MCExpr *NewOp4 =
3688 MCConstantExpr::Create(NewOp4Val, getContext());
3689 Operands[3] = AArch64Operand::CreateImm(
3690 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3691 Operands[4] = AArch64Operand::CreateImm(
3692 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3694 Operands[0] = AArch64Operand::CreateToken(
3695 "bfm", false, Op.getStartLoc(), getContext());
3696 else if (Tok == "sbfiz")
3697 Operands[0] = AArch64Operand::CreateToken(
3698 "sbfm", false, Op.getStartLoc(), getContext());
3699 else if (Tok == "ubfiz")
3700 Operands[0] = AArch64Operand::CreateToken(
3701 "ubfm", false, Op.getStartLoc(), getContext());
3703 llvm_unreachable("No valid mnemonic for alias?");
3707 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3708 // UBFX -> UBFM aliases.
3709 } else if (NumOperands == 5 &&
3710 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3711 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3712 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3713 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3715 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3716 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3717 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3719 if (Op3CE && Op4CE) {
3720 uint64_t Op3Val = Op3CE->getValue();
3721 uint64_t Op4Val = Op4CE->getValue();
3723 uint64_t RegWidth = 0;
3724 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// NOTE(review): same hard-coded 32-bit ranges in the messages as above.
3730 if (Op3Val >= RegWidth)
3731 return Error(Op3.getStartLoc(),
3732 "expected integer in range [0, 31]");
3733 if (Op4Val < 1 || Op4Val > RegWidth)
3734 return Error(Op4.getStartLoc(),
3735 "expected integer in range [1, 32]");
// BFXIL-family rewrite: imms = lsb + width - 1 (immr stays lsb).
3737 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3739 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3740 return Error(Op4.getStartLoc(),
3741 "requested extract overflows register");
3743 const MCExpr *NewOp4 =
3744 MCConstantExpr::Create(NewOp4Val, getContext());
3745 Operands[4] = AArch64Operand::CreateImm(
3746 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3748 Operands[0] = AArch64Operand::CreateToken(
3749 "bfm", false, Op.getStartLoc(), getContext());
3750 else if (Tok == "sbfx")
3751 Operands[0] = AArch64Operand::CreateToken(
3752 "sbfm", false, Op.getStartLoc(), getContext());
3753 else if (Tok == "ubfx")
3754 Operands[0] = AArch64Operand::CreateToken(
3755 "ubfm", false, Op.getStartLoc(), getContext());
3757 llvm_unreachable("No valid mnemonic for alias?");
3762 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3763 // InstAlias can't quite handle this since the reg classes aren't
3765 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3766 // The source register can be Wn here, but the matcher expects a
3767 // GPR64. Twiddle it here if necessary.
3768 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3770 unsigned Reg = getXRegFromWReg(Op.getReg());
3771 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3772 Op.getEndLoc(), getContext());
3775 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3776 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3777 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3779 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3781 // The source register can be Wn here, but the matcher expects a
3782 // GPR64. Twiddle it here if necessary.
3783 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3785 unsigned Reg = getXRegFromWReg(Op.getReg());
3786 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3787 Op.getEndLoc(), getContext());
3791 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3792 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3793 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3795 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3797 // The source register can be Wn here, but the matcher expects a
3798 // GPR32. Twiddle it here if necessary.
3799 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3801 unsigned Reg = getWRegFromXReg(Op.getReg());
3802 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3803 Op.getEndLoc(), getContext());
// FMOV Rd, #0.0: the FP-immediate sentinel (unsigned)-1 marks 0.0, which is
// re-expressed as a move from WZR/XZR.
3808 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3809 if (NumOperands == 3 && Tok == "fmov") {
3810 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3811 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3812 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3814 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3818 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3819 Op.getEndLoc(), getContext());
3824 // First try to match against the secondary set of tables containing the
3825 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3826 unsigned MatchResult =
3827 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3829 // If that fails, try against the alternate table containing long-form NEON:
3830 // "fadd v0.2s, v1.2s, v2.2s"
3831 if (MatchResult != Match_Success)
3833 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3835 switch (MatchResult) {
3836 case Match_Success: {
3837 // Perform range checking and other semantic validations
3838 SmallVector<SMLoc, 8> OperandLocs;
3839 NumOperands = Operands.size();
3840 for (unsigned i = 1; i < NumOperands; ++i)
3841 OperandLocs.push_back(Operands[i]->getStartLoc());
3842 if (validateInstruction(Inst, OperandLocs))
3846 Out.EmitInstruction(Inst, STI);
3849 case Match_MissingFeature: {
3850 assert(ErrorInfo && "Unknown missing feature!");
3851 // Special case the error message for the very common case where only
3852 // a single subtarget feature is missing (neon, e.g.).
3853 std::string Msg = "instruction requires:";
// Walk every bit of the feature mask and append each missing feature name.
3855 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3856 if (ErrorInfo & Mask) {
3858 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3862 return Error(IDLoc, Msg);
3864 case Match_MnemonicFail:
3865 return showMatchError(IDLoc, MatchResult);
3866 case Match_InvalidOperand: {
// ErrorInfo is the index of the offending operand when known (~0ULL when
// not). Fall back to the instruction location if the operand has none.
3867 SMLoc ErrorLoc = IDLoc;
3868 if (ErrorInfo != ~0ULL) {
3869 if (ErrorInfo >= Operands.size())
3870 return Error(IDLoc, "too few operands for instruction");
3872 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3873 if (ErrorLoc == SMLoc())
3876 // If the match failed on a suffix token operand, tweak the diagnostic
3878 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3879 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3880 MatchResult = Match_InvalidSuffix;
3882 return showMatchError(ErrorLoc, MatchResult);
// All remaining diagnoses share one path: locate the operand and defer to
// showMatchError for the message text.
3884 case Match_InvalidMemoryIndexed1:
3885 case Match_InvalidMemoryIndexed2:
3886 case Match_InvalidMemoryIndexed4:
3887 case Match_InvalidMemoryIndexed8:
3888 case Match_InvalidMemoryIndexed16:
3889 case Match_InvalidCondCode:
3890 case Match_AddSubRegExtendSmall:
3891 case Match_AddSubRegExtendLarge:
3892 case Match_AddSubSecondSource:
3893 case Match_LogicalSecondSource:
3894 case Match_AddSubRegShift32:
3895 case Match_AddSubRegShift64:
3896 case Match_InvalidMovImm32Shift:
3897 case Match_InvalidMovImm64Shift:
3898 case Match_InvalidFPImm:
3899 case Match_InvalidMemoryWExtend8:
3900 case Match_InvalidMemoryWExtend16:
3901 case Match_InvalidMemoryWExtend32:
3902 case Match_InvalidMemoryWExtend64:
3903 case Match_InvalidMemoryWExtend128:
3904 case Match_InvalidMemoryXExtend8:
3905 case Match_InvalidMemoryXExtend16:
3906 case Match_InvalidMemoryXExtend32:
3907 case Match_InvalidMemoryXExtend64:
3908 case Match_InvalidMemoryXExtend128:
3909 case Match_InvalidMemoryIndexed4SImm7:
3910 case Match_InvalidMemoryIndexed8SImm7:
3911 case Match_InvalidMemoryIndexed16SImm7:
3912 case Match_InvalidMemoryIndexedSImm9:
3913 case Match_InvalidImm0_7:
3914 case Match_InvalidImm0_15:
3915 case Match_InvalidImm0_31:
3916 case Match_InvalidImm0_63:
3917 case Match_InvalidImm0_127:
3918 case Match_InvalidImm0_65535:
3919 case Match_InvalidImm1_8:
3920 case Match_InvalidImm1_16:
3921 case Match_InvalidImm1_32:
3922 case Match_InvalidImm1_64:
3923 case Match_InvalidIndex1:
3924 case Match_InvalidIndexB:
3925 case Match_InvalidIndexH:
3926 case Match_InvalidIndexS:
3927 case Match_InvalidIndexD:
3928 case Match_InvalidLabel:
3931 if (ErrorInfo >= Operands.size())
3932 return Error(IDLoc, "too few operands for instruction");
3933 // Any time we get here, there's nothing fancy to do. Just get the
3934 // operand SMLoc and display the diagnostic.
3935 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3936 if (ErrorLoc == SMLoc())
3938 return showMatchError(ErrorLoc, MatchResult);
3942 llvm_unreachable("Implement any new match types added!")
// Dispatches AArch64-specific assembler directives by name. Returns the
// result of the specific handler; object-format gating keeps ELF-only
// directives (.inst here) away from Mach-O/COFF output, and .loh is handled
// at the end (its guarding condition is elided in this excerpt).
3945 /// ParseDirective parses the arm specific directives
3946 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3947 const MCObjectFileInfo::Environment Format =
3948 getContext().getObjectFileInfo()->getObjectFileType();
3949 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3950 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3952 StringRef IDVal = DirectiveID.getIdentifier();
3953 SMLoc Loc = DirectiveID.getLoc();
// .hword/.word/.xword emit 2/4/8-byte data values respectively.
3954 if (IDVal == ".hword")
3955 return parseDirectiveWord(2, Loc);
3956 if (IDVal == ".word")
3957 return parseDirectiveWord(4, Loc);
3958 if (IDVal == ".xword")
3959 return parseDirectiveWord(8, Loc);
3960 if (IDVal == ".tlsdesccall")
3961 return parseDirectiveTLSDescCall(Loc);
3962 if (IDVal == ".ltorg" || IDVal == ".pool")
3963 return parseDirectiveLtorg(Loc);
3964 if (IDVal == ".unreq")
3965 return parseDirectiveUnreq(DirectiveID.getLoc());
3967 if (!IsMachO && !IsCOFF) {
3968 if (IDVal == ".inst")
3969 return parseDirectiveInst(Loc);
3972 return parseDirectiveLOH(IDVal, Loc);
// Emits a comma-separated list of expressions, each as a `Size`-byte value,
// looping until end of statement. Returns true on a malformed list.
3975 /// parseDirectiveWord
3976 /// ::= .word [ expression (, expression)* ]
3977 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3978 MCAsmParser &Parser = getParser();
3979 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3981 const MCExpr *Value;
3982 if (getParser().parseExpression(Value))
3985 getParser().getStreamer().EmitValue(Value, Size);
3987 if (getLexer().is(AsmToken::EndOfStatement))
3990 // FIXME: Improve diagnostic.
3991 if (getLexer().isNot(AsmToken::Comma))
3992 return Error(L, "unexpected token in directive");
// Emits one or more raw 32-bit instruction words via the target streamer.
// Each operand must evaluate to a constant expression; reports (without
// returning immediately here, per the visible control flow) when the list
// is empty, non-constant, or badly delimited.
4001 /// parseDirectiveInst
4002 /// ::= .inst opcode [, ...]
4003 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4004 MCAsmParser &Parser = getParser();
4005 if (getLexer().is(AsmToken::EndOfStatement)) {
4006 Parser.eatToEndOfStatement();
4007 Error(Loc, "expected expression following directive");
4014 if (getParser().parseExpression(Expr)) {
4015 Error(Loc, "expected expression");
// Only literal constants may be emitted as raw encodings.
4019 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4021 Error(Loc, "expected constant expression");
4025 getTargetStreamer().emitInst(Value->getValue());
4027 if (getLexer().is(AsmToken::EndOfStatement))
4030 if (getLexer().isNot(AsmToken::Comma)) {
4031 Error(Loc, "unexpected token in directive");
4035 Parser.Lex(); // Eat comma.
// Emits the pseudo-instruction AArch64::TLSDESCCALL carrying the named
// symbol wrapped in a VK_TLSDESC AArch64MCExpr, so the streamer can place
// the TLS descriptor relocation on the following call.
4042 // parseDirectiveTLSDescCall:
4043 // ::= .tlsdesccall symbol
4044 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4046 if (getParser().parseIdentifier(Name))
4047 return Error(L, "expected symbol after directive");
4049 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4050 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4051 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4054 Inst.setOpcode(AArch64::TLSDESCCALL);
4055 Inst.addOperand(MCOperand::CreateExpr(Expr));
4057 getParser().getStreamer().EmitInstruction(Inst, STI);
// Parses a Mach-O Linker Optimization Hint directive: an LOH kind given
// either by name or by numeric id, followed by the kind-specific number of
// label arguments, then emits it via EmitLOHDirective.
4061 /// ::= .loh <lohName | lohId> label1, ..., labelN
4062 /// The number of arguments depends on the loh identifier.
4063 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4064 if (IDVal != MCLOHDirectiveName())
4067 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4068 if (getParser().getTok().isNot(AsmToken::Integer))
4069 return TokError("expected an identifier or a number in directive")
4070 // We successfully get a numeric value for the identifier.
4071 // Check if it is valid.
4072 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): `Id <= -1U` mixes a signed 64-bit value with an unsigned
// 32-bit literal; the intended "is it in range" check looks dubious and
// deserves a second look against upstream.
4073 if (Id <= -1U && !isValidMCLOHType(Id))
4074 return TokError("invalid numeric identifier in directive");
4075 Kind = (MCLOHType)Id;
4077 StringRef Name = getTok().getIdentifier();
4078 // We successfully parse an identifier.
4079 // Check if it is a recognized one.
4080 int Id = MCLOHNameToId(Name);
4083 return TokError("invalid identifier in directive");
4084 Kind = (MCLOHType)Id;
4086 // Consume the identifier.
4088 // Get the number of arguments of this LOH.
4089 int NbArgs = MCLOHIdToNbArgs(Kind);
4091 assert(NbArgs != -1 && "Invalid number of arguments");
// Collect exactly NbArgs comma-separated label identifiers.
4093 SmallVector<MCSymbol *, 3> Args;
4094 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4096 if (getParser().parseIdentifier(Name))
4097 return TokError("expected identifier in directive");
4098 Args.push_back(getContext().GetOrCreateSymbol(Name));
4100 if (Idx + 1 == NbArgs)
4102 if (getLexer().isNot(AsmToken::Comma))
4103 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4106 if (getLexer().isNot(AsmToken::EndOfStatement))
4107 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4109 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
// Flushes the pending constant pool at the current location (.ltorg/.pool).
4113 /// parseDirectiveLtorg
4114 /// ::= .ltorg | .pool
4115 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4116 getTargetStreamer().emitCurrentConstantPool();
// Handles "name .req register": records an alias from `Name` to a scalar or
// vector register in RegisterReqs. A vector register given *with* a type
// suffix is rejected; a redefinition that maps the same name to a different
// register only warns (the first definition wins, since StringMap::insert
// keeps the existing entry).
4120 /// parseDirectiveReq
4121 /// ::= name .req registername
4122 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4123 MCAsmParser &Parser = getParser();
4124 Parser.Lex(); // Eat the '.req' token.
4125 SMLoc SRegLoc = getLoc();
4126 unsigned RegNum = tryParseRegister();
4127 bool IsVector = false;
// Not a scalar register: retry as a (type-less) vector register.
4129 if (RegNum == static_cast<unsigned>(-1)) {
4131 RegNum = tryMatchVectorRegister(Kind, false);
4132 if (!Kind.empty()) {
4133 Error(SRegLoc, "vector register without type specifier expected");
4139 if (RegNum == static_cast<unsigned>(-1)) {
4140 Parser.eatToEndOfStatement();
4141 Error(SRegLoc, "register name or alias expected");
4145 // Shouldn't be anything else.
4146 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4147 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4148 Parser.eatToEndOfStatement();
4152 Parser.Lex(); // Consume the EndOfStatement
4154 auto pair = std::make_pair(IsVector, RegNum);
4155 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4156 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
// Handles ".unreq name": removes a .req alias. The lookup lower-cases the
// identifier, matching however RegisterReqs keys are stored.
4161 /// parseDirectiveUneq
4162 /// ::= .unreq registername
4163 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4164 MCAsmParser &Parser = getParser();
4165 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4166 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4167 Parser.eatToEndOfStatement();
4170 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4171 Parser.Lex(); // Eat the identifier.
// Decomposes `Expr` into (optional AArch64MCExpr relocation wrapper) +
// (symbol reference) + (optional constant addend), writing the pieces into
// the out-parameters. Returns true when the expression has that shape and
// does not mix ELF (:lo12: etc.) with Darwin (@pageoff etc.) syntax.
4176 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4177 AArch64MCExpr::VariantKind &ELFRefKind,
4178 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4180 ELFRefKind = AArch64MCExpr::VK_INVALID;
4181 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off a target-specific wrapper (e.g. :lo12:sym) if present.
4184 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4185 ELFRefKind = AE->getKind();
4186 Expr = AE->getSubExpr();
4189 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4191 // It's a simple symbol reference with no addend.
4192 DarwinRefKind = SE->getKind();
// Otherwise it must be symbol +/- constant: check the binary-expression
// shape and extract the addend.
4196 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4200 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4203 DarwinRefKind = SE->getKind();
4205 if (BE->getOpcode() != MCBinaryExpr::Add &&
4206 BE->getOpcode() != MCBinaryExpr::Sub)
4209 // See if the addend is is a constant, otherwise there's more going
4210 // on here than we can deal with.
4211 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
// Negate the addend for subtraction so callers always see sym + Addend.
4215 Addend = AddendExpr->getValue();
4216 if (BE->getOpcode() == MCBinaryExpr::Sub)
4219 // It's some symbol reference + a constant addend, but really
4220 // shouldn't use both Darwin and ELF syntax.
4221 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4222 DarwinRefKind == MCSymbolRefExpr::VK_None;
// Registers this asm parser for all three AArch64 target triples
// (little-endian, big-endian, and the legacy "arm64" name).
4225 /// Force static initialization.
4226 extern "C" void LLVMInitializeAArch64AsmParser() {
4227 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4228 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4229 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4232 #define GET_REGISTER_MATCHER
4233 #define GET_SUBTARGET_FEATURE_NAME
4234 #define GET_MATCHER_IMPLEMENTATION
4235 #include "AArch64GenAsmMatcher.inc"
4237 // Define this matcher function after the auto-generated include so we
4238 // have the match class enum definitions.
4239 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4241 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4242 // If the kind is a token for a literal immediate, check if our asm
4243 // operand matches. This is for InstAliases which have a fixed-value
4244 // immediate in the syntax.
4245 int64_t ExpectedVal;
4248 return Match_InvalidOperand;
4290 return Match_InvalidOperand;
4291 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4293 return Match_InvalidOperand;
4294 if (CE->getValue() == ExpectedVal)
4295 return Match_Success;
4296 return Match_InvalidOperand;