1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
// NOTE(review): This is a line-elided extract of LLVM's AArch64AsmParser.cpp.
// Each surviving line carries its original file line number; gaps in that
// numbering mark elided lines (including this class's closing brace).
// AArch64AsmParser: target hook that turns AArch64 assembly text into
// MCInst instructions via the table-generated matcher.
42 class AArch64AsmParser : public MCTargetAsmParser {
44 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
// Lazily fetch (and cast) the AArch64-specific target streamer attached to
// the generic streamer; the ctor below guarantees one exists.
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
// Location of the token currently under the lexer cursor.
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
// Hand-written parsers for operands the generated matcher can't handle.
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostic helpers forwarding to the generic parser.
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Handlers for AArch64-specific assembler directives.
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers referenced from the generated matcher tables.
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match-result codes, extended by tablegen diagnostics.
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
// Ctor: attach a target streamer if none exists yet and compute the
// subtarget feature set used by the generated matcher.
116 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
117 const MCInstrInfo &MII, const MCTargetOptions &Options)
118 : MCTargetAsmParser(), STI(STI) {
119 MCAsmParserExtension::Initialize(Parser);
120 MCStreamer &S = getParser().getStreamer();
121 if (S.getTargetStreamer() == nullptr)
// Streamer takes ownership; this is the MC idiom, not a leak.
122 new AArch64TargetStreamer(S);
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface overrides.
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
// Decompose a symbolic expression into its AArch64/Darwin relocation
// variant kind plus constant addend (used by the operand predicates below).
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
// NOTE(review): many member declarations are elided from this extract
// (gaps in the embedded line numbers); the struct/union skeleton below is
// partial.
146 class AArch64Operand : public MCParsedAsmOperand {
// Source range covered by this operand, for diagnostics.
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Payload for a vector register list, e.g. "{ v0.8b, v1.8b }".
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
// Payload for a vector lane index, e.g. "[2]".
184 struct VectorIndexOp {
// Payload for an immediate with an explicit left shift, e.g. "#1, lsl #12".
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
216 uint32_t PStateField;
// Payload for a register shift/extend modifier (LSL/UXTW/SXTX/...).
229 struct ShiftExtendOp {
230 AArch64_AM::ShiftExtendType Type;
232 bool HasExplicitAmount;
// Discriminated-by-Kind storage; only the member matching Kind is valid.
242 struct VectorListOp VectorList;
243 struct VectorIndexOp VectorIndex;
245 struct ShiftedImmOp ShiftedImm;
246 struct CondCodeOp CondCode;
247 struct FPImmOp FPImm;
248 struct BarrierOp Barrier;
249 struct SysRegOp SysReg;
250 struct SysCRImmOp SysCRImm;
251 struct PrefetchOp Prefetch;
252 struct ShiftExtendOp ShiftExtend;
255 // Keep the MCContext around as the MCExprs may need manipulated during
256 // the add<>Operands() calls.
260 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
// Copy ctor: copies locations then the Kind-appropriate payload
// (the switch-on-Kind lines are elided from this extract).
262 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
264 StartLoc = o.StartLoc;
274 ShiftedImm = o.ShiftedImm;
277 CondCode = o.CondCode;
289 VectorList = o.VectorList;
292 VectorIndex = o.VectorIndex;
298 SysCRImm = o.SysCRImm;
301 Prefetch = o.Prefetch;
304 ShiftExtend = o.ShiftExtend;
// Kind-checked accessors: each asserts the operand's Kind tag before
// reading the matching payload member.
309 /// getStartLoc - Get the location of the first token of this operand.
310 SMLoc getStartLoc() const override { return StartLoc; }
311 /// getEndLoc - Get the location of the last token of this operand.
312 SMLoc getEndLoc() const override { return EndLoc; }
// Raw token text (pointer + length stored in the Tok payload).
314 StringRef getToken() const {
315 assert(Kind == k_Token && "Invalid access!");
316 return StringRef(Tok.Data, Tok.Length);
319 bool isTokenSuffix() const {
320 assert(Kind == k_Token && "Invalid access!");
324 const MCExpr *getImm() const {
325 assert(Kind == k_Immediate && "Invalid access!");
329 const MCExpr *getShiftedImmVal() const {
330 assert(Kind == k_ShiftedImm && "Invalid access!");
331 return ShiftedImm.Val;
334 unsigned getShiftedImmShift() const {
335 assert(Kind == k_ShiftedImm && "Invalid access!");
336 return ShiftedImm.ShiftAmount;
339 AArch64CC::CondCode getCondCode() const {
340 assert(Kind == k_CondCode && "Invalid access!");
341 return CondCode.Code;
344 unsigned getFPImm() const {
345 assert(Kind == k_FPImm && "Invalid access!");
349 unsigned getBarrier() const {
350 assert(Kind == k_Barrier && "Invalid access!");
354 StringRef getBarrierName() const {
355 assert(Kind == k_Barrier && "Invalid access!");
356 return StringRef(Barrier.Data, Barrier.Length);
359 unsigned getReg() const override {
360 assert(Kind == k_Register && "Invalid access!");
// First register of a vector list and the number of registers in it.
364 unsigned getVectorListStart() const {
365 assert(Kind == k_VectorList && "Invalid access!");
366 return VectorList.RegNum;
369 unsigned getVectorListCount() const {
370 assert(Kind == k_VectorList && "Invalid access!");
371 return VectorList.Count;
374 unsigned getVectorIndex() const {
375 assert(Kind == k_VectorIndex && "Invalid access!");
376 return VectorIndex.Val;
379 StringRef getSysReg() const {
380 assert(Kind == k_SysReg && "Invalid access!");
381 return StringRef(SysReg.Data, SysReg.Length);
384 unsigned getSysCR() const {
385 assert(Kind == k_SysCR && "Invalid access!");
389 unsigned getPrefetch() const {
390 assert(Kind == k_Prefetch && "Invalid access!");
394 StringRef getPrefetchName() const {
395 assert(Kind == k_Prefetch && "Invalid access!");
396 return StringRef(Prefetch.Data, Prefetch.Length);
399 AArch64_AM::ShiftExtendType getShiftExtendType() const {
400 assert(Kind == k_ShiftExtend && "Invalid access!");
401 return ShiftExtend.Type;
404 unsigned getShiftExtendAmount() const {
405 assert(Kind == k_ShiftExtend && "Invalid access!");
406 return ShiftExtend.Amount;
409 bool hasShiftExtendAmount() const {
410 assert(Kind == k_ShiftExtend && "Invalid access!");
411 return ShiftExtend.HasExplicitAmount;
// Immediate-range predicates used by the generated matcher to classify a
// parsed immediate against each instruction's operand class.
// NOTE(review): upstream each predicate begins with
// "if (!isImm()) return false;" and "if (!MCE) return false;" guards;
// those lines are elided from this extract (see numbering gaps).
414 bool isImm() const override { return Kind == k_Immediate; }
// AArch64 has no plain memory operand class; addresses are modeled as
// register + immediate/extend operands.
415 bool isMem() const override { return false; }
// Signed 9-bit immediate: [-256, 255] (unscaled load/store offsets).
416 bool isSImm9() const {
419 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
422 int64_t Val = MCE->getValue();
423 return (Val >= -256 && Val < 256);
// Signed 7-bit immediate scaled by 4: multiples of 4 in [-256, 252].
425 bool isSImm7s4() const {
428 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
431 int64_t Val = MCE->getValue();
432 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
// Signed 7-bit immediate scaled by 8: multiples of 8 in [-512, 504].
434 bool isSImm7s8() const {
437 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
440 int64_t Val = MCE->getValue();
441 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
// Signed 7-bit immediate scaled by 16: multiples of 16 in [-1024, 1008].
443 bool isSImm7s16() const {
446 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
449 int64_t Val = MCE->getValue();
450 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Is Expr a symbolic reference usable as a scaled unsigned 12-bit
// load/store offset (e.g. :lo12:sym)?
453 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
454 AArch64MCExpr::VariantKind ELFRefKind;
455 MCSymbolRefExpr::VariantKind DarwinRefKind;
457 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
459 // If we don't understand the expression, assume the best and
460 // let the fixup and relocation code deal with it.
464 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
465 ELFRefKind == AArch64MCExpr::VK_LO12 ||
466 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
469 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
473 // Note that we don't range-check the addend. It's adjusted modulo page
474 // size when converted, so there is no "out of range" condition when using
476 return Addend >= 0 && (Addend % Scale) == 0;
477 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
478 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
479 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset, scaled by the access size: a multiple of Scale
// in [0, Scale * 4095], or an acceptable symbolic expression.
486 template <int Scale> bool isUImm12Offset() const {
490 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 return isSymbolicUImm12Offset(getImm(), Scale);
494 int64_t Val = MCE->getValue();
495 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Plain unsigned/small ranges; names give the inclusive bounds.
498 bool isImm0_7() const {
501 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
504 int64_t Val = MCE->getValue();
505 return (Val >= 0 && Val < 8);
507 bool isImm1_8() const {
510 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
513 int64_t Val = MCE->getValue();
514 return (Val > 0 && Val < 9);
516 bool isImm0_15() const {
519 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
522 int64_t Val = MCE->getValue();
523 return (Val >= 0 && Val < 16);
525 bool isImm1_16() const {
528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
531 int64_t Val = MCE->getValue();
532 return (Val > 0 && Val < 17);
534 bool isImm0_31() const {
537 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540 int64_t Val = MCE->getValue();
541 return (Val >= 0 && Val < 32);
543 bool isImm1_31() const {
546 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
549 int64_t Val = MCE->getValue();
550 return (Val >= 1 && Val < 32);
552 bool isImm1_32() const {
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558 int64_t Val = MCE->getValue();
559 return (Val >= 1 && Val < 33);
561 bool isImm0_63() const {
564 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567 int64_t Val = MCE->getValue();
568 return (Val >= 0 && Val < 64);
570 bool isImm1_63() const {
573 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
576 int64_t Val = MCE->getValue();
577 return (Val >= 1 && Val < 64);
579 bool isImm1_64() const {
582 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
585 int64_t Val = MCE->getValue();
586 return (Val >= 1 && Val < 65);
588 bool isImm0_127() const {
591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
594 int64_t Val = MCE->getValue();
595 return (Val >= 0 && Val < 128);
597 bool isImm0_255() const {
600 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
603 int64_t Val = MCE->getValue();
604 return (Val >= 0 && Val < 256);
606 bool isImm0_65535() const {
609 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
612 int64_t Val = MCE->getValue();
613 return (Val >= 0 && Val < 65536);
615 bool isImm32_63() const {
618 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
621 int64_t Val = MCE->getValue();
622 return (Val >= 32 && Val < 64);
// 32-bit logical immediate (AND/ORR/EOR pattern-encodable). The value may
// arrive sign- or zero-extended to 64 bits, so accept either all-zero or
// all-one upper halves before checking encodability at width 32.
624 bool isLogicalImm32() const {
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 int64_t Val = MCE->getValue();
631 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
634 return AArch64_AM::isLogicalImmediate(Val, 32);
// 64-bit logical immediate.
636 bool isLogicalImm64() const {
639 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
642 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
// Inverted forms: the *complement* of the value must be encodable
// (used for BIC/ORN-style aliases).
644 bool isLogicalImm32Not() const {
647 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
650 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
651 return AArch64_AM::isLogicalImmediate(Val, 32);
653 bool isLogicalImm64Not() const {
656 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
659 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
661 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// ADD/SUB immediate: either a plain 0..4095 constant, a "#imm, lsl #0/#12"
// shifted immediate, or an acceptable symbolic low-12-bits reference.
662 bool isAddSubImm() const {
663 if (!isShiftedImm() && !isImm())
668 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
669 if (isShiftedImm()) {
670 unsigned Shift = ShiftedImm.ShiftAmount;
671 Expr = ShiftedImm.Val;
672 if (Shift != 0 && Shift != 12)
678 AArch64MCExpr::VariantKind ELFRefKind;
679 MCSymbolRefExpr::VariantKind DarwinRefKind;
681 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
682 DarwinRefKind, Addend)) {
683 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
684 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
685 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
686 || ELFRefKind == AArch64MCExpr::VK_LO12
687 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
688 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
690 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
691 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
693 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
696 // Otherwise it should be a real immediate in range:
697 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
698 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
700 bool isCondCode() const { return Kind == k_CondCode; }
// FMOV-style 8-bit floating-point immediate (AdvSIMD modified-imm type 10).
701 bool isSIMDImmType10() const {
704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
707 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: constant offsets are range-checked here
// (word-aligned, signed 26/19/14-bit ranges); alignment checks for the
// low bits are in the elided lines of each predicate upstream.
709 bool isBranchTarget26() const {
712 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
715 int64_t Val = MCE->getValue();
718 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
720 bool isPCRelLabel19() const {
723 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726 int64_t Val = MCE->getValue();
729 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
731 bool isBranchTarget14() const {
734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 int64_t Val = MCE->getValue();
740 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// Is this immediate a symbol reference whose ELF modifier is one of the
// MOVZ/MOVK-compatible :abs_gN:/:tprel_gN:/... kinds? Darwin modifiers
// are rejected for MOVW-class instructions.
744 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
748 AArch64MCExpr::VariantKind ELFRefKind;
749 MCSymbolRefExpr::VariantKind DarwinRefKind;
751 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
752 DarwinRefKind, Addend)) {
755 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
758 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
759 if (ELFRefKind == AllowedModifiers[i])
// Per-halfword MOVZ symbol classes (G3 = bits 63:48 ... G0 = bits 15:0).
766 bool isMovZSymbolG3() const {
767 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
770 bool isMovZSymbolG2() const {
771 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
772 AArch64MCExpr::VK_TPREL_G2,
773 AArch64MCExpr::VK_DTPREL_G2});
776 bool isMovZSymbolG1() const {
777 return isMovWSymbol({
778 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
779 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
780 AArch64MCExpr::VK_DTPREL_G1,
784 bool isMovZSymbolG0() const {
785 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
786 AArch64MCExpr::VK_TPREL_G0,
787 AArch64MCExpr::VK_DTPREL_G0});
// MOVK variants only accept the no-check (_NC) modifiers (except G3).
790 bool isMovKSymbolG3() const {
791 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
794 bool isMovKSymbolG2() const {
795 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
798 bool isMovKSymbolG1() const {
799 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
800 AArch64MCExpr::VK_TPREL_G1_NC,
801 AArch64MCExpr::VK_DTPREL_G1_NC});
804 bool isMovKSymbolG0() const {
806 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
807 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
// Can "mov Xd, #imm" be encoded as MOVZ with this Shift? True when the
// constant has nonzero bits only in the 16-bit field at Shift.
810 template<int RegWidth, int Shift>
811 bool isMOVZMovAlias() const {
812 if (!isImm()) return false;
814 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815 if (!CE) return false;
816 uint64_t Value = CE->getValue();
// For a 32-bit register only the low 32 bits of the constant matter
// (the truncation guard for RegWidth==32 is partially elided here).
819 Value &= 0xffffffffULL;
821 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
822 if (Value == 0 && Shift != 0)
825 return (Value & ~(0xffffULL << Shift)) == 0;
// Same for MOVN: the alias applies only when no MOVZ form matches
// (MOVZ takes precedence), checked by scanning all four halfwords.
828 template<int RegWidth, int Shift>
829 bool isMOVNMovAlias() const {
830 if (!isImm()) return false;
832 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
833 if (!CE) return false;
834 uint64_t Value = CE->getValue();
836 // MOVZ takes precedence over MOVN.
837 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
838 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
843 Value &= 0xffffffffULL;
845 return (Value & ~(0xffffULL << Shift)) == 0;
// Simple Kind-tag predicates plus register-class refinements.
848 bool isFPImm() const { return Kind == k_FPImm; }
849 bool isBarrier() const { return Kind == k_Barrier; }
850 bool isSysReg() const { return Kind == k_SysReg; }
// -1U in the MRS/MSR/PState fields means "not valid in that role".
851 bool isMRSSystemRegister() const {
852 if (!isSysReg()) return false;
854 return SysReg.MRSReg != -1U;
856 bool isMSRSystemRegister() const {
857 if (!isSysReg()) return false;
859 return SysReg.MSRReg != -1U;
861 bool isSystemPStateField() const {
862 if (!isSysReg()) return false;
864 return SysReg.PStateField != -1U;
// A "register" operand is a scalar register; vector registers use the
// same Kind with Reg.isVector set.
866 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
867 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
// Vector register restricted to the low half of the FP/SIMD file (V0-V15).
868 bool isVectorRegLo() const {
869 return Kind == k_Register && Reg.isVector &&
870 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
// A 64-bit GPR written where a 32-bit one is needed (handled by
// addGPR32as64Operands below).
873 bool isGPR32as64() const {
874 return Kind == k_Register && !Reg.isVector &&
875 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
878 bool isGPR64sp0() const {
879 return Kind == k_Register && !Reg.isVector &&
880 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
883 /// Is this a vector list with the type implicit (presumably attached to the
884 /// instruction itself)?
885 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
886 return Kind == k_VectorList && VectorList.Count == NumRegs &&
887 !VectorList.ElementKind;
// Vector list with an explicit type, e.g. "{ v0.8b, v1.8b }" for
// NumRegs=2, NumElements=8, ElementKind='b'.
890 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
891 bool isTypedVectorList() const {
892 if (Kind != k_VectorList)
894 if (VectorList.Count != NumRegs)
896 if (VectorList.ElementKind != ElementKind)
898 return VectorList.NumElements == NumElements;
// Lane-index classes: the bound is the element count for that width.
901 bool isVectorIndex1() const {
902 return Kind == k_VectorIndex && VectorIndex.Val == 1;
904 bool isVectorIndexB() const {
905 return Kind == k_VectorIndex && VectorIndex.Val < 16;
907 bool isVectorIndexH() const {
908 return Kind == k_VectorIndex && VectorIndex.Val < 8;
910 bool isVectorIndexS() const {
911 return Kind == k_VectorIndex && VectorIndex.Val < 4;
913 bool isVectorIndexD() const {
914 return Kind == k_VectorIndex && VectorIndex.Val < 2;
916 bool isToken() const override { return Kind == k_Token; }
917 bool isTokenEqual(StringRef Str) const {
918 return Kind == k_Token && getToken() == Str;
920 bool isSysCR() const { return Kind == k_SysCR; }
921 bool isPrefetch() const { return Kind == k_Prefetch; }
922 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// Shift/extend refinements: classify a parsed ShiftExtend payload against
// the operand classes the matcher needs.
923 bool isShifter() const {
924 if (!isShiftExtend())
927 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
928 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
929 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
930 ST == AArch64_AM::MSL);
// Register-extend operand (UXTB..SXTX, or LSL as an alias) with an
// amount of at most 4.
932 bool isExtend() const {
933 if (!isShiftExtend())
936 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
937 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
938 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
939 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
940 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
941 ET == AArch64_AM::LSL) &&
942 getShiftExtendAmount() <= 4;
945 bool isExtend64() const {
948 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
949 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
950 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
952 bool isExtendLSL64() const {
955 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
956 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
957 ET == AArch64_AM::LSL) &&
958 getShiftExtendAmount() <= 4;
// Memory-operand extends: amount must be 0 or log2(access bytes).
961 template<int Width> bool isMemXExtend() const {
964 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
965 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
966 (getShiftExtendAmount() == Log2_32(Width / 8) ||
967 getShiftExtendAmount() == 0);
970 template<int Width> bool isMemWExtend() const {
973 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
974 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
975 (getShiftExtendAmount() == Log2_32(Width / 8) ||
976 getShiftExtendAmount() == 0);
979 template <unsigned width>
980 bool isArithmeticShifter() const {
984 // An arithmetic shifter is LSL, LSR, or ASR.
985 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
986 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
987 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
990 template <unsigned width>
991 bool isLogicalShifter() const {
995 // A logical shifter is LSL, LSR, ASR or ROR.
996 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
997 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
998 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
999 getShiftExtendAmount() < width;
1002 bool isMovImm32Shifter() const {
1006 // A 32-bit MOVi shifter is LSL of 0 or 16. (Fixed: this comment and the
1006 // one in isMovImm64Shifter were swapped relative to the code.)
1007 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1008 if (ST != AArch64_AM::LSL)
1010 uint64_t Val = getShiftExtendAmount();
1011 return (Val == 0 || Val == 16);
1014 bool isMovImm64Shifter() const {
1018 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1019 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1020 if (ST != AArch64_AM::LSL)
1022 uint64_t Val = getShiftExtendAmount();
1023 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1026 bool isLogicalVecShifter() const {
1030 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1031 unsigned Shift = getShiftExtendAmount();
1032 return getShiftExtendType() == AArch64_AM::LSL &&
1033 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1036 bool isLogicalVecHalfWordShifter() const {
1037 if (!isLogicalVecShifter())
1040 // A logical vector shifter is a left shift by 0 or 8.
1041 unsigned Shift = getShiftExtendAmount();
1042 return getShiftExtendType() == AArch64_AM::LSL &&
1043 (Shift == 0 || Shift == 8);
1046 bool isMoveVecShifter() const {
1047 if (!isShiftExtend())
1050 // A move vector shifter (MSL) is a left shift by 8 or 16.
1051 unsigned Shift = getShiftExtendAmount();
1052 return getShiftExtendType() == AArch64_AM::MSL &&
1053 (Shift == 8 || Shift == 16);
1056 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1057 // to LDUR/STUR when the offset is not legal for the former but is for
1058 // the latter. As such, in addition to checking for being a legal unscaled
1059 // address, also check that it is not a legal scaled address. This avoids
1060 // ambiguity in the matcher.
1062 bool isSImm9OffsetFB() const {
1063 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: a 4KiB-page-aligned offset within +/- 4GiB.
1066 bool isAdrpLabel() const {
1067 // Validation was handled during parsing, so we just sanity check that
1068 // something didn't go haywire.
1072 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1073 int64_t Val = CE->getValue();
1074 int64_t Min = - (4096 * (1LL << (21 - 1)));
1075 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1076 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: a signed 21-bit byte offset.
1082 bool isAdrLabel() const {
1083 // Validation was handled during parsing, so we just sanity check that
1084 // something didn't go haywire.
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
1090 int64_t Min = - (1LL << (21 - 1));
1091 int64_t Max = ((1LL << (21 - 1)) - 1);
1092 return Val >= Min && Val <= Max;
1098 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1099 // Add as immediates when possible. Null MCExpr = 0.
1101 Inst.addOperand(MCOperand::CreateImm(0));
1102 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1103 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1105 Inst.addOperand(MCOperand::CreateExpr(Expr));
1108 void addRegOperands(MCInst &Inst, unsigned N) const {
1109 assert(N == 1 && "Invalid number of operands!");
1110 Inst.addOperand(MCOperand::CreateReg(getReg()));
1113 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1114 assert(N == 1 && "Invalid number of operands!");
1116 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1118 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1119 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1120 RI->getEncodingValue(getReg()));
1122 Inst.addOperand(MCOperand::CreateReg(Reg));
1125 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1128 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1129 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1132 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1133 assert(N == 1 && "Invalid number of operands!");
1135 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1136 Inst.addOperand(MCOperand::CreateReg(getReg()));
1139 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1141 Inst.addOperand(MCOperand::CreateReg(getReg()));
1144 template <unsigned NumRegs>
1145 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1146 assert(N == 1 && "Invalid number of operands!");
1147 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1148 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1149 unsigned FirstReg = FirstRegs[NumRegs - 1];
1152 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1155 template <unsigned NumRegs>
1156 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1158 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1159 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1160 unsigned FirstReg = FirstRegs[NumRegs - 1];
1163 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1166 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1168 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1171 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1172 assert(N == 1 && "Invalid number of operands!");
1173 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1176 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1177 assert(N == 1 && "Invalid number of operands!");
1178 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1181 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1186 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1191 void addImmOperands(MCInst &Inst, unsigned N) const {
1192 assert(N == 1 && "Invalid number of operands!");
1193 // If this is a pageoff symrefexpr with an addend, adjust the addend
1194 // to be only the page-offset portion. Otherwise, just add the expr
1196 addExpr(Inst, getImm());
1199 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1200 assert(N == 2 && "Invalid number of operands!");
1201 if (isShiftedImm()) {
1202 addExpr(Inst, getShiftedImmVal());
1203 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1205 addExpr(Inst, getImm());
1206 Inst.addOperand(MCOperand::CreateImm(0));
1210 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1211 assert(N == 1 && "Invalid number of operands!");
1212 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1215 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1216 assert(N == 1 && "Invalid number of operands!");
1217 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1219 addExpr(Inst, getImm());
1221 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1224 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1225 addImmOperands(Inst, N);
1229 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1234 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1237 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
// Signed immediate adders. addSImm9 emits the raw constant; the 7-bit
// variants divide by 4/8/16 because load/store-pair offsets are encoded in
// units of the register size. Range validation happens elsewhere (the is*()
// predicates used by the generated matcher); the cast<> here asserts the
// operand is already a constant.
1240 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1243 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1246 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1249 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1252 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1255 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1258 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1261 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// Ranged-immediate adders (imm0_7 ... imm32_63). All of these are
// structurally identical: the range named in the method is enforced by the
// corresponding is*() predicate during operand matching, so by the time one
// of these runs the value is known to be an in-range constant and is simply
// emitted as-is.
1264 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1265 assert(N == 1 && "Invalid number of operands!");
1266 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1267 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1270 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1271 assert(N == 1 && "Invalid number of operands!");
1272 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1273 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1276 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1277 assert(N == 1 && "Invalid number of operands!");
1278 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1279 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1282 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// Redundant with the cast<> above (which already asserts), but harmless.
1285 assert(MCE && "Invalid constant immediate operand!");
1286 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1295 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1301 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1307 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1313 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1319 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1325 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1331 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1337 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1340 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1343 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1346 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Logical-immediate adders: the raw constant is converted to the AArch64
// N:immr:imms bitmask encoding via encodeLogicalImmediate. The "Not"
// variants emit the encoding of the bitwise complement (used by aliases
// such as BIC-from-AND forms). 32-bit variants mask to the low 32 bits
// first so the complement/encoding operates on the correct width.
1349 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// (the `uint64_t encoding =` declaration line is elided in this listing)
1353 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1354 Inst.addOperand(MCOperand::CreateImm(encoding));
1357 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1361 Inst.addOperand(MCOperand::CreateImm(encoding));
1364 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1368 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1369 Inst.addOperand(MCOperand::CreateImm(encoding));
1372 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// (the `uint64_t encoding =` declaration line is elided in this listing)
1376 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1377 Inst.addOperand(MCOperand::CreateImm(encoding));
// AdvSIMD modified-immediate "type 10" (the 64-bit byte-mask form):
// re-encode the constant into the 8-bit abcdefgh pattern.
1380 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1383 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1384 Inst.addOperand(MCOperand::CreateImm(encoding));
// Branch-target adders (26-bit B/BL, 19-bit conditional/CBZ/LDR-literal,
// 14-bit TBZ/TBNZ). Constant offsets are word-granular in the encoding, so
// the low 2 bits are shifted off; label expressions are emitted untouched
// and resolved later by fixups. The `if (!MCE)`/`return` structure around
// the symbolic path is elided in this listing.
1387 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1388 // Branch operands don't encode the low bits, so shift them off
1389 // here. If it's a label, however, just put it on directly as there's
1390 // not enough information now to do anything.
1391 assert(N == 1 && "Invalid number of operands!");
1392 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// symbolic path:
1394 addExpr(Inst, getImm());
// constant path:
1397 assert(MCE && "Invalid constant immediate operand!");
1398 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1401 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 addExpr(Inst, getImm());
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1415 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1416 // Branch operands don't encode the low bits, so shift them off
1417 // here. If it's a label, however, just put it on directly as there's
1418 // not enough information now to do anything.
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 addExpr(Inst, getImm());
1425 assert(MCE && "Invalid constant immediate operand!");
1426 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Simple one-operand adders: each emits the already-validated stored value
// (FP8 immediate encoding, barrier option, MRS/MSR system-register
// encodings, PSTATE field, system CRn/CRm index, prefetch op) as an
// immediate. The shifter variant packs type+amount into one immediate via
// getShifterImm.
1429 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1430 assert(N == 1 && "Invalid number of operands!");
1431 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1434 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1435 assert(N == 1 && "Invalid number of operands!");
1436 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1439 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1440 assert(N == 1 && "Invalid number of operands!");
// (an intervening line is elided in this listing)
1442 Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
1445 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1446 assert(N == 1 && "Invalid number of operands!");
1448 Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
1451 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1452 assert(N == 1 && "Invalid number of operands!");
1454 Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
1457 void addSysCROperands(MCInst &Inst, unsigned N) const {
1458 assert(N == 1 && "Invalid number of operands!");
1459 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1462 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1463 assert(N == 1 && "Invalid number of operands!");
1464 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1467 void addShifterOperands(MCInst &Inst, unsigned N) const {
1468 assert(N == 1 && "Invalid number of operands!");
// (the `unsigned Imm =` declaration line is elided in this listing)
1470 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1471 Inst.addOperand(MCOperand::CreateImm(Imm));
// Extend-operand adders. "lsl" written in assembly is canonicalized to the
// architectural UXTW (32-bit) / UXTX (64-bit) arithmetic extend before
// packing type+amount with getArithExtendImm. The mem-extend variants emit
// two flag operands: sign-extension, and whether the offset is shifted.
1474 void addExtendOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1476 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1477 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1478 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1479 Inst.addOperand(MCOperand::CreateImm(Imm));
1482 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1483 assert(N == 1 && "Invalid number of operands!");
1484 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1485 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1486 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1487 Inst.addOperand(MCOperand::CreateImm(Imm));
1490 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1491 assert(N == 2 && "Invalid number of operands!");
1492 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1493 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1494 Inst.addOperand(MCOperand::CreateImm(IsSigned));
// Non-zero amount means the offset register is shifted ("DoShift").
1495 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1498 // For 8-bit load/store instructions with a register offset, both the
1499 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1500 // they're disambiguated by whether the shift was explicit or implicit rather
1502 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1503 assert(N == 2 && "Invalid number of operands!");
1504 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1505 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1506 Inst.addOperand(MCOperand::CreateImm(IsSigned));
// For byte accesses the amount is always 0, so "was it written explicitly"
// is the disambiguator instead of the amount itself.
1507 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
// MOV-alias adders: extract the 16-bit chunk of the constant selected by
// `Shift` for MOVZ, or of the complemented constant for MOVN. The
// declaration of `Shift` (presumably a template parameter or an elided
// local) is not visible in this listing — TODO confirm.
1511 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1512 assert(N == 1 && "Invalid number of operands!");
1514 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1515 uint64_t Value = CE->getValue();
1516 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1520 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1521 assert(N == 1 && "Invalid number of operands!");
1523 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1524 uint64_t Value = CE->getValue();
// MOVN encodes the bitwise complement of the desired value.
1525 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
// Debug-dump of this operand; defined out-of-line below.
1528 void print(raw_ostream &OS) const override;
// Factory helpers: each allocates an AArch64Operand of the appropriate kind
// and fills in the kind-specific union fields. The StartLoc/EndLoc
// assignments and `return Op;` lines of each factory are elided in this
// listing.
1530 static std::unique_ptr<AArch64Operand>
1531 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1532 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
// Store pointer+length rather than a StringRef member (union-friendly).
1533 Op->Tok.Data = Str.data();
1534 Op->Tok.Length = Str.size();
1535 Op->Tok.IsSuffix = IsSuffix;
1541 static std::unique_ptr<AArch64Operand>
1542 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1543 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1544 Op->Reg.RegNum = RegNum;
1545 Op->Reg.isVector = isVector;
1551 static std::unique_ptr<AArch64Operand>
1552 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1553 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1554 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1555 Op->VectorList.RegNum = RegNum;
1556 Op->VectorList.Count = Count;
1557 Op->VectorList.NumElements = NumElements;
1558 Op->VectorList.ElementKind = ElementKind;
1564 static std::unique_ptr<AArch64Operand>
1565 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1566 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1567 Op->VectorIndex.Val = Idx;
// Immediate-family factories (plain, shifted, condition-code, FP8, barrier).
// As above, location setup and `return Op;` lines are elided in this
// listing.
1573 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1574 SMLoc E, MCContext &Ctx) {
1575 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1582 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1583 unsigned ShiftAmount,
1586 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
// NOTE(review): stray space before `.Val` in the original source.
1587 Op->ShiftedImm .Val = Val;
1588 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1594 static std::unique_ptr<AArch64Operand>
1595 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1596 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1597 Op->CondCode.Code = Code;
1603 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1605 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1606 Op->FPImm.Val = Val;
1612 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1616 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1617 Op->Barrier.Val = Val;
1618 Op->Barrier.Data = Str.data();
1619 Op->Barrier.Length = Str.size();
// System-operand factories (sysreg, CRn/CRm, prefetch, shift/extend).
1625 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1628 uint32_t PStateField,
1630 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1631 Op->SysReg.Data = Str.data();
1632 Op->SysReg.Length = Str.size();
1633 Op->SysReg.MRSReg = MRSReg;
1634 Op->SysReg.MSRReg = MSRReg;
1635 Op->SysReg.PStateField = PStateField;
1641 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1642 SMLoc E, MCContext &Ctx) {
1643 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1644 Op->SysCRImm.Val = Val;
1650 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1654 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1655 Op->Prefetch.Val = Val;
// NOTE(review): this writes the name into the *Barrier* union member of a
// k_Prefetch operand. It presumably works only because Barrier and Prefetch
// share layout inside the union — verify against getPrefetchName() and
// consider storing via Prefetch.Data/Length instead.
1656 Op->Barrier.Data = Str.data();
1657 Op->Barrier.Length = Str.size();
1663 static std::unique_ptr<AArch64Operand>
1664 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1665 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1667 Op->ShiftExtend.Type = ShOp;
1668 Op->ShiftExtend.Amount = Val;
1669 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1676 } // end anonymous namespace.
// Debug-print an operand in a human-readable "<kind ...>" form. The
// surrounding `switch (Kind)`, most `case` labels, and the `break`s are
// elided in this listing; only representative bodies per kind are visible.
1678 void AArch64Operand::print(raw_ostream &OS) const {
// k_FPImm: show both the 8-bit encoding and the decoded float value.
1681 OS << "<fpimm " << getFPImm() << "("
1682 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
// k_Barrier: named option if known, otherwise the raw number.
1685 StringRef Name = getBarrierName();
1687 OS << "<barrier " << Name << ">";
1689 OS << "<barrier invalid #" << getBarrier() << ">";
// k_Immediate: delegate to the expression's own printer.
1693 getImm()->print(OS);
1695 case k_ShiftedImm: {
1696 unsigned Shift = getShiftedImmShift();
1697 OS << "<shiftedimm ";
1698 getShiftedImmVal()->print(OS);
1699 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1703 OS << "<condcode " << getCondCode() << ">";
1706 OS << "<register " << getReg() << ">";
1708 case k_VectorList: {
1709 OS << "<vectorlist ";
1710 unsigned Reg = getVectorListStart();
1711 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1712 OS << Reg + i << " ";
1717 OS << "<vectorindex " << getVectorIndex() << ">";
1720 OS << "<sysreg: " << getSysReg() << '>';
1723 OS << "'" << getToken() << "'";
1726 OS << "c" << getSysCR();
// k_Prefetch: named hint if known, otherwise the raw number.
1729 StringRef Name = getPrefetchName();
1731 OS << "<prfop " << Name << ">";
1733 OS << "<prfop invalid #" << getPrefetch() << ">";
1736 case k_ShiftExtend: {
1737 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1738 << getShiftExtendAmount();
// Tail of this case (implicit-amount marker and closing ">") is elided.
1739 if (!hasShiftExtendAmount())
1747 /// @name Auto-generated Match Functions
1750 static unsigned MatchRegisterName(StringRef Name);
// Map a lower-case vector register name "v0".."v31" to the corresponding
// 128-bit Q-register enum value. The trailing `.Default(0)` (no match) is
// elided in this listing.
1754 static unsigned matchVectorRegName(StringRef Name) {
1755 return StringSwitch<unsigned>(Name)
1756 .Case("v0", AArch64::Q0)
1757 .Case("v1", AArch64::Q1)
1758 .Case("v2", AArch64::Q2)
1759 .Case("v3", AArch64::Q3)
1760 .Case("v4", AArch64::Q4)
1761 .Case("v5", AArch64::Q5)
1762 .Case("v6", AArch64::Q6)
1763 .Case("v7", AArch64::Q7)
1764 .Case("v8", AArch64::Q8)
1765 .Case("v9", AArch64::Q9)
1766 .Case("v10", AArch64::Q10)
1767 .Case("v11", AArch64::Q11)
1768 .Case("v12", AArch64::Q12)
1769 .Case("v13", AArch64::Q13)
1770 .Case("v14", AArch64::Q14)
1771 .Case("v15", AArch64::Q15)
1772 .Case("v16", AArch64::Q16)
1773 .Case("v17", AArch64::Q17)
1774 .Case("v18", AArch64::Q18)
1775 .Case("v19", AArch64::Q19)
1776 .Case("v20", AArch64::Q20)
1777 .Case("v21", AArch64::Q21)
1778 .Case("v22", AArch64::Q22)
1779 .Case("v23", AArch64::Q23)
1780 .Case("v24", AArch64::Q24)
1781 .Case("v25", AArch64::Q25)
1782 .Case("v26", AArch64::Q26)
1783 .Case("v27", AArch64::Q27)
1784 .Case("v28", AArch64::Q28)
1785 .Case("v29", AArch64::Q29)
1786 .Case("v30", AArch64::Q30)
1787 .Case("v31", AArch64::Q31)
// Return true if Name is a recognized vector arrangement suffix (e.g.
// ".8b", ".4s"). The actual .Case list and .Default are elided in this
// listing; matching is case-insensitive via Name.lower().
1791 static bool isValidVectorKind(StringRef Name) {
1792 return StringSwitch<bool>(Name.lower())
1802 // Accept the width neutral ones, too, for verbose syntax. If those
1803 // aren't used in the right places, the token operand won't match so
1804 // all will work out.
// Decompose an already-validated vector kind suffix (e.g. ".8b") into its
// lane count and element-kind letter. A two-character suffix like ".b" has
// no lane count (early-out at the size check; the `return` itself is elided
// in this listing).
1812 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1813 char &ElementKind) {
1814 assert(isValidVectorKind(Name));
// The element kind is always the final letter (b/h/s/d), lower-cased.
1816 ElementKind = Name.lower()[Name.size() - 1];
1819 if (Name.size() == 2)
1822 // Parse the lane count
// Drop the leading '.' and accumulate decimal digits.
1823 Name = Name.drop_front();
1824 while (isdigit(Name.front())) {
1825 NumElements = 10 * NumElements + (Name.front() - '0');
1826 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true (failure) when tryParseRegister() yields the -1 sentinel.
1830 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1832 StartLoc = getLoc();
1833 RegNo = tryParseRegister();
// End location is the character just before the current lexer position.
1834 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1835 return (RegNo == (unsigned)-1);
1838 // Matches a register name or register alias previously defined by '.req'
// First try the real register tables (vector or scalar as requested), then
// fall back to user-defined .req aliases. The guard and return statements
// between these steps are elided in this listing.
1839 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1841 unsigned RegNum = isVector ? matchVectorRegName(Name)
1842 : MatchRegisterName(Name);
1845 // Check for aliases registered via .req. Canonicalize to lower case.
1846 // That's more consistent since register names are case insensitive, and
1847 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1848 auto Entry = RegisterReqs.find(Name.lower());
1849 if (Entry == RegisterReqs.end())
1851 // set RegNum if the match is the right kind of register
// Each .req entry stores (isVector, register number); only honor it when
// the requested kind matches.
1852 if (isVector == Entry->getValue().first)
1853 RegNum = Entry->getValue().second;
1858 /// tryParseRegister - Try to parse a register name. The token must be an
1859 /// Identifier when called, and if it is a register name the token is eaten and
1860 /// the register is added to the operand list.
// Returns the register number, or (per ParseRegister above) -1 on failure;
// the failure-return line itself is elided in this listing.
1861 int AArch64AsmParser::tryParseRegister() {
1862 MCAsmParser &Parser = getParser();
1863 const AsmToken &Tok = Parser.getTok();
1864 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Register names are case-insensitive; canonicalize once up front.
1866 std::string lowerCase = Tok.getString().lower();
1867 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1868 // Also handle a few aliases of registers.
// fp/lr are ABI names; x31/w31 are accepted as spellings of the zero regs.
1870 RegNum = StringSwitch<unsigned>(lowerCase)
1871 .Case("fp", AArch64::FP)
1872 .Case("lr", AArch64::LR)
1873 .Case("x31", AArch64::XZR)
1874 .Case("w31", AArch64::WZR)
// (.Default and the surrounding guard are elided in this listing)
1880 Parser.Lex(); // Eat identifier token.
1884 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1885 /// kind specifier. If it is a register specifier, eat the token and return it.
// On success, Kind is set to the ".<arrangement>" suffix (including the
// dot) when one is present. Error paths return a failure sentinel on lines
// elided in this listing.
1886 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1887 MCAsmParser &Parser = getParser();
1888 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1889 TokError("vector register expected");
1893 StringRef Name = Parser.getTok().getString();
1894 // If there is a kind specifier, it's separated from the register name by
// a '.' (comment continuation elided in this listing).
1896 size_t Start = 0, Next = Name.find('.');
1897 StringRef Head = Name.slice(Start, Next);
1898 unsigned RegNum = matchRegisterNameAlias(Head, true);
1901 if (Next != StringRef::npos) {
1902 Kind = Name.slice(Next, StringRef::npos);
1903 if (!isValidVectorKind(Kind)) {
1904 TokError("invalid vector kind qualifier");
1908 Parser.Lex(); // Eat the register token.
// Reached only when no vector register matched; `expected` presumably
// gates whether this diagnostic is emitted — guard elided, TODO confirm.
1913 TokError("vector register expected");
1917 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Accepts "cN"/"CN" with 0 <= N <= 15 (the CRn/CRm fields of SYS-class
// instructions) and pushes a k_SysCR operand.
1918 AArch64AsmParser::OperandMatchResultTy
1919 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1920 MCAsmParser &Parser = getParser();
1923 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1924 Error(S, "Expected cN operand where 0 <= N <= 15");
1925 return MatchOperand_ParseFail;
1928 StringRef Tok = Parser.getTok().getIdentifier();
1929 if (Tok[0] != 'c' && Tok[0] != 'C') {
1930 Error(S, "Expected cN operand where 0 <= N <= 15");
1931 return MatchOperand_ParseFail;
// Parse the decimal index after the leading 'c'/'C'.
1935 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1936 if (BadNum || CRNum > 15) {
1937 Error(S, "Expected cN operand where 0 <= N <= 15");
1938 return MatchOperand_ParseFail;
1941 Parser.Lex(); // Eat identifier token.
1943 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1944 return MatchOperand_Success;
1947 /// tryParsePrefetch - Try to parse a prefetch operand.
// Accepts either a raw 5-bit immediate (optionally '#'-prefixed) or a named
// PRFM hint; both forms are canonicalized through AArch64PRFM::PRFMMapper
// so the operand carries both the numeric op and its printable name.
1948 AArch64AsmParser::OperandMatchResultTy
1949 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1950 MCAsmParser &Parser = getParser();
1952 const AsmToken &Tok = Parser.getTok();
1953 // Either an identifier for named values or a 5-bit immediate.
1954 bool Hash = Tok.is(AsmToken::Hash);
1955 if (Hash || Tok.is(AsmToken::Integer)) {
1957 Parser.Lex(); // Eat hash token.
1958 const MCExpr *ImmVal;
1959 if (getParser().parseExpression(ImmVal))
1960 return MatchOperand_ParseFail;
1962 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
// (the `if (!MCE)` guard line is elided in this listing)
1964 TokError("immediate value expected for prefetch operand");
1965 return MatchOperand_ParseFail;
1967 unsigned prfop = MCE->getValue();
// (range check `if (prfop > 31)` elided in this listing)
1969 TokError("prefetch operand out of range, [0,31] expected");
1970 return MatchOperand_ParseFail;
// Recover the canonical name (if any) for the numeric op.
1974 auto Mapper = AArch64PRFM::PRFMMapper();
1975 StringRef Name = Mapper.toString(MCE->getValue(), Valid);
1976 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
1978 return MatchOperand_Success;
// Named-hint path: look the identifier up in the PRFM table.
1981 if (Tok.isNot(AsmToken::Identifier)) {
1982 TokError("pre-fetch hint expected");
1983 return MatchOperand_ParseFail;
1987 auto Mapper = AArch64PRFM::PRFMMapper();
1988 unsigned prfop = Mapper.fromString(Tok.getString(), Valid);
// (the `if (!Valid)` guard line is elided in this listing)
1990 TokError("pre-fetch hint expected");
1991 return MatchOperand_ParseFail;
1994 Parser.Lex(); // Eat identifier token.
1995 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
1997 return MatchOperand_Success;
2000 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// instruction (comment continuation elided). Accepts a symbolic immediate,
// classifies any @page/@gotpage-style modifier, and rejects modifiers that
// make no sense on ADRP. An unmodified ELF symbol is implicitly wrapped as
// VK_ABS_PAGE.
2002 AArch64AsmParser::OperandMatchResultTy
2003 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2004 MCAsmParser &Parser = getParser();
// Optional '#' before the expression.
2008 if (Parser.getTok().is(AsmToken::Hash)) {
2009 Parser.Lex(); // Eat hash token.
2012 if (parseSymbolicImmVal(Expr))
2013 return MatchOperand_ParseFail;
2015 AArch64MCExpr::VariantKind ELFRefKind;
2016 MCSymbolRefExpr::VariantKind DarwinRefKind;
2018 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2019 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2020 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2021 // No modifier was specified at all; this is the syntax for an ELF basic
2022 // ADRP relocation (unfortunately).
2024 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2025 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2026 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// (the `Addend != 0` half of this condition is elided in this listing)
2028 Error(S, "gotpage label reference not allowed an addend");
2029 return MatchOperand_ParseFail;
2030 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2031 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2032 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2033 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2034 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2035 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2036 // The operand must be an @page or @gotpage qualified symbolref.
2037 Error(S, "page or gotpage label reference expected");
2038 return MatchOperand_ParseFail;
2042 // We have either a label reference possibly with addend or an immediate. The
2043 // addend is a raw value here. The linker will adjust it to only reference the
// page portion (comment continuation elided in this listing).
2045 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2046 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2048 return MatchOperand_Success;
2051 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// instruction (comment continuation elided). Unlike ADRP, no modifier
// classification is needed — any expression is accepted as-is.
2053 AArch64AsmParser::OperandMatchResultTy
2054 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2055 MCAsmParser &Parser = getParser();
// Optional '#' before the expression.
2059 if (Parser.getTok().is(AsmToken::Hash)) {
2060 Parser.Lex(); // Eat hash token.
2063 if (getParser().parseExpression(Expr))
2064 return MatchOperand_ParseFail;
2066 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2067 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2069 return MatchOperand_Success;
2072 /// tryParseFPImm - A floating point immediate expression operand.
// Accepts "#1.5"-style reals (converted to the 8-bit FMOV encoding via
// getFP64Imm), "#0x.."-style pre-encoded values, and plain integers parsed
// as reals. Zero is deliberately let through with encoding -1 so later
// processing can substitute the zero register.
2073 AArch64AsmParser::OperandMatchResultTy
2074 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2075 MCAsmParser &Parser = getParser();
// Optional '#'; whether it was seen presumably decides the NoMatch path
// at the end (flag assignment elided in this listing — TODO confirm).
2079 if (Parser.getTok().is(AsmToken::Hash)) {
2080 Parser.Lex(); // Eat '#'
2084 // Handle negation, as that still comes through as a separate token.
2085 bool isNegative = false;
2086 if (Parser.getTok().is(AsmToken::Minus)) {
// (setting isNegative and eating the '-' are elided in this listing)
2090 const AsmToken &Tok = Parser.getTok();
2091 if (Tok.is(AsmToken::Real)) {
2092 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2093 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2094 // If we had a '-' in front, toggle the sign bit.
2095 IntVal ^= (uint64_t)isNegative << 63;
2096 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2097 Parser.Lex(); // Eat the token.
2098 // Check for out of range values. As an exception, we let Zero through,
2099 // as we handle that special case in post-processing before matching in
2100 // order to use the zero register for it.
2101 if (Val == -1 && !RealVal.isZero()) {
2102 TokError("expected compatible register or floating-point constant");
2103 return MatchOperand_ParseFail;
2105 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2106 return MatchOperand_Success;
2108 if (Tok.is(AsmToken::Integer)) {
// Hex literal: already the raw 8-bit encoding, just range-check it.
2110 if (!isNegative && Tok.getString().startswith("0x")) {
2111 Val = Tok.getIntVal();
2112 if (Val > 255 || Val < 0) {
2113 TokError("encoded floating point value out of range");
2114 return MatchOperand_ParseFail;
// else-branch (braces elided): treat the integer text as a real number.
2117 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2118 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2119 // If we had a '-' in front, toggle the sign bit.
2120 IntVal ^= (uint64_t)isNegative << 63;
2121 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2123 Parser.Lex(); // Eat the token.
2124 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2125 return MatchOperand_Success;
// Not a real or integer: NoMatch if nothing consumed yet, error otherwise
// (the guard between these two lines is elided in this listing).
2129 return MatchOperand_NoMatch;
2131 TokError("invalid floating point immediate");
2132 return MatchOperand_ParseFail;
2135 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
// Parses "#imm" optionally followed by ", lsl #N". A bare constant larger
// than 12 bits whose low 12 bits are zero is automatically re-expressed as
// (imm >> 12, lsl #12) so it still fits the encoding.
2136 AArch64AsmParser::OperandMatchResultTy
2137 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2138 MCAsmParser &Parser = getParser();
2141 if (Parser.getTok().is(AsmToken::Hash))
2142 Parser.Lex(); // Eat '#'
2143 else if (Parser.getTok().isNot(AsmToken::Integer))
2144 // Operand should start from # or should be integer, emit error otherwise.
2145 return MatchOperand_NoMatch;
2148 if (parseSymbolicImmVal(Imm))
2149 return MatchOperand_ParseFail;
// No ", lsl" follows: emit the immediate with an implicit shift.
2150 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2151 uint64_t ShiftAmount = 0;
2152 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
// (the `if (MCE)` guard line is elided in this listing)
2154 int64_t Val = MCE->getValue();
2155 if (Val > 0xfff && (Val & 0xfff) == 0) {
// Fold an over-wide page-aligned constant into shifted form.
2156 Imm = MCConstantExpr::Create(Val >> 12, getContext());
// (ShiftAmount = 12 assignment elided in this listing)
2160 SMLoc E = Parser.getTok().getLoc();
2161 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2163 return MatchOperand_Success;
// Explicit shift path: consume ", lsl #N" (comma-eating line elided).
2169 // The optional operand must be "lsl #N" where N is non-negative.
2170 if (!Parser.getTok().is(AsmToken::Identifier) ||
2171 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2172 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2173 return MatchOperand_ParseFail;
// (eating "lsl" and the optional '#' is elided in this listing)
2179 if (Parser.getTok().is(AsmToken::Hash)) {
2183 if (Parser.getTok().isNot(AsmToken::Integer)) {
2184 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2185 return MatchOperand_ParseFail;
2188 int64_t ShiftAmount = Parser.getTok().getIntVal();
2190 if (ShiftAmount < 0) {
2191 Error(Parser.getTok().getLoc(), "positive shift amount required");
2192 return MatchOperand_ParseFail;
2194 Parser.Lex(); // Eat the number
2196 SMLoc E = Parser.getTok().getLoc();
2197 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2198 S, E, getContext()));
2199 return MatchOperand_Success;
2202 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of the standard AArch64 condition mnemonics
// (including the cs/hs and cc/lo synonyms) to AArch64CC values; unknown
// strings map to AArch64CC::Invalid. The `return CC;` line is elided in
// this listing.
2203 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2204 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2205 .Case("eq", AArch64CC::EQ)
2206 .Case("ne", AArch64CC::NE)
2207 .Case("cs", AArch64CC::HS)
2208 .Case("hs", AArch64CC::HS)
2209 .Case("cc", AArch64CC::LO)
2210 .Case("lo", AArch64CC::LO)
2211 .Case("mi", AArch64CC::MI)
2212 .Case("pl", AArch64CC::PL)
2213 .Case("vs", AArch64CC::VS)
2214 .Case("vc", AArch64CC::VC)
2215 .Case("hi", AArch64CC::HI)
2216 .Case("ls", AArch64CC::LS)
2217 .Case("ge", AArch64CC::GE)
2218 .Case("lt", AArch64CC::LT)
2219 .Case("gt", AArch64CC::GT)
2220 .Case("le", AArch64CC::LE)
2221 .Case("al", AArch64CC::AL)
2222 .Case("nv", AArch64CC::NV)
2223 .Default(AArch64CC::Invalid)
2227 /// parseCondCode - Parse a Condition Code operand.
// Reads the identifier token, maps it via parseCondCodeString, optionally
// inverts it (for aliases like CSINC-based forms where AL/NV cannot be
// inverted), and pushes a k_CondCode operand. Returns true on error.
2228 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2229 bool invertCondCode) {
2230 MCAsmParser &Parser = getParser();
2232 const AsmToken &Tok = Parser.getTok();
2233 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2235 StringRef Cond = Tok.getString();
2236 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2237 if (CC == AArch64CC::Invalid)
2238 return TokError("invalid condition code");
2239 Parser.Lex(); // Eat identifier token.
2241 if (invertCondCode) {
2242 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2243 return TokError("condition codes AL and NV are invalid for this instruction");
2244 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2248 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2252 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2253 /// them if present.
// Recognizes a shift (lsl/lsr/asr/ror/msl) or extend (uxtb..sxtx) keyword,
// then an optional "#imm". Shifts require the immediate; extends default
// it to an implicit 0.
2254 AArch64AsmParser::OperandMatchResultTy
2255 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2256 MCAsmParser &Parser = getParser();
2257 const AsmToken &Tok = Parser.getTok();
2258 std::string LowerID = Tok.getString().lower();
2259 AArch64_AM::ShiftExtendType ShOp =
2260 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2261 .Case("lsl", AArch64_AM::LSL)
2262 .Case("lsr", AArch64_AM::LSR)
2263 .Case("asr", AArch64_AM::ASR)
2264 .Case("ror", AArch64_AM::ROR)
2265 .Case("msl", AArch64_AM::MSL)
2266 .Case("uxtb", AArch64_AM::UXTB)
2267 .Case("uxth", AArch64_AM::UXTH)
2268 .Case("uxtw", AArch64_AM::UXTW)
2269 .Case("uxtx", AArch64_AM::UXTX)
2270 .Case("sxtb", AArch64_AM::SXTB)
2271 .Case("sxth", AArch64_AM::SXTH)
2272 .Case("sxtw", AArch64_AM::SXTW)
2273 .Case("sxtx", AArch64_AM::SXTX)
2274 .Default(AArch64_AM::InvalidShiftExtend);
2276 if (ShOp == AArch64_AM::InvalidShiftExtend)
2277 return MatchOperand_NoMatch;
2279 SMLoc S = Tok.getLoc();
// (eating the keyword token is elided in this listing)
2282 bool Hash = getLexer().is(AsmToken::Hash);
2283 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2284 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2285 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2286 ShOp == AArch64_AM::MSL) {
2287 // We expect a number here.
2288 TokError("expected #imm after shift specifier");
2289 return MatchOperand_ParseFail;
2292 // "extend" type operatoins don't need an immediate, #0 is implicit.
2293 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
// HasExplicitAmount = false: amount 0 was implied, not written.
2295 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2296 return MatchOperand_Success;
// (the `if (Hash)` guard around the next line is elided in this listing)
2300 Parser.Lex(); // Eat the '#'.
2302 // Make sure we do actually have a number or a parenthesized expression.
2303 SMLoc E = Parser.getTok().getLoc();
2304 if (!Parser.getTok().is(AsmToken::Integer) &&
2305 !Parser.getTok().is(AsmToken::LParen)) {
2306 Error(E, "expected integer shift amount");
2307 return MatchOperand_ParseFail;
2310 const MCExpr *ImmVal;
2311 if (getParser().parseExpression(ImmVal))
2312 return MatchOperand_ParseFail;
2314 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
// (the `if (!MCE)` guard line is elided in this listing)
2316 Error(E, "expected constant '#imm' after shift specifier");
2317 return MatchOperand_ParseFail;
2320 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
// HasExplicitAmount = true: the amount was written in the source.
2321 Operands.push_back(AArch64Operand::CreateShiftExtend(
2322 ShOp, MCE->getValue(), true, S, E, getContext()));
2323 return MatchOperand_Success;
2326 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2327 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// The SYS_ALIAS macro below pushes the four SYS operands (op1, Cn, Cm, op2)
/// for the matched alias. Whether a trailing register operand is expected is
/// derived from the alias name: ops whose name contains "all" operate on
/// everything and take no register. Returns true on error.
2328 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2329 OperandVector &Operands) {
2330 if (Name.find('.') != StringRef::npos)
2331 return TokError("invalid operand");
2335 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2337 MCAsmParser &Parser = getParser();
2338 const AsmToken &Tok = Parser.getTok();
2339 StringRef Op = Tok.getString();
2340 SMLoc S = Tok.getLoc();
2342 const MCExpr *Expr = nullptr;
2344 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2346 Expr = MCConstantExpr::Create(op1, getContext()); \
2347 Operands.push_back( \
2348 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2349 Operands.push_back( \
2350 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2351 Operands.push_back( \
2352 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2353 Expr = MCConstantExpr::Create(op2, getContext()); \
2354 Operands.push_back( \
2355 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2358 if (Mnemonic == "ic") {
2359 if (!Op.compare_lower("ialluis")) {
2360 // SYS #0, C7, C1, #0
2361 SYS_ALIAS(0, 7, 1, 0);
2362 } else if (!Op.compare_lower("iallu")) {
2363 // SYS #0, C7, C5, #0
2364 SYS_ALIAS(0, 7, 5, 0);
2365 } else if (!Op.compare_lower("ivau")) {
2366 // SYS #3, C7, C5, #1
2367 SYS_ALIAS(3, 7, 5, 1);
2369 return TokError("invalid operand for IC instruction");
2371 } else if (Mnemonic == "dc") {
2372 if (!Op.compare_lower("zva")) {
2373 // SYS #3, C7, C4, #1
2374 SYS_ALIAS(3, 7, 4, 1);
2375 } else if (!Op.compare_lower("ivac")) {
2376 // SYS #0, C7, C6, #1
2377 SYS_ALIAS(0, 7, 6, 1);
2378 } else if (!Op.compare_lower("isw")) {
2379 // SYS #0, C7, C6, #2
2380 SYS_ALIAS(0, 7, 6, 2);
2381 } else if (!Op.compare_lower("cvac")) {
2382 // SYS #3, C7, C10, #1
2383 SYS_ALIAS(3, 7, 10, 1);
2384 } else if (!Op.compare_lower("csw")) {
2385 // SYS #0, C7, C10, #2
2386 SYS_ALIAS(0, 7, 10, 2);
2387 } else if (!Op.compare_lower("cvau")) {
2388 // SYS #3, C7, C11, #1
2389 SYS_ALIAS(3, 7, 11, 1);
2390 } else if (!Op.compare_lower("civac")) {
2391 // SYS #3, C7, C14, #1
2392 SYS_ALIAS(3, 7, 14, 1);
2393 } else if (!Op.compare_lower("cisw")) {
2394 // SYS #0, C7, C14, #2
2395 SYS_ALIAS(0, 7, 14, 2);
2397 return TokError("invalid operand for DC instruction");
2399 } else if (Mnemonic == "at") {
2400 if (!Op.compare_lower("s1e1r")) {
2401 // SYS #0, C7, C8, #0
2402 SYS_ALIAS(0, 7, 8, 0);
2403 } else if (!Op.compare_lower("s1e2r")) {
2404 // SYS #4, C7, C8, #0
2405 SYS_ALIAS(4, 7, 8, 0);
2406 } else if (!Op.compare_lower("s1e3r")) {
2407 // SYS #6, C7, C8, #0
2408 SYS_ALIAS(6, 7, 8, 0);
2409 } else if (!Op.compare_lower("s1e1w")) {
2410 // SYS #0, C7, C8, #1
2411 SYS_ALIAS(0, 7, 8, 1);
2412 } else if (!Op.compare_lower("s1e2w")) {
2413 // SYS #4, C7, C8, #1
2414 SYS_ALIAS(4, 7, 8, 1);
2415 } else if (!Op.compare_lower("s1e3w")) {
2416 // SYS #6, C7, C8, #1
2417 SYS_ALIAS(6, 7, 8, 1);
2418 } else if (!Op.compare_lower("s1e0r")) {
2419 // SYS #0, C7, C8, #2
2420 SYS_ALIAS(0, 7, 8, 2);
2421 } else if (!Op.compare_lower("s1e0w")) {
2422 // SYS #0, C7, C8, #3
2423 SYS_ALIAS(0, 7, 8, 3);
2424 } else if (!Op.compare_lower("s12e1r")) {
2425 // SYS #4, C7, C8, #4
2426 SYS_ALIAS(4, 7, 8, 4);
2427 } else if (!Op.compare_lower("s12e1w")) {
2428 // SYS #4, C7, C8, #5
2429 SYS_ALIAS(4, 7, 8, 5);
2430 } else if (!Op.compare_lower("s12e0r")) {
2431 // SYS #4, C7, C8, #6
2432 SYS_ALIAS(4, 7, 8, 6);
2433 } else if (!Op.compare_lower("s12e0w")) {
2434 // SYS #4, C7, C8, #7
2435 SYS_ALIAS(4, 7, 8, 7);
2437 return TokError("invalid operand for AT instruction");
2439 } else if (Mnemonic == "tlbi") {
2440 if (!Op.compare_lower("vmalle1is")) {
2441 // SYS #0, C8, C3, #0
2442 SYS_ALIAS(0, 8, 3, 0);
2443 } else if (!Op.compare_lower("alle2is")) {
2444 // SYS #4, C8, C3, #0
2445 SYS_ALIAS(4, 8, 3, 0);
2446 } else if (!Op.compare_lower("alle3is")) {
2447 // SYS #6, C8, C3, #0
2448 SYS_ALIAS(6, 8, 3, 0);
2449 } else if (!Op.compare_lower("vae1is")) {
2450 // SYS #0, C8, C3, #1
2451 SYS_ALIAS(0, 8, 3, 1);
2452 } else if (!Op.compare_lower("vae2is")) {
2453 // SYS #4, C8, C3, #1
2454 SYS_ALIAS(4, 8, 3, 1);
2455 } else if (!Op.compare_lower("vae3is")) {
2456 // SYS #6, C8, C3, #1
2457 SYS_ALIAS(6, 8, 3, 1);
2458 } else if (!Op.compare_lower("aside1is")) {
2459 // SYS #0, C8, C3, #2
2460 SYS_ALIAS(0, 8, 3, 2);
2461 } else if (!Op.compare_lower("vaae1is")) {
2462 // SYS #0, C8, C3, #3
2463 SYS_ALIAS(0, 8, 3, 3);
2464 } else if (!Op.compare_lower("alle1is")) {
2465 // SYS #4, C8, C3, #4
2466 SYS_ALIAS(4, 8, 3, 4);
2467 } else if (!Op.compare_lower("vale1is")) {
2468 // SYS #0, C8, C3, #5
2469 SYS_ALIAS(0, 8, 3, 5);
2470 } else if (!Op.compare_lower("vaale1is")) {
2471 // SYS #0, C8, C3, #7
2472 SYS_ALIAS(0, 8, 3, 7);
2473 } else if (!Op.compare_lower("vmalle1")) {
2474 // SYS #0, C8, C7, #0
2475 SYS_ALIAS(0, 8, 7, 0);
2476 } else if (!Op.compare_lower("alle2")) {
2477 // SYS #4, C8, C7, #0
2478 SYS_ALIAS(4, 8, 7, 0);
2479 } else if (!Op.compare_lower("vale2is")) {
2480 // SYS #4, C8, C3, #5
2481 SYS_ALIAS(4, 8, 3, 5);
2482 } else if (!Op.compare_lower("vale3is")) {
2483 // SYS #6, C8, C3, #5
2484 SYS_ALIAS(6, 8, 3, 5);
2485 } else if (!Op.compare_lower("alle3")) {
2486 // SYS #6, C8, C7, #0
2487 SYS_ALIAS(6, 8, 7, 0);
2488 } else if (!Op.compare_lower("vae1")) {
2489 // SYS #0, C8, C7, #1
2490 SYS_ALIAS(0, 8, 7, 1);
2491 } else if (!Op.compare_lower("vae2")) {
2492 // SYS #4, C8, C7, #1
2493 SYS_ALIAS(4, 8, 7, 1);
2494 } else if (!Op.compare_lower("vae3")) {
2495 // SYS #6, C8, C7, #1
2496 SYS_ALIAS(6, 8, 7, 1);
2497 } else if (!Op.compare_lower("aside1")) {
2498 // SYS #0, C8, C7, #2
2499 SYS_ALIAS(0, 8, 7, 2);
2500 } else if (!Op.compare_lower("vaae1")) {
2501 // SYS #0, C8, C7, #3
2502 SYS_ALIAS(0, 8, 7, 3);
2503 } else if (!Op.compare_lower("alle1")) {
2504 // SYS #4, C8, C7, #4
2505 SYS_ALIAS(4, 8, 7, 4);
2506 } else if (!Op.compare_lower("vale1")) {
2507 // SYS #0, C8, C7, #5
2508 SYS_ALIAS(0, 8, 7, 5);
2509 } else if (!Op.compare_lower("vale2")) {
2510 // SYS #4, C8, C7, #5
2511 SYS_ALIAS(4, 8, 7, 5);
2512 } else if (!Op.compare_lower("vale3")) {
2513 // SYS #6, C8, C7, #5
2514 SYS_ALIAS(6, 8, 7, 5);
2515 } else if (!Op.compare_lower("vaale1")) {
2516 // SYS #0, C8, C7, #7
2517 SYS_ALIAS(0, 8, 7, 7);
2518 } else if (!Op.compare_lower("ipas2e1")) {
2519 // SYS #4, C8, C4, #1
2520 SYS_ALIAS(4, 8, 4, 1);
2521 } else if (!Op.compare_lower("ipas2le1")) {
2522 // SYS #4, C8, C4, #5
2523 SYS_ALIAS(4, 8, 4, 5);
2524 } else if (!Op.compare_lower("ipas2e1is")) {
2525 // SYS #4, C8, C0, #1
2526 SYS_ALIAS(4, 8, 0, 1);
2527 } else if (!Op.compare_lower("ipas2le1is")) {
2528 // SYS #4, C8, C0, #5
2529 SYS_ALIAS(4, 8, 0, 5);
2530 } else if (!Op.compare_lower("vmalls12e1")) {
2531 // SYS #4, C8, C7, #6
2532 SYS_ALIAS(4, 8, 7, 6);
2533 } else if (!Op.compare_lower("vmalls12e1is")) {
2534 // SYS #4, C8, C3, #6
2535 SYS_ALIAS(4, 8, 3, 6);
2537 return TokError("invalid operand for TLBI instruction");
2543 Parser.Lex(); // Eat operand.
// Ops that act on "all" entries take no register; everything else needs one.
2545 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2546 bool HasRegister = false;
2548 // Check for the optional register operand.
2549 if (getLexer().is(AsmToken::Comma)) {
2550 Parser.Lex(); // Eat comma.
2552 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2553 return TokError("expected register operand");
2558 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2559 Parser.eatToEndOfStatement();
2560 return TokError("unexpected token in argument list");
// Diagnose a mismatch between the alias's register requirement and what
// was actually written.
2563 if (ExpectRegister && !HasRegister) {
2564 return TokError("specified " + Mnemonic + " op requires a register");
2566 else if (!ExpectRegister && HasRegister) {
2567 return TokError("specified " + Mnemonic + " op does not use a register");
2570 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier.
/// Accepts either a '#imm' (or bare integer) in the range [0, 15], or a
/// named barrier option; for ISB the only valid named option is 'sy'.
2574 AArch64AsmParser::OperandMatchResultTy
2575 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2576 MCAsmParser &Parser = getParser();
2577 const AsmToken &Tok = Parser.getTok();
2579 // Can be either a #imm style literal or an option name
2580 bool Hash = Tok.is(AsmToken::Hash);
2581 if (Hash || Tok.is(AsmToken::Integer)) {
2582 // Immediate operand.
2584 Parser.Lex(); // Eat the '#'
2585 const MCExpr *ImmVal;
2586 SMLoc ExprLoc = getLoc();
2587 if (getParser().parseExpression(ImmVal))
2588 return MatchOperand_ParseFail;
2589 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2591 Error(ExprLoc, "immediate value expected for barrier operand");
2592 return MatchOperand_ParseFail;
// Barrier immediates are a 4-bit field.
2594 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2595 Error(ExprLoc, "barrier operand out of range");
2596 return MatchOperand_ParseFail;
// Map the numeric value back to a name (if any) for pretty-printing.
2599 auto Mapper = AArch64DB::DBarrierMapper();
2600 StringRef Name = Mapper.toString(MCE->getValue(), Valid);
2601 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2602 ExprLoc, getContext()));
2603 return MatchOperand_Success;
2606 if (Tok.isNot(AsmToken::Identifier)) {
2607 TokError("invalid operand for instruction");
2608 return MatchOperand_ParseFail;
2612 auto Mapper = AArch64DB::DBarrierMapper();
2613 unsigned Opt = Mapper.fromString(Tok.getString(), Valid);
2615 TokError("invalid barrier option name");
2616 return MatchOperand_ParseFail;
2619 // The only valid named option for ISB is 'sy'
2620 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2621 TokError("'sy' or #imm operand expected");
2622 return MatchOperand_ParseFail;
2625 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2626 getLoc(), getContext()));
2627 Parser.Lex(); // Consume the option
2629 return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand (for MRS/MSR and
/// PSTATE accesses). The identifier is looked up in all three name spaces
/// (MRS-readable, MSR-writable, PSTATE field); each lookup yields -1U when
/// the name is unknown in that space, and the operand records all three
/// results so the matcher can pick the appropriate one.
2632 AArch64AsmParser::OperandMatchResultTy
2633 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2634 MCAsmParser &Parser = getParser();
2635 const AsmToken &Tok = Parser.getTok();
2637 if (Tok.isNot(AsmToken::Identifier))
2638 return MatchOperand_NoMatch;
2641 auto MRSMapper = AArch64SysReg::MRSMapper(STI.getFeatureBits());
2642 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), IsKnown);
2643 assert(IsKnown == (MRSReg != -1U) &&
2644 "register should be -1 if and only if it's unknown");
2646 auto MSRMapper = AArch64SysReg::MSRMapper(STI.getFeatureBits());
2647 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), IsKnown);
2648 assert(IsKnown == (MSRReg != -1U) &&
2649 "register should be -1 if and only if it's unknown");
2651 auto PStateMapper = AArch64PState::PStateMapper();
2652 uint32_t PStateField = PStateMapper.fromString(Tok.getString(), IsKnown);
2653 assert(IsKnown == (PStateField != -1U) &&
2654 "register should be -1 if and only if it's unknown");
2656 Operands.push_back(AArch64Operand::CreateSysReg(
2657 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2658 Parser.Lex(); // Eat identifier
2660 return MatchOperand_Success;
2663 /// tryParseVectorRegister - Parse a vector register operand.
///
/// On success appends the register operand, any explicit arrangement
/// qualifier (e.g. ".8b") as a token operand, and an optional trailing
/// lane index in square brackets. Returns true if no vector register is
/// matched or on a parse error.
2664 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2665 MCAsmParser &Parser = getParser();
2666 if (Parser.getTok().isNot(AsmToken::Identifier))
2670 // Check for a vector register specifier first.
2672 int64_t Reg = tryMatchVectorRegister(Kind, false);
2676 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2677 // If there was an explicit qualifier, that goes on as a literal text
2681 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2683 // If there is an index specifier following the register, parse that too.
2684 if (Parser.getTok().is(AsmToken::LBrac)) {
2685 SMLoc SIdx = getLoc();
2686 Parser.Lex(); // Eat left bracket token.
2688 const MCExpr *ImmVal;
2689 if (getParser().parseExpression(ImmVal))
// The lane index must be a compile-time constant.
2691 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2693 TokError("immediate value expected for vector index");
2698 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2699 Error(E, "']' expected");
2703 Parser.Lex(); // Eat right bracket token.
2705 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2712 /// parseRegister - Parse a non-vector register operand.
///
/// Tries vector-register syntax first (so callers need only one entry
/// point), then falls back to a scalar register. Returns true on failure.
2713 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2714 MCAsmParser &Parser = getParser();
2716 // Try for a vector register.
2717 if (!tryParseVectorRegister(Operands))
2720 // Try for a scalar register.
2721 int64_t Reg = tryParseRegister();
2725 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2727 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2728 // as a string token in the instruction itself.
2729 if (getLexer().getKind() == AsmToken::LBrac) {
2730 SMLoc LBracS = getLoc();
2732 const AsmToken &Tok = Parser.getTok();
2733 if (Tok.is(AsmToken::Integer)) {
2734 SMLoc IntS = getLoc();
2735 int64_t Val = Tok.getIntVal();
2738 if (getLexer().getKind() == AsmToken::RBrac) {
2739 SMLoc RBracS = getLoc();
// Emit "[", "1", "]" as literal tokens so the matcher can see them.
2742 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2744 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2746 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":spec:expr" (e.g.
/// ":lo12:sym"). When a specifier is present, the resulting expression is
/// wrapped in an AArch64MCExpr carrying the variant kind. Returns true on
/// error.
2756 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2757 MCAsmParser &Parser = getParser();
2758 bool HasELFModifier = false;
2759 AArch64MCExpr::VariantKind RefKind;
2761 if (Parser.getTok().is(AsmToken::Colon)) {
2762 Parser.Lex(); // Eat ':'
2763 HasELFModifier = true;
2765 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2766 Error(Parser.getTok().getLoc(),
2767 "expect relocation specifier in operand after ':'");
2771 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2772 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2773 .Case("lo12", AArch64MCExpr::VK_LO12)
2774 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2775 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2776 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2777 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2778 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2779 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2780 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2781 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2782 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2783 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2784 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2785 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2786 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2787 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2788 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2789 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2790 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2791 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2792 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2793 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2794 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2795 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2796 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2797 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2798 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2799 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2800 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2801 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2802 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2803 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2804 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2805 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2806 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2807 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2808 .Default(AArch64MCExpr::VK_INVALID);
2810 if (RefKind == AArch64MCExpr::VK_INVALID) {
2811 Error(Parser.getTok().getLoc(),
2812 "expect relocation specifier in operand after ':'");
2816 Parser.Lex(); // Eat identifier
2818 if (Parser.getTok().isNot(AsmToken::Colon)) {
2819 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2822 Parser.Lex(); // Eat ':'
2825 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the fixup emitter knows the relocation variant.
2829 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2834 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts both the range form "{ v0.8b - v3.8b }" and the comma form
/// "{ v0.8b, v1.8b, ... }". All registers must share the same arrangement
/// suffix; in the comma form they must also be sequential with wraparound
/// at v31. An optional lane index "[n]" may follow the closing brace.
/// Returns true on error.
2835 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2836 MCAsmParser &Parser = getParser();
2837 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2839 Parser.Lex(); // Eat left bracket token.
2841 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2844 int64_t PrevReg = FirstReg;
2847 if (Parser.getTok().is(AsmToken::Minus)) {
2848 Parser.Lex(); // Eat the minus.
2850 SMLoc Loc = getLoc();
2852 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2855 // Any Kind suffixes must match on all regs in the list.
2856 if (Kind != NextKind)
2857 return Error(Loc, "mismatched register size suffix");
// Distance within the 32-register file, allowing wraparound past v31.
2859 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2861 if (Space == 0 || Space > 3) {
2862 return Error(Loc, "invalid number of vectors");
2868 while (Parser.getTok().is(AsmToken::Comma)) {
2869 Parser.Lex(); // Eat the comma token.
2871 SMLoc Loc = getLoc();
2873 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2876 // Any Kind suffixes must match on all regs in the list.
2877 if (Kind != NextKind)
2878 return Error(Loc, "mismatched register size suffix");
2880 // Registers must be incremental (with wraparound at 31)
2881 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2882 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2883 return Error(Loc, "registers must be sequential");
2890 if (Parser.getTok().isNot(AsmToken::RCurly))
2891 return Error(getLoc(), "'}' expected");
2892 Parser.Lex(); // Eat the '}' token.
2895 return Error(S, "invalid number of vectors");
2897 unsigned NumElements = 0;
2898 char ElementKind = 0;
2900 parseValidVectorKind(Kind, NumElements, ElementKind);
2902 Operands.push_back(AArch64Operand::CreateVectorList(
2903 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2905 // If there is an index specifier following the list, parse that too.
2906 if (Parser.getTok().is(AsmToken::LBrac)) {
2907 SMLoc SIdx = getLoc();
2908 Parser.Lex(); // Eat left bracket token.
2910 const MCExpr *ImmVal;
2911 if (getParser().parseExpression(ImmVal))
2913 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2915 TokError("immediate value expected for vector index");
2920 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2921 Error(E, "']' expected");
2925 Parser.Lex(); // Eat right bracket token.
2927 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register that may carry a
/// trailing ", #0" (e.g. "sp, #0" as used by some LDST-exclusive aliases).
/// Any index other than an absent one or a literal 0 is rejected.
2933 AArch64AsmParser::OperandMatchResultTy
2934 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2935 MCAsmParser &Parser = getParser();
2936 const AsmToken &Tok = Parser.getTok();
2937 if (!Tok.is(AsmToken::Identifier))
2938 return MatchOperand_NoMatch;
2940 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2942 MCContext &Ctx = getContext();
2943 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2944 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2945 return MatchOperand_NoMatch;
2948 Parser.Lex(); // Eat register
// No comma: plain register form, no index to validate.
2950 if (Parser.getTok().isNot(AsmToken::Comma)) {
2952 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2953 return MatchOperand_Success;
2955 Parser.Lex(); // Eat comma.
2957 if (Parser.getTok().is(AsmToken::Hash))
2958 Parser.Lex(); // Eat hash
2960 if (Parser.getTok().isNot(AsmToken::Integer)) {
2961 Error(getLoc(), "index must be absent or #0");
2962 return MatchOperand_ParseFail;
2965 const MCExpr *ImmVal;
2966 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2967 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2968 Error(getLoc(), "index must be absent or #0");
2969 return MatchOperand_ParseFail;
2973 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2974 return MatchOperand_Success;
2977 /// parseOperand - Parse a arm instruction operand. For now this parses the
2978 /// operand regardless of the mnemonic.
///
/// \p isCondCode forces the operand to be parsed as a condition code;
/// \p invertCondCode is forwarded to parseCondCode. Tries the
/// TableGen-generated custom operand parsers first, then falls back to the
/// generic token-driven cases below. Returns true on error.
2979 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2980 bool invertCondCode) {
2981 MCAsmParser &Parser = getParser();
2982 // Check if the current operand has a custom associated parser, if so, try to
2983 // custom parse the operand, or fallback to the general approach.
2984 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2985 if (ResTy == MatchOperand_Success)
2987 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2988 // there was a match, but an error occurred, in which case, just return that
2989 // the operand parsing failed.
2990 if (ResTy == MatchOperand_ParseFail)
2993 // Nothing custom, so do general case parsing.
2995 switch (getLexer().getKind()) {
2999 if (parseSymbolicImmVal(Expr))
3000 return Error(S, "invalid operand");
3002 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3003 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3006 case AsmToken::LBrac: {
3007 SMLoc Loc = Parser.getTok().getLoc();
3008 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3010 Parser.Lex(); // Eat '['
3012 // There's no comma after a '[', so we can parse the next operand
3014 return parseOperand(Operands, false, false);
3016 case AsmToken::LCurly:
3017 return parseVectorList(Operands);
3018 case AsmToken::Identifier: {
3019 // If we're expecting a Condition Code operand, then just parse that.
3021 return parseCondCode(Operands, invertCondCode);
3023 // If it's a register name, parse it.
3024 if (!parseRegister(Operands))
3027 // This could be an optional "shift" or "extend" operand.
3028 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3029 // We can only continue if no tokens were eaten.
3030 if (GotShift != MatchOperand_NoMatch)
3033 // This was not a register so parse other operands that start with an
3034 // identifier (like labels) as expressions and create them as immediates.
3035 const MCExpr *IdVal;
3037 if (getParser().parseExpression(IdVal))
3040 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3041 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3044 case AsmToken::Integer:
3045 case AsmToken::Real:
3046 case AsmToken::Hash: {
3047 // #42 -> immediate.
3049 if (getLexer().is(AsmToken::Hash))
3052 // Parse a negative sign
3053 bool isNegative = false;
3054 if (Parser.getTok().is(AsmToken::Minus)) {
3056 // We need to consume this token only when we have a Real, otherwise
3057 // we let parseSymbolicImmVal take care of it
3058 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3062 // The only Real that should come through here is a literal #0.0 for
3063 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3064 // so convert the value.
3065 const AsmToken &Tok = Parser.getTok();
3066 if (Tok.is(AsmToken::Real)) {
3067 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3068 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3069 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3070 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3071 Mnemonic != "fcmlt")
3072 return TokError("unexpected floating point literal");
3073 else if (IntVal != 0 || isNegative)
3074 return TokError("expected floating-point constant #0.0");
3075 Parser.Lex(); // Eat the token.
// Push "#0" and ".0" as raw tokens to match the instruction pattern.
3078 AArch64Operand::CreateToken("#0", false, S, getContext()));
3080 AArch64Operand::CreateToken(".0", false, S, getContext()));
3084 const MCExpr *ImmVal;
3085 if (parseSymbolicImmVal(ImmVal))
3088 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3089 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3092 case AsmToken::Equal: {
3093 SMLoc Loc = Parser.getTok().getLoc();
3094 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3095 return Error(Loc, "unexpected token in operand");
3096 Parser.Lex(); // Eat '='
3097 const MCExpr *SubExprVal;
3098 if (getParser().parseExpression(SubExprVal))
3101 if (Operands.size() < 2 ||
3102 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3106 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3107 Operands[1]->getReg());
3109 MCContext& Ctx = getContext();
3110 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3111 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3112 if (isa<MCConstantExpr>(SubExprVal)) {
3113 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3114 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
// Normalize to a 16-bit chunk plus an LSL shift amount for MOVZ.
3115 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3119 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3120 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3121 Operands.push_back(AArch64Operand::CreateImm(
3122 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3124 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3125 ShiftAmt, true, S, E, Ctx));
3128 APInt Simm = APInt(64, Imm << ShiftAmt);
3129 // check if the immediate is an unsigned or signed 32-bit int for W regs
3130 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3131 return Error(Loc, "Immediate too large for register");
3133 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3134 const MCExpr *CPLoc =
3135 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3136 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3142 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Normalizes legacy "b<cc>" spellings to "b.<cc>", handles the
/// .req directive, routes the SYS aliases (ic/dc/at/tlbi), splits the
/// mnemonic on '.' into separate tokens, and marks which operand position
/// (if any) of the conditional-select/compare family is a condition code.
3144 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3145 StringRef Name, SMLoc NameLoc,
3146 OperandVector &Operands) {
3147 MCAsmParser &Parser = getParser();
// Canonicalize the pre-UAL "b<cc>" spellings to the "b.<cc>" form.
3148 Name = StringSwitch<StringRef>(Name.lower())
3149 .Case("beq", "b.eq")
3150 .Case("bne", "b.ne")
3151 .Case("bhs", "b.hs")
3152 .Case("bcs", "b.cs")
3153 .Case("blo", "b.lo")
3154 .Case("bcc", "b.cc")
3155 .Case("bmi", "b.mi")
3156 .Case("bpl", "b.pl")
3157 .Case("bvs", "b.vs")
3158 .Case("bvc", "b.vc")
3159 .Case("bhi", "b.hi")
3160 .Case("bls", "b.ls")
3161 .Case("bge", "b.ge")
3162 .Case("blt", "b.lt")
3163 .Case("bgt", "b.gt")
3164 .Case("ble", "b.le")
3165 .Case("bal", "b.al")
3166 .Case("bnv", "b.nv")
3169 // First check for the AArch64-specific .req directive.
3170 if (Parser.getTok().is(AsmToken::Identifier) &&
3171 Parser.getTok().getIdentifier() == ".req") {
3172 parseDirectiveReq(Name, NameLoc);
3173 // We always return 'error' for this, as we're done with this
3174 // statement and don't need to match the instruction.
3178 // Create the leading tokens for the mnemonic, split by '.' characters.
3179 size_t Start = 0, Next = Name.find('.');
3180 StringRef Head = Name.slice(Start, Next);
3182 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3183 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3184 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3185 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3186 Parser.eatToEndOfStatement();
3191 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3194 // Handle condition codes for a branch mnemonic
3195 if (Head == "b" && Next != StringRef::npos) {
3197 Next = Name.find('.', Start + 1);
3198 Head = Name.slice(Start + 1, Next);
3200 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3201 (Head.data() - Name.data()));
3202 AArch64CC::CondCode CC = parseCondCodeString(Head);
3203 if (CC == AArch64CC::Invalid)
3204 return Error(SuffixLoc, "invalid condition code");
3206 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3208 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3211 // Add the remaining tokens in the mnemonic.
3212 while (Next != StringRef::npos) {
3214 Next = Name.find('.', Start + 1);
3215 Head = Name.slice(Start, Next);
3216 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3217 (Head.data() - Name.data()) + 1);
3219 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3222 // Conditional compare instructions have a Condition Code operand, which needs
3223 // to be parsed and an immediate operand created.
3224 bool condCodeFourthOperand =
3225 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3226 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3227 Head == "csinc" || Head == "csinv" || Head == "csneg");
3229 // These instructions are aliases to some of the conditional select
3230 // instructions. However, the condition code is inverted in the aliased
3233 // FIXME: Is this the correct way to handle these? Or should the parser
3234 // generate the aliased instructions directly?
3235 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3236 bool condCodeThirdOperand =
3237 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3239 // Read the remaining operands.
3240 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3241 // Read the first operand.
3242 if (parseOperand(Operands, false, false)) {
3243 Parser.eatToEndOfStatement();
3248 while (getLexer().is(AsmToken::Comma)) {
3249 Parser.Lex(); // Eat the comma.
3251 // Parse and remember the operand.
3252 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3253 (N == 3 && condCodeThirdOperand) ||
3254 (N == 2 && condCodeSecondOperand),
3255 condCodeSecondOperand || condCodeThirdOperand)) {
3256 Parser.eatToEndOfStatement();
3260 // After successfully parsing some operands there are two special cases to
3261 // consider (i.e. notional operands not separated by commas). Both are due
3262 // to memory specifiers:
3263 // + An RBrac will end an address for load/store/prefetch
3264 // + An '!' will indicate a pre-indexed operation.
3266 // It's someone else's responsibility to make sure these tokens are sane
3267 // in the given context!
3268 if (Parser.getTok().is(AsmToken::RBrac)) {
3269 SMLoc Loc = Parser.getTok().getLoc();
3270 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3275 if (Parser.getTok().is(AsmToken::Exclaim)) {
3276 SMLoc Loc = Parser.getTok().getLoc();
3277 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3286 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3287 SMLoc Loc = Parser.getTok().getLoc();
3288 Parser.eatToEndOfStatement();
3289 return Error(Loc, "unexpected token in argument list");
3292 Parser.Lex(); // Consume the EndOfStatement
3296 // FIXME: This entire function is a giant hack to provide us with decent
3297 // operand range validation/diagnostics until TableGen/MC can be extended
3298 // to support autogeneration of this kind of validation.
// Post-match semantic validation: rejects instructions that encode fine but
// are architecturally unpredictable (writeback base overlapping a transfer
// register, Rt == Rt2 in register pairs) and malformed symbolic immediates
// on ADD/SUB. Loc[i] holds the start location of assembly operand i+1 so a
// diagnostic can point at the offending operand. Returns true after emitting
// an Error; false if the instruction is acceptable.
3299 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3300 SmallVectorImpl<SMLoc> &Loc) {
3301 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3302 // Check for indexed addressing modes w/ the base register being the
3303 // same as a destination/source register or pair load where
3304 // the Rt == Rt2. All of those are undefined behaviour.
3305 switch (Inst.getOpcode()) {
// Pre/post-indexed GPR LDP: the writeback base Rn must not overlap either
// destination. NOTE(review): registers are read from operands 1..3 —
// presumably operand 0 is the tied writeback result; confirm against the
// TableGen operand order for these opcodes.
3306 case AArch64::LDPSWpre:
3307 case AArch64::LDPWpost:
3308 case AArch64::LDPWpre:
3309 case AArch64::LDPXpost:
3310 case AArch64::LDPXpre: {
3311 unsigned Rt = Inst.getOperand(1).getReg();
3312 unsigned Rt2 = Inst.getOperand(2).getReg();
3313 unsigned Rn = Inst.getOperand(3).getReg();
3314 if (RI->isSubRegisterEq(Rn, Rt))
3315 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3316 "is also a destination");
3317 if (RI->isSubRegisterEq(Rn, Rt2))
3318 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3319 "is also a destination");
// Non-writeback LDP: only the Rt == Rt2 hazard applies (no base update).
3322 case AArch64::LDPDi:
3323 case AArch64::LDPQi:
3324 case AArch64::LDPSi:
3325 case AArch64::LDPSWi:
3326 case AArch64::LDPWi:
3327 case AArch64::LDPXi: {
3328 unsigned Rt = Inst.getOperand(0).getReg();
3329 unsigned Rt2 = Inst.getOperand(1).getReg();
3331 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
// Writeback LDP variants checked only for the Rt == Rt2 hazard here.
3334 case AArch64::LDPDpost:
3335 case AArch64::LDPDpre:
3336 case AArch64::LDPQpost:
3337 case AArch64::LDPQpre:
3338 case AArch64::LDPSpost:
3339 case AArch64::LDPSpre:
3340 case AArch64::LDPSWpost: {
3341 unsigned Rt = Inst.getOperand(1).getReg();
3342 unsigned Rt2 = Inst.getOperand(2).getReg();
3344 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
// Writeback STP: the base must not overlap either source register.
3347 case AArch64::STPDpost:
3348 case AArch64::STPDpre:
3349 case AArch64::STPQpost:
3350 case AArch64::STPQpre:
3351 case AArch64::STPSpost:
3352 case AArch64::STPSpre:
3353 case AArch64::STPWpost:
3354 case AArch64::STPWpre:
3355 case AArch64::STPXpost:
3356 case AArch64::STPXpre: {
3357 unsigned Rt = Inst.getOperand(1).getReg();
3358 unsigned Rt2 = Inst.getOperand(2).getReg();
3359 unsigned Rn = Inst.getOperand(3).getReg();
3360 if (RI->isSubRegisterEq(Rn, Rt))
3361 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3362 "is also a source");
3363 if (RI->isSubRegisterEq(Rn, Rt2))
3364 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3365 "is also a source");
// Writeback single-register LDR: base must not overlap the destination.
// NOTE(review): the message says "source" although Rt is the load
// destination — looks copy-pasted from the STR case; confirm and reword.
3368 case AArch64::LDRBBpre:
3369 case AArch64::LDRBpre:
3370 case AArch64::LDRHHpre:
3371 case AArch64::LDRHpre:
3372 case AArch64::LDRSBWpre:
3373 case AArch64::LDRSBXpre:
3374 case AArch64::LDRSHWpre:
3375 case AArch64::LDRSHXpre:
3376 case AArch64::LDRSWpre:
3377 case AArch64::LDRWpre:
3378 case AArch64::LDRXpre:
3379 case AArch64::LDRBBpost:
3380 case AArch64::LDRBpost:
3381 case AArch64::LDRHHpost:
3382 case AArch64::LDRHpost:
3383 case AArch64::LDRSBWpost:
3384 case AArch64::LDRSBXpost:
3385 case AArch64::LDRSHWpost:
3386 case AArch64::LDRSHXpost:
3387 case AArch64::LDRSWpost:
3388 case AArch64::LDRWpost:
3389 case AArch64::LDRXpost: {
3390 unsigned Rt = Inst.getOperand(1).getReg();
3391 unsigned Rn = Inst.getOperand(2).getReg();
3392 if (RI->isSubRegisterEq(Rn, Rt))
3393 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3394 "is also a source");
// Writeback single-register STR: base must not overlap the source.
3397 case AArch64::STRBBpost:
3398 case AArch64::STRBpost:
3399 case AArch64::STRHHpost:
3400 case AArch64::STRHpost:
3401 case AArch64::STRWpost:
3402 case AArch64::STRXpost:
3403 case AArch64::STRBBpre:
3404 case AArch64::STRBpre:
3405 case AArch64::STRHHpre:
3406 case AArch64::STRHpre:
3407 case AArch64::STRWpre:
3408 case AArch64::STRXpre: {
3409 unsigned Rt = Inst.getOperand(1).getReg();
3410 unsigned Rn = Inst.getOperand(2).getReg();
3411 if (RI->isSubRegisterEq(Rn, Rt))
3412 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3413 "is also a source");
3418 // Now check immediate ranges. Separate from the above as there is overlap
3419 // in the instructions being checked and this keeps the nested conditionals
3421 switch (Inst.getOpcode()) {
// ADD/SUB immediate forms: a symbolic immediate is only legal with a known
// relocation modifier, and only on the opcodes that can carry it.
3422 case AArch64::ADDSWri:
3423 case AArch64::ADDSXri:
3424 case AArch64::ADDWri:
3425 case AArch64::ADDXri:
3426 case AArch64::SUBSWri:
3427 case AArch64::SUBSXri:
3428 case AArch64::SUBWri:
3429 case AArch64::SUBXri: {
3430 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3431 // some slight duplication here.
3432 if (Inst.getOperand(2).isExpr()) {
3433 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3434 AArch64MCExpr::VariantKind ELFRefKind;
3435 MCSymbolRefExpr::VariantKind DarwinRefKind;
3437 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3438 return Error(Loc[2], "invalid immediate expression");
3441 // Only allow these with ADDXri.
3442 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3443 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3444 Inst.getOpcode() == AArch64::ADDXri)
3447 // Only allow these with ADDXri/ADDWri
3448 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3449 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3450 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3451 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3452 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3453 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3454 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3455 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3456 (Inst.getOpcode() == AArch64::ADDXri ||
3457 Inst.getOpcode() == AArch64::ADDWri))
3460 // Don't allow expressions in the immediate field otherwise
3461 return Error(Loc[2], "invalid immediate expression")
// Translates a Match_* failure code from the TableGen-generated matcher into
// a human-readable diagnostic anchored at Loc. Every case emits an Error
// (which returns true), so a true return always means "diagnosed".
3470 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3472 case Match_MissingFeature:
3474 "instruction requires a CPU feature not currently enabled");
3475 case Match_InvalidOperand:
3476 return Error(Loc, "invalid operand for instruction");
3477 case Match_InvalidSuffix:
3478 return Error(Loc, "invalid type suffix for instruction");
3479 case Match_InvalidCondCode:
3480 return Error(Loc, "expected AArch64 condition code");
3481 case Match_AddSubRegExtendSmall:
3483 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3484 case Match_AddSubRegExtendLarge:
3486 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3487 case Match_AddSubSecondSource:
3489 "expected compatible register, symbol or integer in range [0, 4095]");
3490 case Match_LogicalSecondSource:
3491 return Error(Loc, "expected compatible register or logical immediate");
3492 case Match_InvalidMovImm32Shift:
3493 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3494 case Match_InvalidMovImm64Shift:
3495 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3496 case Match_AddSubRegShift32:
3498 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3499 case Match_AddSubRegShift64:
3501 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3502 case Match_InvalidFPImm:
3504 "expected compatible register or floating-point constant");
3505 case Match_InvalidMemoryIndexedSImm9:
3506 return Error(Loc, "index must be an integer in range [-256, 255].");
3507 case Match_InvalidMemoryIndexed4SImm7:
3508 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3509 case Match_InvalidMemoryIndexed8SImm7:
3510 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3511 case Match_InvalidMemoryIndexed16SImm7:
3512 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3513 case Match_InvalidMemoryWExtend8:
3515 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3516 case Match_InvalidMemoryWExtend16:
3518 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3519 case Match_InvalidMemoryWExtend32:
3521 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3522 case Match_InvalidMemoryWExtend64:
3524 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3525 case Match_InvalidMemoryWExtend128:
3527 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3528 case Match_InvalidMemoryXExtend8:
3530 "expected 'lsl' or 'sxtx' with optional shift of #0");
3531 case Match_InvalidMemoryXExtend16:
3533 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3534 case Match_InvalidMemoryXExtend32:
3536 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3537 case Match_InvalidMemoryXExtend64:
3539 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3540 case Match_InvalidMemoryXExtend128:
3542 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3543 case Match_InvalidMemoryIndexed1:
3544 return Error(Loc, "index must be an integer in range [0, 4095].");
3545 case Match_InvalidMemoryIndexed2:
3546 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3547 case Match_InvalidMemoryIndexed4:
3548 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3549 case Match_InvalidMemoryIndexed8:
3550 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3551 case Match_InvalidMemoryIndexed16:
3552 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3553 case Match_InvalidImm0_7:
3554 return Error(Loc, "immediate must be an integer in range [0, 7].");
3555 case Match_InvalidImm0_15:
3556 return Error(Loc, "immediate must be an integer in range [0, 15].");
3557 case Match_InvalidImm0_31:
3558 return Error(Loc, "immediate must be an integer in range [0, 31].");
3559 case Match_InvalidImm0_63:
3560 return Error(Loc, "immediate must be an integer in range [0, 63].");
3561 case Match_InvalidImm0_127:
3562 return Error(Loc, "immediate must be an integer in range [0, 127].");
3563 case Match_InvalidImm0_65535:
3564 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3565 case Match_InvalidImm1_8:
3566 return Error(Loc, "immediate must be an integer in range [1, 8].");
3567 case Match_InvalidImm1_16:
3568 return Error(Loc, "immediate must be an integer in range [1, 16].");
3569 case Match_InvalidImm1_32:
3570 return Error(Loc, "immediate must be an integer in range [1, 32].");
3571 case Match_InvalidImm1_64:
3572 return Error(Loc, "immediate must be an integer in range [1, 64].");
3573 case Match_InvalidIndex1:
3574 return Error(Loc, "expected lane specifier '[1]'");
3575 case Match_InvalidIndexB:
3576 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3577 case Match_InvalidIndexH:
3578 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3579 case Match_InvalidIndexS:
3580 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3581 case Match_InvalidIndexD:
3582 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3583 case Match_InvalidLabel:
3584 return Error(Loc, "expected label or encodable integer pc offset");
// NOTE(review): the next two returns have no visible case labels here —
// presumably Match_MRS / Match_MSR; confirm against the full file.
3586 return Error(Loc, "expected readable system register");
3588 return Error(Loc, "expected writable system register or pstate");
3589 case Match_MnemonicFail:
3590 return Error(Loc, "unrecognized instruction mnemonic");
3592 llvm_unreachable("unexpected error code!")
// Forward declaration for the feature-name table emitted by
// AArch64GenAsmMatcher.inc (included at the bottom of the file).
3596 static const char *getSubtargetFeatureName(uint64_t Val);
// Rewrites a handful of alias mnemonics that the TableGen InstAlias
// machinery cannot express (lsl->ubfm, bfi/sbfiz/ubfiz->*bfm,
// bfxil/sbfx/ubfx->*bfm, sxtw/uxtw/sxt[bh]/uxt[bh] register-width fixups,
// fmov Rd,#0.0 -> zero register), then runs the generated matcher — first
// against the short-form NEON table (variant 1), then the long-form table
// (variant 0). On success the MCInst is validated and emitted; otherwise a
// diagnostic is produced from MatchResult/ErrorInfo.
3598 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3599 OperandVector &Operands,
3601 uint64_t &ErrorInfo,
3602 bool MatchingInlineAsm) {
3603 assert(!Operands.empty() && "Unexpect empty operand list!");
3604 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3605 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3607 StringRef Tok = Op.getToken();
3608 unsigned NumOperands = Operands.size();
// "lsl Rd, Rn, #imm" -> "ubfm Rd, Rn, #((W-imm) mod W), #(W-1-imm)",
// where W is the register width (32 or 64).
3610 if (NumOperands == 4 && Tok == "lsl") {
3611 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3612 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3613 if (Op2.isReg() && Op3.isImm()) {
3614 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3616 uint64_t Op3Val = Op3CE->getValue();
3617 uint64_t NewOp3Val = 0;
3618 uint64_t NewOp4Val = 0;
3619 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3621 NewOp3Val = (32 - Op3Val) & 0x1f;
3622 NewOp4Val = 31 - Op3Val;
3624 NewOp3Val = (64 - Op3Val) & 0x3f;
3625 NewOp4Val = 63 - Op3Val;
3628 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3629 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3631 Operands[0] = AArch64Operand::CreateToken(
3632 "ubfm", false, Op.getStartLoc(), getContext());
3633 Operands.push_back(AArch64Operand::CreateImm(
3634 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3635 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3636 Op3.getEndLoc(), getContext());
3639 } else if (NumOperands == 5) {
3640 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3641 // UBFIZ -> UBFM aliases.
3642 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3643 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3644 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3645 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3647 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3648 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3649 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3651 if (Op3CE && Op4CE) {
3652 uint64_t Op3Val = Op3CE->getValue();
3653 uint64_t Op4Val = Op4CE->getValue();
3655 uint64_t RegWidth = 0;
3656 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check the lsb/width immediates against the destination width.
// NOTE(review): the messages hard-code the 32-bit ranges ([0, 31] /
// [1, 32]) even when RegWidth is 64; confirm whether that is intended.
3662 if (Op3Val >= RegWidth)
3663 return Error(Op3.getStartLoc(),
3664 "expected integer in range [0, 31]");
3665 if (Op4Val < 1 || Op4Val > RegWidth)
3666 return Error(Op4.getStartLoc(),
3667 "expected integer in range [1, 32]");
3669 uint64_t NewOp3Val = 0;
3670 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3672 NewOp3Val = (32 - Op3Val) & 0x1f;
3674 NewOp3Val = (64 - Op3Val) & 0x3f;
3676 uint64_t NewOp4Val = Op4Val - 1;
3678 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3679 return Error(Op4.getStartLoc(),
3680 "requested insert overflows register");
3682 const MCExpr *NewOp3 =
3683 MCConstantExpr::Create(NewOp3Val, getContext());
3684 const MCExpr *NewOp4 =
3685 MCConstantExpr::Create(NewOp4Val, getContext());
3686 Operands[3] = AArch64Operand::CreateImm(
3687 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3688 Operands[4] = AArch64Operand::CreateImm(
3689 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3691 Operands[0] = AArch64Operand::CreateToken(
3692 "bfm", false, Op.getStartLoc(), getContext());
3693 else if (Tok == "sbfiz")
3694 Operands[0] = AArch64Operand::CreateToken(
3695 "sbfm", false, Op.getStartLoc(), getContext());
3696 else if (Tok == "ubfiz")
3697 Operands[0] = AArch64Operand::CreateToken(
3698 "ubfm", false, Op.getStartLoc(), getContext());
3700 llvm_unreachable("No valid mnemonic for alias?");
3704 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3705 // UBFX -> UBFM aliases.
3706 } else if (NumOperands == 5 &&
3707 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3708 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3709 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3710 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3712 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3713 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3714 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3716 if (Op3CE && Op4CE) {
3717 uint64_t Op3Val = Op3CE->getValue();
3718 uint64_t Op4Val = Op4CE->getValue();
3720 uint64_t RegWidth = 0;
3721 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3727 if (Op3Val >= RegWidth)
3728 return Error(Op3.getStartLoc(),
3729 "expected integer in range [0, 31]");
3730 if (Op4Val < 1 || Op4Val > RegWidth)
3731 return Error(Op4.getStartLoc(),
3732 "expected integer in range [1, 32]");
// BFM encodes the extract as lsb..(lsb+width-1); reject if it runs off
// the end of the register or wraps.
3734 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3736 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3737 return Error(Op4.getStartLoc(),
3738 "requested extract overflows register");
3740 const MCExpr *NewOp4 =
3741 MCConstantExpr::Create(NewOp4Val, getContext());
3742 Operands[4] = AArch64Operand::CreateImm(
3743 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3745 Operands[0] = AArch64Operand::CreateToken(
3746 "bfm", false, Op.getStartLoc(), getContext());
3747 else if (Tok == "sbfx")
3748 Operands[0] = AArch64Operand::CreateToken(
3749 "sbfm", false, Op.getStartLoc(), getContext());
3750 else if (Tok == "ubfx")
3751 Operands[0] = AArch64Operand::CreateToken(
3752 "ubfm", false, Op.getStartLoc(), getContext());
3754 llvm_unreachable("No valid mnemonic for alias?");
3759 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3760 // InstAlias can't quite handle this since the reg classes aren't
3762 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3763 // The source register can be Wn here, but the matcher expects a
3764 // GPR64. Twiddle it here if necessary.
3765 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3767 unsigned Reg = getXRegFromWReg(Op.getReg());
3768 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3769 Op.getEndLoc(), getContext());
3772 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3773 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3774 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3776 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3778 // The source register can be Wn here, but the matcher expects a
3779 // GPR64. Twiddle it here if necessary.
3780 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3782 unsigned Reg = getXRegFromWReg(Op.getReg());
3783 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3784 Op.getEndLoc(), getContext());
3788 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3789 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3790 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3792 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3794 // The source register can be Wn here, but the matcher expects a
3795 // GPR32. Twiddle it here if necessary.
3796 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3798 unsigned Reg = getWRegFromXReg(Op.getReg());
3799 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3800 Op.getEndLoc(), getContext());
3805 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3806 if (NumOperands == 3 && Tok == "fmov") {
3807 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3808 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 is the sentinel used for the literal #0.0.
3809 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3811 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3815 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3816 Op.getEndLoc(), getContext());
3821 // First try to match against the secondary set of tables containing the
3822 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3823 unsigned MatchResult =
3824 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3826 // If that fails, try against the alternate table containing long-form NEON:
3827 // "fadd v0.2s, v1.2s, v2.2s"
3828 if (MatchResult != Match_Success)
3830 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3832 switch (MatchResult) {
3833 case Match_Success: {
3834 // Perform range checking and other semantic validations
3835 SmallVector<SMLoc, 8> OperandLocs;
3836 NumOperands = Operands.size();
3837 for (unsigned i = 1; i < NumOperands; ++i)
3838 OperandLocs.push_back(Operands[i]->getStartLoc());
3839 if (validateInstruction(Inst, OperandLocs))
3843 Out.EmitInstruction(Inst, STI);
3846 case Match_MissingFeature: {
3847 assert(ErrorInfo && "Unknown missing feature!");
3848 // Special case the error message for the very common case where only
3849 // a single subtarget feature is missing (neon, e.g.).
3850 std::string Msg = "instruction requires:";
// Walk the ErrorInfo bitmask and append the name of each missing feature.
// NOTE(review): `Mask` is declared on an elided line (presumably
// `uint64_t Mask = 1;` shifted each iteration); confirm in the full file.
3852 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3853 if (ErrorInfo & Mask) {
3855 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3859 return Error(IDLoc, Msg);
3861 case Match_MnemonicFail:
3862 return showMatchError(IDLoc, MatchResult);
3863 case Match_InvalidOperand: {
3864 SMLoc ErrorLoc = IDLoc;
3865 if (ErrorInfo != ~0ULL) {
3866 if (ErrorInfo >= Operands.size())
3867 return Error(IDLoc, "too few operands for instruction");
3869 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3870 if (ErrorLoc == SMLoc())
3873 // If the match failed on a suffix token operand, tweak the diagnostic
3875 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3876 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3877 MatchResult = Match_InvalidSuffix;
3879 return showMatchError(ErrorLoc, MatchResult);
// All remaining diagnosable failures share one path: resolve the operand
// location from ErrorInfo, then defer to showMatchError.
3881 case Match_InvalidMemoryIndexed1:
3882 case Match_InvalidMemoryIndexed2:
3883 case Match_InvalidMemoryIndexed4:
3884 case Match_InvalidMemoryIndexed8:
3885 case Match_InvalidMemoryIndexed16:
3886 case Match_InvalidCondCode:
3887 case Match_AddSubRegExtendSmall:
3888 case Match_AddSubRegExtendLarge:
3889 case Match_AddSubSecondSource:
3890 case Match_LogicalSecondSource:
3891 case Match_AddSubRegShift32:
3892 case Match_AddSubRegShift64:
3893 case Match_InvalidMovImm32Shift:
3894 case Match_InvalidMovImm64Shift:
3895 case Match_InvalidFPImm:
3896 case Match_InvalidMemoryWExtend8:
3897 case Match_InvalidMemoryWExtend16:
3898 case Match_InvalidMemoryWExtend32:
3899 case Match_InvalidMemoryWExtend64:
3900 case Match_InvalidMemoryWExtend128:
3901 case Match_InvalidMemoryXExtend8:
3902 case Match_InvalidMemoryXExtend16:
3903 case Match_InvalidMemoryXExtend32:
3904 case Match_InvalidMemoryXExtend64:
3905 case Match_InvalidMemoryXExtend128:
3906 case Match_InvalidMemoryIndexed4SImm7:
3907 case Match_InvalidMemoryIndexed8SImm7:
3908 case Match_InvalidMemoryIndexed16SImm7:
3909 case Match_InvalidMemoryIndexedSImm9:
3910 case Match_InvalidImm0_7:
3911 case Match_InvalidImm0_15:
3912 case Match_InvalidImm0_31:
3913 case Match_InvalidImm0_63:
3914 case Match_InvalidImm0_127:
3915 case Match_InvalidImm0_65535:
3916 case Match_InvalidImm1_8:
3917 case Match_InvalidImm1_16:
3918 case Match_InvalidImm1_32:
3919 case Match_InvalidImm1_64:
3920 case Match_InvalidIndex1:
3921 case Match_InvalidIndexB:
3922 case Match_InvalidIndexH:
3923 case Match_InvalidIndexS:
3924 case Match_InvalidIndexD:
3925 case Match_InvalidLabel:
3928 if (ErrorInfo >= Operands.size())
3929 return Error(IDLoc, "too few operands for instruction");
3930 // Any time we get here, there's nothing fancy to do. Just get the
3931 // operand SMLoc and display the diagnostic.
3932 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3933 if (ErrorLoc == SMLoc())
3935 return showMatchError(ErrorLoc, MatchResult);
3939 llvm_unreachable("Implement any new match types added!")
3942 /// ParseDirective parses the arm specific directives
// Dispatches target-specific directives: .hword/.word/.xword (sized data),
// .tlsdesccall, .ltorg/.pool, .unreq, .inst (ELF only), and falls through to
// parseDirectiveLOH for anything else. Returns the callee's result.
3943 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3944 const MCObjectFileInfo::Environment Format =
3945 getContext().getObjectFileInfo()->getObjectFileType();
3946 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3947 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3949 StringRef IDVal = DirectiveID.getIdentifier();
3950 SMLoc Loc = DirectiveID.getLoc();
3951 if (IDVal == ".hword")
3952 return parseDirectiveWord(2, Loc);
3953 if (IDVal == ".word")
3954 return parseDirectiveWord(4, Loc);
3955 if (IDVal == ".xword")
3956 return parseDirectiveWord(8, Loc);
3957 if (IDVal == ".tlsdesccall")
3958 return parseDirectiveTLSDescCall(Loc);
3959 if (IDVal == ".ltorg" || IDVal == ".pool")
3960 return parseDirectiveLtorg(Loc);
3961 if (IDVal == ".unreq")
3962 return parseDirectiveUnreq(DirectiveID.getLoc());
// .inst is only meaningful for ELF targets (not Mach-O or COFF).
3964 if (!IsMachO && !IsCOFF) {
3965 if (IDVal == ".inst")
3966 return parseDirectiveInst(Loc);
3969 return parseDirectiveLOH(IDVal, Loc)
3972 /// parseDirectiveWord
3973 /// ::= .word [ expression (, expression)* ]
// Emits each comma-separated expression as a Size-byte value (Size is 2, 4
// or 8 depending on the directive). An empty operand list is accepted.
3974 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3975 MCAsmParser &Parser = getParser();
3976 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3978 const MCExpr *Value;
3979 if (getParser().parseExpression(Value))
3982 getParser().getStreamer().EmitValue(Value, Size);
3984 if (getLexer().is(AsmToken::EndOfStatement))
3987 // FIXME: Improve diagnostic.
3988 if (getLexer().isNot(AsmToken::Comma))
3989 return Error(L, "unexpected token in directive")
3998 /// parseDirectiveInst
3999 /// ::= .inst opcode [, ...]
// Emits each comma-separated constant expression as a raw instruction word
// via the target streamer. Requires at least one operand, and each operand
// must fold to a constant.
4000 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4001 MCAsmParser &Parser = getParser();
4002 if (getLexer().is(AsmToken::EndOfStatement)) {
4003 Parser.eatToEndOfStatement();
4004 Error(Loc, "expected expression following directive");
4011 if (getParser().parseExpression(Expr)) {
4012 Error(Loc, "expected expression");
// Only fully-resolved constants can be emitted as raw encodings.
4016 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4018 Error(Loc, "expected constant expression");
4022 getTargetStreamer().emitInst(Value->getValue());
4024 if (getLexer().is(AsmToken::EndOfStatement))
4027 if (getLexer().isNot(AsmToken::Comma)) {
4028 Error(Loc, "unexpected token in directive");
4032 Parser.Lex(); // Eat comma.
4039 // parseDirectiveTLSDescCall:
4040 // ::= .tlsdesccall symbol
// Emits a TLSDESCCALL pseudo-instruction whose operand is the given symbol
// wrapped in a VK_TLSDESC modifier, marking the call site for TLS descriptor
// relaxation.
4041 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4043 if (getParser().parseIdentifier(Name))
4044 return Error(L, "expected symbol after directive");
4046 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4047 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4048 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4051 Inst.setOpcode(AArch64::TLSDESCCALL);
4052 Inst.addOperand(MCOperand::CreateExpr(Expr));
4054 getParser().getStreamer().EmitInstruction(Inst, STI);
4058 /// ::= .loh <lohName | lohId> label1, ..., labelN
4059 /// The number of arguments depends on the loh identifier.
// Parses a Mach-O linker-optimization-hint directive: an LOH kind given by
// name or numeric id, followed by the kind-specific number of label
// arguments, then emits the LOH via the streamer.
4060 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4061 if (IDVal != MCLOHDirectiveName())
4064 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4065 if (getParser().getTok().isNot(AsmToken::Integer))
4066 return TokError("expected an identifier or a number in directive");
4067 // We successfully get a numeric value for the identifier.
4068 // Check if it is valid.
4069 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): mixed signed/unsigned comparison — `-1U` promotes to
// int64_t 4294967295, so this only rejects ids above the 32-bit unsigned
// range and relies on isValidMCLOHType for everything else (including
// negatives). Confirm the intended bound.
4070 if (Id <= -1U && !isValidMCLOHType(Id))
4071 return TokError("invalid numeric identifier in directive");
4072 Kind = (MCLOHType)Id;
4074 StringRef Name = getTok().getIdentifier();
4075 // We successfully parse an identifier.
4076 // Check if it is a recognized one.
4077 int Id = MCLOHNameToId(Name);
4080 return TokError("invalid identifier in directive");
4081 Kind = (MCLOHType)Id;
4083 // Consume the identifier.
4085 // Get the number of arguments of this LOH.
4086 int NbArgs = MCLOHIdToNbArgs(Kind);
4088 assert(NbArgs != -1 && "Invalid number of arguments");
4090 SmallVector<MCSymbol *, 3> Args;
4091 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4093 if (getParser().parseIdentifier(Name))
4094 return TokError("expected identifier in directive");
4095 Args.push_back(getContext().GetOrCreateSymbol(Name));
// No trailing comma after the final argument.
4097 if (Idx + 1 == NbArgs)
4099 if (getLexer().isNot(AsmToken::Comma))
4100 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4103 if (getLexer().isNot(AsmToken::EndOfStatement))
4104 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4106 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4110 /// parseDirectiveLtorg
4111 /// ::= .ltorg | .pool
// Flushes the current constant pool at this point in the output stream.
4112 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4113 getTargetStreamer().emitCurrentConstantPool();
4117 /// parseDirectiveReq
4118 /// ::= name .req registername
// Records Name as an alias for a scalar or vector register in RegisterReqs
// (storing an (IsVector, RegNum) pair). Redefining an alias to a different
// register only warns; the original binding is kept.
4119 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4120 MCAsmParser &Parser = getParser();
4121 Parser.Lex(); // Eat the '.req' token.
4122 SMLoc SRegLoc = getLoc();
4123 unsigned RegNum = tryParseRegister();
4124 bool IsVector = false;
// Not a scalar register — try a vector register next. A vector register
// with an explicit type suffix (e.g. v0.8b) is rejected for .req.
4126 if (RegNum == static_cast<unsigned>(-1)) {
4128 RegNum = tryMatchVectorRegister(Kind, false);
4129 if (!Kind.empty()) {
4130 Error(SRegLoc, "vector register without type specifier expected");
4136 if (RegNum == static_cast<unsigned>(-1)) {
4137 Parser.eatToEndOfStatement();
4138 Error(SRegLoc, "register name or alias expected");
4142 // Shouldn't be anything else.
4143 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4144 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4145 Parser.eatToEndOfStatement();
4149 Parser.Lex(); // Consume the EndOfStatement
// insert() is a no-op if Name already exists; warn when the surviving
// binding differs from the one just requested.
4151 auto pair = std::make_pair(IsVector, RegNum);
4152 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4153 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4158 /// parseDirectiveUneq
4159 /// ::= .unreq registername
// Removes a register alias previously created with .req. The lookup uses
// the lowercased identifier, matching the case-insensitive alias handling.
4160 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4161 MCAsmParser &Parser = getParser();
4162 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4163 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4164 Parser.eatToEndOfStatement();
4167 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4168 Parser.Lex(); // Eat the identifier.
// Decomposes Expr into (optional) AArch64-specific ELF modifier, (optional)
// Darwin symbol-ref variant, and a constant addend. Accepted shapes are a
// bare symbol reference or "symbol +/- constant", optionally wrapped in an
// AArch64MCExpr modifier. Returns false for anything more complex or for an
// expression that mixes Darwin and ELF modifier syntax.
4173 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4174 AArch64MCExpr::VariantKind &ELFRefKind,
4175 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4177 ELFRefKind = AArch64MCExpr::VK_INVALID;
4178 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel an AArch64 (ELF) modifier wrapper, if present.
4181 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4182 ELFRefKind = AE->getKind();
4183 Expr = AE->getSubExpr();
4186 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4188 // It's a simple symbol reference with no addend.
4189 DarwinRefKind = SE->getKind();
// Otherwise the only accepted shape is symbol-ref (+|-) constant.
4193 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4197 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4200 DarwinRefKind = SE->getKind();
4202 if (BE->getOpcode() != MCBinaryExpr::Add &&
4203 BE->getOpcode() != MCBinaryExpr::Sub)
4206 // See if the addend is is a constant, otherwise there's more going
4207 // on here than we can deal with.
4208 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4212 Addend = AddendExpr->getValue();
// Subtraction negates the addend (elided line presumably does so).
4213 if (BE->getOpcode() == MCBinaryExpr::Sub)
4216 // It's some symbol reference + a constant addend, but really
4217 // shouldn't use both Darwin and ELF syntax.
4218 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4219 DarwinRefKind == MCSymbolRefExpr::VK_None;
4222 /// Force static initialization.
// Registers this parser with the little-endian, big-endian, and legacy
// "arm64" target triples so llvm::TargetRegistry can construct it.
4223 extern "C" void LLVMInitializeAArch64AsmParser() {
4224 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4225 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4226 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4229 #define GET_REGISTER_MATCHER
4230 #define GET_SUBTARGET_FEATURE_NAME
4231 #define GET_MATCHER_IMPLEMENTATION
4232 #include "AArch64GenAsmMatcher.inc"
4234 // Define this matcher function after the auto-generated include so we
4235 // have the match class enum definitions.
4236 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4238 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4239 // If the kind is a token for a literal immediate, check if our asm
4240 // operand matches. This is for InstAliases which have a fixed-value
4241 // immediate in the syntax.
4242 int64_t ExpectedVal;
4245 return Match_InvalidOperand;
4287 return Match_InvalidOperand;
4288 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4290 return Match_InvalidOperand;
4291 if (CE->getValue() == ExpectedVal)
4292 return Match_Success;
4293 return Match_InvalidOperand;