1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
42 class AArch64AsmParser : public MCTargetAsmParser {
44 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
117 const MCInstrInfo &MII, const MCTargetOptions &Options)
118 : MCTargetAsmParser(), STI(STI) {
119 MCAsmParserExtension::Initialize(Parser);
120 MCStreamer &S = getParser().getStreamer();
121 if (S.getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(S);
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
140 } // end anonymous namespace
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
146 class AArch64Operand : public MCParsedAsmOperand {
164 SMLoc StartLoc, EndLoc;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
177 struct VectorListOp {
180 unsigned NumElements;
181 unsigned ElementKind;
184 struct VectorIndexOp {
192 struct ShiftedImmOp {
194 unsigned ShiftAmount;
198 AArch64CC::CondCode Code;
202 unsigned Val; // Encoded 8-bit representation.
206 unsigned Val; // Not the enum since not all values have names.
216 uint32_t PStateField;
229 struct ShiftExtendOp {
230 AArch64_AM::ShiftExtendType Type;
232 bool HasExplicitAmount;
242 struct VectorListOp VectorList;
243 struct VectorIndexOp VectorIndex;
245 struct ShiftedImmOp ShiftedImm;
246 struct CondCodeOp CondCode;
247 struct FPImmOp FPImm;
248 struct BarrierOp Barrier;
249 struct SysRegOp SysReg;
250 struct SysCRImmOp SysCRImm;
251 struct PrefetchOp Prefetch;
252 struct ShiftExtendOp ShiftExtend;
255 // Keep the MCContext around as the MCExprs may need manipulated during
256 // the add<>Operands() calls.
260 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
262 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
264 StartLoc = o.StartLoc;
274 ShiftedImm = o.ShiftedImm;
277 CondCode = o.CondCode;
289 VectorList = o.VectorList;
292 VectorIndex = o.VectorIndex;
298 SysCRImm = o.SysCRImm;
301 Prefetch = o.Prefetch;
304 ShiftExtend = o.ShiftExtend;
309 /// getStartLoc - Get the location of the first token of this operand.
310 SMLoc getStartLoc() const override { return StartLoc; }
311 /// getEndLoc - Get the location of the last token of this operand.
312 SMLoc getEndLoc() const override { return EndLoc; }
314 StringRef getToken() const {
315 assert(Kind == k_Token && "Invalid access!");
316 return StringRef(Tok.Data, Tok.Length);
319 bool isTokenSuffix() const {
320 assert(Kind == k_Token && "Invalid access!");
324 const MCExpr *getImm() const {
325 assert(Kind == k_Immediate && "Invalid access!");
329 const MCExpr *getShiftedImmVal() const {
330 assert(Kind == k_ShiftedImm && "Invalid access!");
331 return ShiftedImm.Val;
334 unsigned getShiftedImmShift() const {
335 assert(Kind == k_ShiftedImm && "Invalid access!");
336 return ShiftedImm.ShiftAmount;
339 AArch64CC::CondCode getCondCode() const {
340 assert(Kind == k_CondCode && "Invalid access!");
341 return CondCode.Code;
344 unsigned getFPImm() const {
345 assert(Kind == k_FPImm && "Invalid access!");
349 unsigned getBarrier() const {
350 assert(Kind == k_Barrier && "Invalid access!");
354 StringRef getBarrierName() const {
355 assert(Kind == k_Barrier && "Invalid access!");
356 return StringRef(Barrier.Data, Barrier.Length);
359 unsigned getReg() const override {
360 assert(Kind == k_Register && "Invalid access!");
364 unsigned getVectorListStart() const {
365 assert(Kind == k_VectorList && "Invalid access!");
366 return VectorList.RegNum;
369 unsigned getVectorListCount() const {
370 assert(Kind == k_VectorList && "Invalid access!");
371 return VectorList.Count;
374 unsigned getVectorIndex() const {
375 assert(Kind == k_VectorIndex && "Invalid access!");
376 return VectorIndex.Val;
379 StringRef getSysReg() const {
380 assert(Kind == k_SysReg && "Invalid access!");
381 return StringRef(SysReg.Data, SysReg.Length);
384 unsigned getSysCR() const {
385 assert(Kind == k_SysCR && "Invalid access!");
389 unsigned getPrefetch() const {
390 assert(Kind == k_Prefetch && "Invalid access!");
394 StringRef getPrefetchName() const {
395 assert(Kind == k_Prefetch && "Invalid access!");
396 return StringRef(Prefetch.Data, Prefetch.Length);
399 AArch64_AM::ShiftExtendType getShiftExtendType() const {
400 assert(Kind == k_ShiftExtend && "Invalid access!");
401 return ShiftExtend.Type;
404 unsigned getShiftExtendAmount() const {
405 assert(Kind == k_ShiftExtend && "Invalid access!");
406 return ShiftExtend.Amount;
409 bool hasShiftExtendAmount() const {
410 assert(Kind == k_ShiftExtend && "Invalid access!");
411 return ShiftExtend.HasExplicitAmount;
414 bool isImm() const override { return Kind == k_Immediate; }
415 bool isMem() const override { return false; }
416 bool isSImm9() const {
419 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
422 int64_t Val = MCE->getValue();
423 return (Val >= -256 && Val < 256);
425 bool isSImm7s4() const {
428 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
431 int64_t Val = MCE->getValue();
432 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
434 bool isSImm7s8() const {
437 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
440 int64_t Val = MCE->getValue();
441 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
443 bool isSImm7s16() const {
446 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
449 int64_t Val = MCE->getValue();
450 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
453 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
454 AArch64MCExpr::VariantKind ELFRefKind;
455 MCSymbolRefExpr::VariantKind DarwinRefKind;
457 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
459 // If we don't understand the expression, assume the best and
460 // let the fixup and relocation code deal with it.
464 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
465 ELFRefKind == AArch64MCExpr::VK_LO12 ||
466 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
469 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
473 // Note that we don't range-check the addend. It's adjusted modulo page
474 // size when converted, so there is no "out of range" condition when using
476 return Addend >= 0 && (Addend % Scale) == 0;
477 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
478 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
479 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
486 template <int Scale> bool isUImm12Offset() const {
490 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 return isSymbolicUImm12Offset(getImm(), Scale);
494 int64_t Val = MCE->getValue();
495 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
498 bool isImm0_7() const {
501 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
504 int64_t Val = MCE->getValue();
505 return (Val >= 0 && Val < 8);
507 bool isImm1_8() const {
510 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
513 int64_t Val = MCE->getValue();
514 return (Val > 0 && Val < 9);
516 bool isImm0_15() const {
519 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
522 int64_t Val = MCE->getValue();
523 return (Val >= 0 && Val < 16);
525 bool isImm1_16() const {
528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
531 int64_t Val = MCE->getValue();
532 return (Val > 0 && Val < 17);
534 bool isImm0_31() const {
537 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540 int64_t Val = MCE->getValue();
541 return (Val >= 0 && Val < 32);
543 bool isImm1_31() const {
546 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
549 int64_t Val = MCE->getValue();
550 return (Val >= 1 && Val < 32);
552 bool isImm1_32() const {
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558 int64_t Val = MCE->getValue();
559 return (Val >= 1 && Val < 33);
561 bool isImm0_63() const {
564 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567 int64_t Val = MCE->getValue();
568 return (Val >= 0 && Val < 64);
570 bool isImm1_63() const {
573 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
576 int64_t Val = MCE->getValue();
577 return (Val >= 1 && Val < 64);
579 bool isImm1_64() const {
582 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
585 int64_t Val = MCE->getValue();
586 return (Val >= 1 && Val < 65);
588 bool isImm0_127() const {
591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
594 int64_t Val = MCE->getValue();
595 return (Val >= 0 && Val < 128);
597 bool isImm0_255() const {
600 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
603 int64_t Val = MCE->getValue();
604 return (Val >= 0 && Val < 256);
606 bool isImm0_65535() const {
609 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
612 int64_t Val = MCE->getValue();
613 return (Val >= 0 && Val < 65536);
615 bool isImm32_63() const {
618 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
621 int64_t Val = MCE->getValue();
622 return (Val >= 32 && Val < 64);
624 bool isLogicalImm32() const {
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 int64_t Val = MCE->getValue();
631 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
634 return AArch64_AM::isLogicalImmediate(Val, 32);
636 bool isLogicalImm64() const {
639 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
642 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
644 bool isLogicalImm32Not() const {
647 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
650 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
651 return AArch64_AM::isLogicalImmediate(Val, 32);
653 bool isLogicalImm64Not() const {
656 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
659 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
661 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
662 bool isAddSubImm() const {
663 if (!isShiftedImm() && !isImm())
668 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
669 if (isShiftedImm()) {
670 unsigned Shift = ShiftedImm.ShiftAmount;
671 Expr = ShiftedImm.Val;
672 if (Shift != 0 && Shift != 12)
678 AArch64MCExpr::VariantKind ELFRefKind;
679 MCSymbolRefExpr::VariantKind DarwinRefKind;
681 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
682 DarwinRefKind, Addend)) {
683 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
684 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
685 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
686 || ELFRefKind == AArch64MCExpr::VK_LO12
687 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
688 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
690 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
691 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
693 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
696 // Otherwise it should be a real immediate in range:
697 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
698 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
700 bool isCondCode() const { return Kind == k_CondCode; }
701 bool isSIMDImmType10() const {
704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
707 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
709 bool isBranchTarget26() const {
712 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
715 int64_t Val = MCE->getValue();
718 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
720 bool isPCRelLabel19() const {
723 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726 int64_t Val = MCE->getValue();
729 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
731 bool isBranchTarget14() const {
734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 int64_t Val = MCE->getValue();
740 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
744 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
748 AArch64MCExpr::VariantKind ELFRefKind;
749 MCSymbolRefExpr::VariantKind DarwinRefKind;
751 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
752 DarwinRefKind, Addend)) {
755 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
758 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
759 if (ELFRefKind == AllowedModifiers[i])
766 bool isMovZSymbolG3() const {
767 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
770 bool isMovZSymbolG2() const {
771 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
772 AArch64MCExpr::VK_TPREL_G2,
773 AArch64MCExpr::VK_DTPREL_G2});
776 bool isMovZSymbolG1() const {
777 return isMovWSymbol({
778 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
779 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
780 AArch64MCExpr::VK_DTPREL_G1,
784 bool isMovZSymbolG0() const {
785 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
786 AArch64MCExpr::VK_TPREL_G0,
787 AArch64MCExpr::VK_DTPREL_G0});
790 bool isMovKSymbolG3() const {
791 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
794 bool isMovKSymbolG2() const {
795 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
798 bool isMovKSymbolG1() const {
799 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
800 AArch64MCExpr::VK_TPREL_G1_NC,
801 AArch64MCExpr::VK_DTPREL_G1_NC});
804 bool isMovKSymbolG0() const {
806 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
807 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
810 template<int RegWidth, int Shift>
811 bool isMOVZMovAlias() const {
812 if (!isImm()) return false;
814 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815 if (!CE) return false;
816 uint64_t Value = CE->getValue();
819 Value &= 0xffffffffULL;
821 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
822 if (Value == 0 && Shift != 0)
825 return (Value & ~(0xffffULL << Shift)) == 0;
828 template<int RegWidth, int Shift>
829 bool isMOVNMovAlias() const {
830 if (!isImm()) return false;
832 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
833 if (!CE) return false;
834 uint64_t Value = CE->getValue();
836 // MOVZ takes precedence over MOVN.
837 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
838 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
843 Value &= 0xffffffffULL;
845 return (Value & ~(0xffffULL << Shift)) == 0;
848 bool isFPImm() const { return Kind == k_FPImm; }
849 bool isBarrier() const { return Kind == k_Barrier; }
850 bool isSysReg() const { return Kind == k_SysReg; }
851 bool isMRSSystemRegister() const {
852 if (!isSysReg()) return false;
854 return SysReg.MRSReg != -1U;
856 bool isMSRSystemRegister() const {
857 if (!isSysReg()) return false;
859 return SysReg.MSRReg != -1U;
861 bool isSystemPStateField() const {
862 if (!isSysReg()) return false;
864 return SysReg.PStateField != -1U;
866 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
867 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
868 bool isVectorRegLo() const {
869 return Kind == k_Register && Reg.isVector &&
870 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
873 bool isGPR32as64() const {
874 return Kind == k_Register && !Reg.isVector &&
875 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
878 bool isGPR64sp0() const {
879 return Kind == k_Register && !Reg.isVector &&
880 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
883 /// Is this a vector list with the type implicit (presumably attached to the
884 /// instruction itself)?
885 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
886 return Kind == k_VectorList && VectorList.Count == NumRegs &&
887 !VectorList.ElementKind;
890 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
891 bool isTypedVectorList() const {
892 if (Kind != k_VectorList)
894 if (VectorList.Count != NumRegs)
896 if (VectorList.ElementKind != ElementKind)
898 return VectorList.NumElements == NumElements;
901 bool isVectorIndex1() const {
902 return Kind == k_VectorIndex && VectorIndex.Val == 1;
904 bool isVectorIndexB() const {
905 return Kind == k_VectorIndex && VectorIndex.Val < 16;
907 bool isVectorIndexH() const {
908 return Kind == k_VectorIndex && VectorIndex.Val < 8;
910 bool isVectorIndexS() const {
911 return Kind == k_VectorIndex && VectorIndex.Val < 4;
913 bool isVectorIndexD() const {
914 return Kind == k_VectorIndex && VectorIndex.Val < 2;
916 bool isToken() const override { return Kind == k_Token; }
917 bool isTokenEqual(StringRef Str) const {
918 return Kind == k_Token && getToken() == Str;
920 bool isSysCR() const { return Kind == k_SysCR; }
921 bool isPrefetch() const { return Kind == k_Prefetch; }
922 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
923 bool isShifter() const {
924 if (!isShiftExtend())
927 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
928 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
929 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
930 ST == AArch64_AM::MSL);
932 bool isExtend() const {
933 if (!isShiftExtend())
936 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
937 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
938 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
939 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
940 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
941 ET == AArch64_AM::LSL) &&
942 getShiftExtendAmount() <= 4;
945 bool isExtend64() const {
948 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
949 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
950 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
952 bool isExtendLSL64() const {
955 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
956 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
957 ET == AArch64_AM::LSL) &&
958 getShiftExtendAmount() <= 4;
961 template<int Width> bool isMemXExtend() const {
964 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
965 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
966 (getShiftExtendAmount() == Log2_32(Width / 8) ||
967 getShiftExtendAmount() == 0);
970 template<int Width> bool isMemWExtend() const {
973 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
974 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
975 (getShiftExtendAmount() == Log2_32(Width / 8) ||
976 getShiftExtendAmount() == 0);
979 template <unsigned width>
980 bool isArithmeticShifter() const {
984 // An arithmetic shifter is LSL, LSR, or ASR.
985 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
986 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
987 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
990 template <unsigned width>
991 bool isLogicalShifter() const {
995 // A logical shifter is LSL, LSR, ASR or ROR.
996 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
997 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
998 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
999 getShiftExtendAmount() < width;
1002 bool isMovImm32Shifter() const {
1006 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1007 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1008 if (ST != AArch64_AM::LSL)
1010 uint64_t Val = getShiftExtendAmount();
1011 return (Val == 0 || Val == 16);
1014 bool isMovImm64Shifter() const {
1018 // A MOVi shifter is LSL of 0 or 16.
1019 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1020 if (ST != AArch64_AM::LSL)
1022 uint64_t Val = getShiftExtendAmount();
1023 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1026 bool isLogicalVecShifter() const {
1030 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1031 unsigned Shift = getShiftExtendAmount();
1032 return getShiftExtendType() == AArch64_AM::LSL &&
1033 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1036 bool isLogicalVecHalfWordShifter() const {
1037 if (!isLogicalVecShifter())
1040 // A logical vector shifter is a left shift by 0 or 8.
1041 unsigned Shift = getShiftExtendAmount();
1042 return getShiftExtendType() == AArch64_AM::LSL &&
1043 (Shift == 0 || Shift == 8);
1046 bool isMoveVecShifter() const {
1047 if (!isShiftExtend())
1050 // A logical vector shifter is a left shift by 8 or 16.
1051 unsigned Shift = getShiftExtendAmount();
1052 return getShiftExtendType() == AArch64_AM::MSL &&
1053 (Shift == 8 || Shift == 16);
1056 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1057 // to LDUR/STUR when the offset is not legal for the former but is for
1058 // the latter. As such, in addition to checking for being a legal unscaled
1059 // address, also check that it is not a legal scaled address. This avoids
1060 // ambiguity in the matcher.
1062 bool isSImm9OffsetFB() const {
1063 return isSImm9() && !isUImm12Offset<Width / 8>();
1066 bool isAdrpLabel() const {
1067 // Validation was handled during parsing, so we just sanity check that
1068 // something didn't go haywire.
1072 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1073 int64_t Val = CE->getValue();
1074 int64_t Min = - (4096 * (1LL << (21 - 1)));
1075 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1076 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1082 bool isAdrLabel() const {
1083 // Validation was handled during parsing, so we just sanity check that
1084 // something didn't go haywire.
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
1090 int64_t Min = - (1LL << (21 - 1));
1091 int64_t Max = ((1LL << (21 - 1)) - 1);
1092 return Val >= Min && Val <= Max;
1098 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1099 // Add as immediates when possible. Null MCExpr = 0.
1101 Inst.addOperand(MCOperand::CreateImm(0));
1102 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1103 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1105 Inst.addOperand(MCOperand::CreateExpr(Expr));
1108 void addRegOperands(MCInst &Inst, unsigned N) const {
1109 assert(N == 1 && "Invalid number of operands!");
1110 Inst.addOperand(MCOperand::CreateReg(getReg()));
1113 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1114 assert(N == 1 && "Invalid number of operands!");
1116 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1118 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1119 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1120 RI->getEncodingValue(getReg()));
1122 Inst.addOperand(MCOperand::CreateReg(Reg));
1125 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1128 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1129 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1132 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1133 assert(N == 1 && "Invalid number of operands!");
1135 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1136 Inst.addOperand(MCOperand::CreateReg(getReg()));
1139 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1141 Inst.addOperand(MCOperand::CreateReg(getReg()));
1144 template <unsigned NumRegs>
1145 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1146 assert(N == 1 && "Invalid number of operands!");
1147 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1148 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1149 unsigned FirstReg = FirstRegs[NumRegs - 1];
1152 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1155 template <unsigned NumRegs>
1156 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1158 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1159 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1160 unsigned FirstReg = FirstRegs[NumRegs - 1];
1163 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1166 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1168 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1171 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1172 assert(N == 1 && "Invalid number of operands!");
1173 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1176 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1177 assert(N == 1 && "Invalid number of operands!");
1178 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1181 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1186 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1191 void addImmOperands(MCInst &Inst, unsigned N) const {
1192 assert(N == 1 && "Invalid number of operands!");
1193 // If this is a pageoff symrefexpr with an addend, adjust the addend
1194 // to be only the page-offset portion. Otherwise, just add the expr
1196 addExpr(Inst, getImm());
1199 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1200 assert(N == 2 && "Invalid number of operands!");
1201 if (isShiftedImm()) {
1202 addExpr(Inst, getShiftedImmVal());
1203 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1205 addExpr(Inst, getImm());
1206 Inst.addOperand(MCOperand::CreateImm(0));
1210 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1211 assert(N == 1 && "Invalid number of operands!");
1212 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1215 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1216 assert(N == 1 && "Invalid number of operands!");
1217 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1219 addExpr(Inst, getImm());
1221 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1224 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1225 addImmOperands(Inst, N);
1229 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1230 assert(N == 1 && "Invalid number of operands!");
1231 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1234 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1237 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1240 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1243 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1246 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1249 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1252 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1255 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1258 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1261 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1264 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1265 assert(N == 1 && "Invalid number of operands!");
1266 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1267 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1270 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1271 assert(N == 1 && "Invalid number of operands!");
1272 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1273 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1276 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1277 assert(N == 1 && "Invalid number of operands!");
1278 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1279 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1282 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1285 assert(MCE && "Invalid constant immediate operand!");
1286 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1295 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1301 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1307 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1313 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1319 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1325 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1331 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1337 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1340 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1343 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1346 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1349 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1353 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1354 Inst.addOperand(MCOperand::CreateImm(encoding));
1357 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1361 Inst.addOperand(MCOperand::CreateImm(encoding));
1364 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1368 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1369 Inst.addOperand(MCOperand::CreateImm(encoding));
1372 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1376 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1377 Inst.addOperand(MCOperand::CreateImm(encoding));
1380 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1383 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1384 Inst.addOperand(MCOperand::CreateImm(encoding));
1387 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1388 // Branch operands don't encode the low bits, so shift them off
1389 // here. If it's a label, however, just put it on directly as there's
1390 // not enough information now to do anything.
1391 assert(N == 1 && "Invalid number of operands!");
1392 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1394 addExpr(Inst, getImm());
1397 assert(MCE && "Invalid constant immediate operand!");
1398 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1401 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 addExpr(Inst, getImm());
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1415 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1416 // Branch operands don't encode the low bits, so shift them off
1417 // here. If it's a label, however, just put it on directly as there's
1418 // not enough information now to do anything.
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 addExpr(Inst, getImm());
1425 assert(MCE && "Invalid constant immediate operand!");
1426 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1429 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1430 assert(N == 1 && "Invalid number of operands!");
1431 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1434 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1435 assert(N == 1 && "Invalid number of operands!");
1436 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1439 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1440 assert(N == 1 && "Invalid number of operands!");
1442 Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
1445 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1446 assert(N == 1 && "Invalid number of operands!");
1448 Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
1451 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1452 assert(N == 1 && "Invalid number of operands!");
1454 Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
1457 void addSysCROperands(MCInst &Inst, unsigned N) const {
1458 assert(N == 1 && "Invalid number of operands!");
1459 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1462 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1463 assert(N == 1 && "Invalid number of operands!");
1464 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1467 void addShifterOperands(MCInst &Inst, unsigned N) const {
1468 assert(N == 1 && "Invalid number of operands!");
1470 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1471 Inst.addOperand(MCOperand::CreateImm(Imm));
1474 void addExtendOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1476 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1477 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1478 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1479 Inst.addOperand(MCOperand::CreateImm(Imm));
1482 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1483 assert(N == 1 && "Invalid number of operands!");
1484 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1485 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1486 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1487 Inst.addOperand(MCOperand::CreateImm(Imm));
1490 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1491 assert(N == 2 && "Invalid number of operands!");
1492 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1493 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1494 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1495 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1498 // For 8-bit load/store instructions with a register offset, both the
1499 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1500 // they're disambiguated by whether the shift was explicit or implicit rather
1502 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1503 assert(N == 2 && "Invalid number of operands!");
1504 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1505 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1506 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1507 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1511 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1512 assert(N == 1 && "Invalid number of operands!");
1514 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1515 uint64_t Value = CE->getValue();
1516 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1520 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1521 assert(N == 1 && "Invalid number of operands!");
1523 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1524 uint64_t Value = CE->getValue();
1525 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1528 void print(raw_ostream &OS) const override;
1530 static std::unique_ptr<AArch64Operand>
1531 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1532 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1533 Op->Tok.Data = Str.data();
1534 Op->Tok.Length = Str.size();
1535 Op->Tok.IsSuffix = IsSuffix;
1541 static std::unique_ptr<AArch64Operand>
1542 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1543 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1544 Op->Reg.RegNum = RegNum;
1545 Op->Reg.isVector = isVector;
1551 static std::unique_ptr<AArch64Operand>
1552 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1553 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1554 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1555 Op->VectorList.RegNum = RegNum;
1556 Op->VectorList.Count = Count;
1557 Op->VectorList.NumElements = NumElements;
1558 Op->VectorList.ElementKind = ElementKind;
1564 static std::unique_ptr<AArch64Operand>
1565 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1566 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1567 Op->VectorIndex.Val = Idx;
1573 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1574 SMLoc E, MCContext &Ctx) {
1575 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1582 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1583 unsigned ShiftAmount,
1586 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1587 Op->ShiftedImm .Val = Val;
1588 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1594 static std::unique_ptr<AArch64Operand>
1595 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1596 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1597 Op->CondCode.Code = Code;
1603 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1605 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1606 Op->FPImm.Val = Val;
1612 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1616 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1617 Op->Barrier.Val = Val;
1618 Op->Barrier.Data = Str.data();
1619 Op->Barrier.Length = Str.size();
1625 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1628 uint32_t PStateField,
1630 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1631 Op->SysReg.Data = Str.data();
1632 Op->SysReg.Length = Str.size();
1633 Op->SysReg.MRSReg = MRSReg;
1634 Op->SysReg.MSRReg = MSRReg;
1635 Op->SysReg.PStateField = PStateField;
1641 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1642 SMLoc E, MCContext &Ctx) {
1643 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1644 Op->SysCRImm.Val = Val;
1650 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1654 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1655 Op->Prefetch.Val = Val;
1656 Op->Barrier.Data = Str.data();
1657 Op->Barrier.Length = Str.size();
1663 static std::unique_ptr<AArch64Operand>
1664 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1665 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1667 Op->ShiftExtend.Type = ShOp;
1668 Op->ShiftExtend.Amount = Val;
1669 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1676 } // end anonymous namespace.
1678 void AArch64Operand::print(raw_ostream &OS) const {
1681 OS << "<fpimm " << getFPImm() << "("
1682 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1685 StringRef Name = getBarrierName();
1687 OS << "<barrier " << Name << ">";
1689 OS << "<barrier invalid #" << getBarrier() << ">";
1693 getImm()->print(OS);
1695 case k_ShiftedImm: {
1696 unsigned Shift = getShiftedImmShift();
1697 OS << "<shiftedimm ";
1698 getShiftedImmVal()->print(OS);
1699 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1703 OS << "<condcode " << getCondCode() << ">";
1706 OS << "<register " << getReg() << ">";
1708 case k_VectorList: {
1709 OS << "<vectorlist ";
1710 unsigned Reg = getVectorListStart();
1711 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1712 OS << Reg + i << " ";
1717 OS << "<vectorindex " << getVectorIndex() << ">";
1720 OS << "<sysreg: " << getSysReg() << '>';
1723 OS << "'" << getToken() << "'";
1726 OS << "c" << getSysCR();
1729 StringRef Name = getPrefetchName();
1731 OS << "<prfop " << Name << ">";
1733 OS << "<prfop invalid #" << getPrefetch() << ">";
1736 case k_ShiftExtend: {
1737 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1738 << getShiftExtendAmount();
1739 if (!hasShiftExtendAmount())
1747 /// @name Auto-generated Match Functions
1750 static unsigned MatchRegisterName(StringRef Name);
1754 static unsigned matchVectorRegName(StringRef Name) {
1755 return StringSwitch<unsigned>(Name)
1756 .Case("v0", AArch64::Q0)
1757 .Case("v1", AArch64::Q1)
1758 .Case("v2", AArch64::Q2)
1759 .Case("v3", AArch64::Q3)
1760 .Case("v4", AArch64::Q4)
1761 .Case("v5", AArch64::Q5)
1762 .Case("v6", AArch64::Q6)
1763 .Case("v7", AArch64::Q7)
1764 .Case("v8", AArch64::Q8)
1765 .Case("v9", AArch64::Q9)
1766 .Case("v10", AArch64::Q10)
1767 .Case("v11", AArch64::Q11)
1768 .Case("v12", AArch64::Q12)
1769 .Case("v13", AArch64::Q13)
1770 .Case("v14", AArch64::Q14)
1771 .Case("v15", AArch64::Q15)
1772 .Case("v16", AArch64::Q16)
1773 .Case("v17", AArch64::Q17)
1774 .Case("v18", AArch64::Q18)
1775 .Case("v19", AArch64::Q19)
1776 .Case("v20", AArch64::Q20)
1777 .Case("v21", AArch64::Q21)
1778 .Case("v22", AArch64::Q22)
1779 .Case("v23", AArch64::Q23)
1780 .Case("v24", AArch64::Q24)
1781 .Case("v25", AArch64::Q25)
1782 .Case("v26", AArch64::Q26)
1783 .Case("v27", AArch64::Q27)
1784 .Case("v28", AArch64::Q28)
1785 .Case("v29", AArch64::Q29)
1786 .Case("v30", AArch64::Q30)
1787 .Case("v31", AArch64::Q31)
1791 static bool isValidVectorKind(StringRef Name) {
1792 return StringSwitch<bool>(Name.lower())
1802 // Accept the width neutral ones, too, for verbose syntax. If those
1803 // aren't used in the right places, the token operand won't match so
1804 // all will work out.
1812 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1813 char &ElementKind) {
1814 assert(isValidVectorKind(Name));
1816 ElementKind = Name.lower()[Name.size() - 1];
1819 if (Name.size() == 2)
1822 // Parse the lane count
1823 Name = Name.drop_front();
1824 while (isdigit(Name.front())) {
1825 NumElements = 10 * NumElements + (Name.front() - '0');
1826 Name = Name.drop_front();
1830 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1832 StartLoc = getLoc();
1833 RegNo = tryParseRegister();
1834 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1835 return (RegNo == (unsigned)-1);
1838 // Matches a register name or register alias previously defined by '.req'
1839 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1841 unsigned RegNum = isVector ? matchVectorRegName(Name)
1842 : MatchRegisterName(Name);
1845 // Check for aliases registered via .req. Canonicalize to lower case.
1846 // That's more consistent since register names are case insensitive, and
1847 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1848 auto Entry = RegisterReqs.find(Name.lower());
1849 if (Entry == RegisterReqs.end())
1851 // set RegNum if the match is the right kind of register
1852 if (isVector == Entry->getValue().first)
1853 RegNum = Entry->getValue().second;
1858 /// tryParseRegister - Try to parse a register name. The token must be an
1859 /// Identifier when called, and if it is a register name the token is eaten and
1860 /// the register is added to the operand list.
1861 int AArch64AsmParser::tryParseRegister() {
1862 MCAsmParser &Parser = getParser();
1863 const AsmToken &Tok = Parser.getTok();
1864 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1866 std::string lowerCase = Tok.getString().lower();
1867 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1868 // Also handle a few aliases of registers.
1870 RegNum = StringSwitch<unsigned>(lowerCase)
1871 .Case("fp", AArch64::FP)
1872 .Case("lr", AArch64::LR)
1873 .Case("x31", AArch64::XZR)
1874 .Case("w31", AArch64::WZR)
1880 Parser.Lex(); // Eat identifier token.
1884 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1885 /// kind specifier. If it is a register specifier, eat the token and return it.
1886 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1887 MCAsmParser &Parser = getParser();
1888 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1889 TokError("vector register expected");
1893 StringRef Name = Parser.getTok().getString();
1894 // If there is a kind specifier, it's separated from the register name by
1896 size_t Start = 0, Next = Name.find('.');
1897 StringRef Head = Name.slice(Start, Next);
1898 unsigned RegNum = matchRegisterNameAlias(Head, true);
1901 if (Next != StringRef::npos) {
1902 Kind = Name.slice(Next, StringRef::npos);
1903 if (!isValidVectorKind(Kind)) {
1904 TokError("invalid vector kind qualifier");
1908 Parser.Lex(); // Eat the register token.
1913 TokError("vector register expected");
1917 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1918 AArch64AsmParser::OperandMatchResultTy
1919 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1920 MCAsmParser &Parser = getParser();
1923 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1924 Error(S, "Expected cN operand where 0 <= N <= 15");
1925 return MatchOperand_ParseFail;
1928 StringRef Tok = Parser.getTok().getIdentifier();
1929 if (Tok[0] != 'c' && Tok[0] != 'C') {
1930 Error(S, "Expected cN operand where 0 <= N <= 15");
1931 return MatchOperand_ParseFail;
1935 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1936 if (BadNum || CRNum > 15) {
1937 Error(S, "Expected cN operand where 0 <= N <= 15");
1938 return MatchOperand_ParseFail;
1941 Parser.Lex(); // Eat identifier token.
1943 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1944 return MatchOperand_Success;
1947 /// tryParsePrefetch - Try to parse a prefetch operand.
1948 AArch64AsmParser::OperandMatchResultTy
1949 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1950 MCAsmParser &Parser = getParser();
1952 const AsmToken &Tok = Parser.getTok();
1953 // Either an identifier for named values or a 5-bit immediate.
1954 bool Hash = Tok.is(AsmToken::Hash);
1955 if (Hash || Tok.is(AsmToken::Integer)) {
1957 Parser.Lex(); // Eat hash token.
1958 const MCExpr *ImmVal;
1959 if (getParser().parseExpression(ImmVal))
1960 return MatchOperand_ParseFail;
1962 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1964 TokError("immediate value expected for prefetch operand");
1965 return MatchOperand_ParseFail;
1967 unsigned prfop = MCE->getValue();
1969 TokError("prefetch operand out of range, [0,31] expected");
1970 return MatchOperand_ParseFail;
1974 auto Mapper = AArch64PRFM::PRFMMapper();
1975 StringRef Name = Mapper.toString(MCE->getValue(), Valid);
1976 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
1978 return MatchOperand_Success;
1981 if (Tok.isNot(AsmToken::Identifier)) {
1982 TokError("pre-fetch hint expected");
1983 return MatchOperand_ParseFail;
1987 auto Mapper = AArch64PRFM::PRFMMapper();
1988 unsigned prfop = Mapper.fromString(Tok.getString(), Valid);
1990 TokError("pre-fetch hint expected");
1991 return MatchOperand_ParseFail;
1994 Parser.Lex(); // Eat identifier token.
1995 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
1997 return MatchOperand_Success;
2000 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2002 AArch64AsmParser::OperandMatchResultTy
2003 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2004 MCAsmParser &Parser = getParser();
2008 if (Parser.getTok().is(AsmToken::Hash)) {
2009 Parser.Lex(); // Eat hash token.
2012 if (parseSymbolicImmVal(Expr))
2013 return MatchOperand_ParseFail;
2015 AArch64MCExpr::VariantKind ELFRefKind;
2016 MCSymbolRefExpr::VariantKind DarwinRefKind;
2018 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2019 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2020 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2021 // No modifier was specified at all; this is the syntax for an ELF basic
2022 // ADRP relocation (unfortunately).
2024 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2025 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2026 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2028 Error(S, "gotpage label reference not allowed an addend");
2029 return MatchOperand_ParseFail;
2030 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2031 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2032 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2033 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2034 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2035 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2036 // The operand must be an @page or @gotpage qualified symbolref.
2037 Error(S, "page or gotpage label reference expected");
2038 return MatchOperand_ParseFail;
2042 // We have either a label reference possibly with addend or an immediate. The
2043 // addend is a raw value here. The linker will adjust it to only reference the
2045 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2046 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2048 return MatchOperand_Success;
2051 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2053 AArch64AsmParser::OperandMatchResultTy
2054 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2055 MCAsmParser &Parser = getParser();
2059 if (Parser.getTok().is(AsmToken::Hash)) {
2060 Parser.Lex(); // Eat hash token.
2063 if (getParser().parseExpression(Expr))
2064 return MatchOperand_ParseFail;
2066 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2067 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2069 return MatchOperand_Success;
2072 /// tryParseFPImm - A floating point immediate expression operand.
2073 AArch64AsmParser::OperandMatchResultTy
2074 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2075 MCAsmParser &Parser = getParser();
2079 if (Parser.getTok().is(AsmToken::Hash)) {
2080 Parser.Lex(); // Eat '#'
2084 // Handle negation, as that still comes through as a separate token.
2085 bool isNegative = false;
2086 if (Parser.getTok().is(AsmToken::Minus)) {
2090 const AsmToken &Tok = Parser.getTok();
2091 if (Tok.is(AsmToken::Real)) {
2092 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2093 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2094 // If we had a '-' in front, toggle the sign bit.
2095 IntVal ^= (uint64_t)isNegative << 63;
2096 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2097 Parser.Lex(); // Eat the token.
2098 // Check for out of range values. As an exception, we let Zero through,
2099 // as we handle that special case in post-processing before matching in
2100 // order to use the zero register for it.
2101 if (Val == -1 && !RealVal.isZero()) {
2102 TokError("expected compatible register or floating-point constant");
2103 return MatchOperand_ParseFail;
2105 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2106 return MatchOperand_Success;
2108 if (Tok.is(AsmToken::Integer)) {
2110 if (!isNegative && Tok.getString().startswith("0x")) {
2111 Val = Tok.getIntVal();
2112 if (Val > 255 || Val < 0) {
2113 TokError("encoded floating point value out of range");
2114 return MatchOperand_ParseFail;
2117 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2118 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2119 // If we had a '-' in front, toggle the sign bit.
2120 IntVal ^= (uint64_t)isNegative << 63;
2121 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2123 Parser.Lex(); // Eat the token.
2124 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2125 return MatchOperand_Success;
2129 return MatchOperand_NoMatch;
2131 TokError("invalid floating point immediate");
2132 return MatchOperand_ParseFail;
2135 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2136 AArch64AsmParser::OperandMatchResultTy
2137 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2138 MCAsmParser &Parser = getParser();
2141 if (Parser.getTok().is(AsmToken::Hash))
2142 Parser.Lex(); // Eat '#'
2143 else if (Parser.getTok().isNot(AsmToken::Integer))
2144 // Operand should start from # or should be integer, emit error otherwise.
2145 return MatchOperand_NoMatch;
2148 if (parseSymbolicImmVal(Imm))
2149 return MatchOperand_ParseFail;
2150 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2151 uint64_t ShiftAmount = 0;
2152 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2154 int64_t Val = MCE->getValue();
2155 if (Val > 0xfff && (Val & 0xfff) == 0) {
2156 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2160 SMLoc E = Parser.getTok().getLoc();
2161 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2163 return MatchOperand_Success;
2169 // The optional operand must be "lsl #N" where N is non-negative.
2170 if (!Parser.getTok().is(AsmToken::Identifier) ||
2171 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2172 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2173 return MatchOperand_ParseFail;
2179 if (Parser.getTok().is(AsmToken::Hash)) {
2183 if (Parser.getTok().isNot(AsmToken::Integer)) {
2184 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2185 return MatchOperand_ParseFail;
2188 int64_t ShiftAmount = Parser.getTok().getIntVal();
2190 if (ShiftAmount < 0) {
2191 Error(Parser.getTok().getLoc(), "positive shift amount required");
2192 return MatchOperand_ParseFail;
2194 Parser.Lex(); // Eat the number
2196 SMLoc E = Parser.getTok().getLoc();
2197 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2198 S, E, getContext()));
2199 return MatchOperand_Success;
2202 /// parseCondCodeString - Parse a Condition Code string.
2203 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2204 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2205 .Case("eq", AArch64CC::EQ)
2206 .Case("ne", AArch64CC::NE)
2207 .Case("cs", AArch64CC::HS)
2208 .Case("hs", AArch64CC::HS)
2209 .Case("cc", AArch64CC::LO)
2210 .Case("lo", AArch64CC::LO)
2211 .Case("mi", AArch64CC::MI)
2212 .Case("pl", AArch64CC::PL)
2213 .Case("vs", AArch64CC::VS)
2214 .Case("vc", AArch64CC::VC)
2215 .Case("hi", AArch64CC::HI)
2216 .Case("ls", AArch64CC::LS)
2217 .Case("ge", AArch64CC::GE)
2218 .Case("lt", AArch64CC::LT)
2219 .Case("gt", AArch64CC::GT)
2220 .Case("le", AArch64CC::LE)
2221 .Case("al", AArch64CC::AL)
2222 .Case("nv", AArch64CC::NV)
2223 .Default(AArch64CC::Invalid);
2227 /// parseCondCode - Parse a Condition Code operand.
2228 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2229 bool invertCondCode) {
2230 MCAsmParser &Parser = getParser();
2232 const AsmToken &Tok = Parser.getTok();
2233 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2235 StringRef Cond = Tok.getString();
2236 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2237 if (CC == AArch64CC::Invalid)
2238 return TokError("invalid condition code");
2239 Parser.Lex(); // Eat identifier token.
2241 if (invertCondCode) {
2242 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2243 return TokError("condition codes AL and NV are invalid for this instruction");
2244 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2248 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2252 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2253 /// them if present.
2254 AArch64AsmParser::OperandMatchResultTy
2255 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2256 MCAsmParser &Parser = getParser();
2257 const AsmToken &Tok = Parser.getTok();
2258 std::string LowerID = Tok.getString().lower();
2259 AArch64_AM::ShiftExtendType ShOp =
2260 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2261 .Case("lsl", AArch64_AM::LSL)
2262 .Case("lsr", AArch64_AM::LSR)
2263 .Case("asr", AArch64_AM::ASR)
2264 .Case("ror", AArch64_AM::ROR)
2265 .Case("msl", AArch64_AM::MSL)
2266 .Case("uxtb", AArch64_AM::UXTB)
2267 .Case("uxth", AArch64_AM::UXTH)
2268 .Case("uxtw", AArch64_AM::UXTW)
2269 .Case("uxtx", AArch64_AM::UXTX)
2270 .Case("sxtb", AArch64_AM::SXTB)
2271 .Case("sxth", AArch64_AM::SXTH)
2272 .Case("sxtw", AArch64_AM::SXTW)
2273 .Case("sxtx", AArch64_AM::SXTX)
2274 .Default(AArch64_AM::InvalidShiftExtend);
2276 if (ShOp == AArch64_AM::InvalidShiftExtend)
2277 return MatchOperand_NoMatch;
2279 SMLoc S = Tok.getLoc();
2282 bool Hash = getLexer().is(AsmToken::Hash);
2283 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2284 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2285 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2286 ShOp == AArch64_AM::MSL) {
2287 // We expect a number here.
2288 TokError("expected #imm after shift specifier");
2289 return MatchOperand_ParseFail;
2292 // "extend" type operatoins don't need an immediate, #0 is implicit.
2293 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2295 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2296 return MatchOperand_Success;
2300 Parser.Lex(); // Eat the '#'.
2302 // Make sure we do actually have a number or a parenthesized expression.
2303 SMLoc E = Parser.getTok().getLoc();
2304 if (!Parser.getTok().is(AsmToken::Integer) &&
2305 !Parser.getTok().is(AsmToken::LParen)) {
2306 Error(E, "expected integer shift amount");
2307 return MatchOperand_ParseFail;
2310 const MCExpr *ImmVal;
2311 if (getParser().parseExpression(ImmVal))
2312 return MatchOperand_ParseFail;
2314 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2316 Error(E, "expected constant '#imm' after shift specifier");
2317 return MatchOperand_ParseFail;
2320 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2321 Operands.push_back(AArch64Operand::CreateShiftExtend(
2322 ShOp, MCE->getValue(), true, S, E, getContext()));
2323 return MatchOperand_Success;
2326 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2327 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2328 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2329 OperandVector &Operands) {
2330 if (Name.find('.') != StringRef::npos)
2331 return TokError("invalid operand");
2335 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2337 MCAsmParser &Parser = getParser();
2338 const AsmToken &Tok = Parser.getTok();
2339 StringRef Op = Tok.getString();
2340 SMLoc S = Tok.getLoc();
2342 const MCExpr *Expr = nullptr;
2344 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2346 Expr = MCConstantExpr::Create(op1, getContext()); \
2347 Operands.push_back( \
2348 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2349 Operands.push_back( \
2350 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2351 Operands.push_back( \
2352 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2353 Expr = MCConstantExpr::Create(op2, getContext()); \
2354 Operands.push_back( \
2355 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2358 if (Mnemonic == "ic") {
2359 if (!Op.compare_lower("ialluis")) {
2360 // SYS #0, C7, C1, #0
2361 SYS_ALIAS(0, 7, 1, 0);
2362 } else if (!Op.compare_lower("iallu")) {
2363 // SYS #0, C7, C5, #0
2364 SYS_ALIAS(0, 7, 5, 0);
2365 } else if (!Op.compare_lower("ivau")) {
2366 // SYS #3, C7, C5, #1
2367 SYS_ALIAS(3, 7, 5, 1);
2369 return TokError("invalid operand for IC instruction");
2371 } else if (Mnemonic == "dc") {
2372 if (!Op.compare_lower("zva")) {
2373 // SYS #3, C7, C4, #1
2374 SYS_ALIAS(3, 7, 4, 1);
2375 } else if (!Op.compare_lower("ivac")) {
2376 // SYS #3, C7, C6, #1
2377 SYS_ALIAS(0, 7, 6, 1);
2378 } else if (!Op.compare_lower("isw")) {
2379 // SYS #0, C7, C6, #2
2380 SYS_ALIAS(0, 7, 6, 2);
2381 } else if (!Op.compare_lower("cvac")) {
2382 // SYS #3, C7, C10, #1
2383 SYS_ALIAS(3, 7, 10, 1);
2384 } else if (!Op.compare_lower("csw")) {
2385 // SYS #0, C7, C10, #2
2386 SYS_ALIAS(0, 7, 10, 2);
2387 } else if (!Op.compare_lower("cvau")) {
2388 // SYS #3, C7, C11, #1
2389 SYS_ALIAS(3, 7, 11, 1);
2390 } else if (!Op.compare_lower("civac")) {
2391 // SYS #3, C7, C14, #1
2392 SYS_ALIAS(3, 7, 14, 1);
2393 } else if (!Op.compare_lower("cisw")) {
2394 // SYS #0, C7, C14, #2
2395 SYS_ALIAS(0, 7, 14, 2);
2397 return TokError("invalid operand for DC instruction");
2399 } else if (Mnemonic == "at") {
2400 if (!Op.compare_lower("s1e1r")) {
2401 // SYS #0, C7, C8, #0
2402 SYS_ALIAS(0, 7, 8, 0);
2403 } else if (!Op.compare_lower("s1e2r")) {
2404 // SYS #4, C7, C8, #0
2405 SYS_ALIAS(4, 7, 8, 0);
2406 } else if (!Op.compare_lower("s1e3r")) {
2407 // SYS #6, C7, C8, #0
2408 SYS_ALIAS(6, 7, 8, 0);
2409 } else if (!Op.compare_lower("s1e1w")) {
2410 // SYS #0, C7, C8, #1
2411 SYS_ALIAS(0, 7, 8, 1);
2412 } else if (!Op.compare_lower("s1e2w")) {
2413 // SYS #4, C7, C8, #1
2414 SYS_ALIAS(4, 7, 8, 1);
2415 } else if (!Op.compare_lower("s1e3w")) {
2416 // SYS #6, C7, C8, #1
2417 SYS_ALIAS(6, 7, 8, 1);
2418 } else if (!Op.compare_lower("s1e0r")) {
2419 // SYS #0, C7, C8, #3
2420 SYS_ALIAS(0, 7, 8, 2);
2421 } else if (!Op.compare_lower("s1e0w")) {
2422 // SYS #0, C7, C8, #3
2423 SYS_ALIAS(0, 7, 8, 3);
2424 } else if (!Op.compare_lower("s12e1r")) {
2425 // SYS #4, C7, C8, #4
2426 SYS_ALIAS(4, 7, 8, 4);
2427 } else if (!Op.compare_lower("s12e1w")) {
2428 // SYS #4, C7, C8, #5
2429 SYS_ALIAS(4, 7, 8, 5);
2430 } else if (!Op.compare_lower("s12e0r")) {
2431 // SYS #4, C7, C8, #6
2432 SYS_ALIAS(4, 7, 8, 6);
2433 } else if (!Op.compare_lower("s12e0w")) {
2434 // SYS #4, C7, C8, #7
2435 SYS_ALIAS(4, 7, 8, 7);
2437 return TokError("invalid operand for AT instruction");
2439 } else if (Mnemonic == "tlbi") {
2440 if (!Op.compare_lower("vmalle1is")) {
2441 // SYS #0, C8, C3, #0
2442 SYS_ALIAS(0, 8, 3, 0);
2443 } else if (!Op.compare_lower("alle2is")) {
2444 // SYS #4, C8, C3, #0
2445 SYS_ALIAS(4, 8, 3, 0);
2446 } else if (!Op.compare_lower("alle3is")) {
2447 // SYS #6, C8, C3, #0
2448 SYS_ALIAS(6, 8, 3, 0);
2449 } else if (!Op.compare_lower("vae1is")) {
2450 // SYS #0, C8, C3, #1
2451 SYS_ALIAS(0, 8, 3, 1);
2452 } else if (!Op.compare_lower("vae2is")) {
2453 // SYS #4, C8, C3, #1
2454 SYS_ALIAS(4, 8, 3, 1);
2455 } else if (!Op.compare_lower("vae3is")) {
2456 // SYS #6, C8, C3, #1
2457 SYS_ALIAS(6, 8, 3, 1);
2458 } else if (!Op.compare_lower("aside1is")) {
2459 // SYS #0, C8, C3, #2
2460 SYS_ALIAS(0, 8, 3, 2);
2461 } else if (!Op.compare_lower("vaae1is")) {
2462 // SYS #0, C8, C3, #3
2463 SYS_ALIAS(0, 8, 3, 3);
2464 } else if (!Op.compare_lower("alle1is")) {
2465 // SYS #4, C8, C3, #4
2466 SYS_ALIAS(4, 8, 3, 4);
2467 } else if (!Op.compare_lower("vale1is")) {
2468 // SYS #0, C8, C3, #5
2469 SYS_ALIAS(0, 8, 3, 5);
2470 } else if (!Op.compare_lower("vaale1is")) {
2471 // SYS #0, C8, C3, #7
2472 SYS_ALIAS(0, 8, 3, 7);
2473 } else if (!Op.compare_lower("vmalle1")) {
2474 // SYS #0, C8, C7, #0
2475 SYS_ALIAS(0, 8, 7, 0);
2476 } else if (!Op.compare_lower("alle2")) {
2477 // SYS #4, C8, C7, #0
2478 SYS_ALIAS(4, 8, 7, 0);
2479 } else if (!Op.compare_lower("vale2is")) {
2480 // SYS #4, C8, C3, #5
2481 SYS_ALIAS(4, 8, 3, 5);
2482 } else if (!Op.compare_lower("vale3is")) {
2483 // SYS #6, C8, C3, #5
2484 SYS_ALIAS(6, 8, 3, 5);
2485 } else if (!Op.compare_lower("alle3")) {
2486 // SYS #6, C8, C7, #0
2487 SYS_ALIAS(6, 8, 7, 0);
2488 } else if (!Op.compare_lower("vae1")) {
2489 // SYS #0, C8, C7, #1
2490 SYS_ALIAS(0, 8, 7, 1);
2491 } else if (!Op.compare_lower("vae2")) {
2492 // SYS #4, C8, C7, #1
2493 SYS_ALIAS(4, 8, 7, 1);
2494 } else if (!Op.compare_lower("vae3")) {
2495 // SYS #6, C8, C7, #1
2496 SYS_ALIAS(6, 8, 7, 1);
2497 } else if (!Op.compare_lower("aside1")) {
2498 // SYS #0, C8, C7, #2
2499 SYS_ALIAS(0, 8, 7, 2);
2500 } else if (!Op.compare_lower("vaae1")) {
2501 // SYS #0, C8, C7, #3
2502 SYS_ALIAS(0, 8, 7, 3);
2503 } else if (!Op.compare_lower("alle1")) {
2504 // SYS #4, C8, C7, #4
2505 SYS_ALIAS(4, 8, 7, 4);
2506 } else if (!Op.compare_lower("vale1")) {
2507 // SYS #0, C8, C7, #5
2508 SYS_ALIAS(0, 8, 7, 5);
2509 } else if (!Op.compare_lower("vale2")) {
2510 // SYS #4, C8, C7, #5
2511 SYS_ALIAS(4, 8, 7, 5);
2512 } else if (!Op.compare_lower("vale3")) {
2513 // SYS #6, C8, C7, #5
2514 SYS_ALIAS(6, 8, 7, 5);
2515 } else if (!Op.compare_lower("vaale1")) {
2516 // SYS #0, C8, C7, #7
2517 SYS_ALIAS(0, 8, 7, 7);
2518 } else if (!Op.compare_lower("ipas2e1")) {
2519 // SYS #4, C8, C4, #1
2520 SYS_ALIAS(4, 8, 4, 1);
2521 } else if (!Op.compare_lower("ipas2le1")) {
2522 // SYS #4, C8, C4, #5
2523 SYS_ALIAS(4, 8, 4, 5);
2524 } else if (!Op.compare_lower("ipas2e1is")) {
2525 // SYS #4, C8, C4, #1
2526 SYS_ALIAS(4, 8, 0, 1);
2527 } else if (!Op.compare_lower("ipas2le1is")) {
2528 // SYS #4, C8, C4, #5
2529 SYS_ALIAS(4, 8, 0, 5);
2530 } else if (!Op.compare_lower("vmalls12e1")) {
2531 // SYS #4, C8, C7, #6
2532 SYS_ALIAS(4, 8, 7, 6);
2533 } else if (!Op.compare_lower("vmalls12e1is")) {
2534 // SYS #4, C8, C3, #6
2535 SYS_ALIAS(4, 8, 3, 6);
2537 return TokError("invalid operand for TLBI instruction");
2543 Parser.Lex(); // Eat operand.
2545 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2546 bool HasRegister = false;
2548 // Check for the optional register operand.
2549 if (getLexer().is(AsmToken::Comma)) {
2550 Parser.Lex(); // Eat comma.
2552 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2553 return TokError("expected register operand");
2558 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2559 Parser.eatToEndOfStatement();
2560 return TokError("unexpected token in argument list");
2563 if (ExpectRegister && !HasRegister) {
2564 return TokError("specified " + Mnemonic + " op requires a register");
2566 else if (!ExpectRegister && HasRegister) {
2567 return TokError("specified " + Mnemonic + " op does not use a register");
2570 Parser.Lex(); // Consume the EndOfStatement
2574 AArch64AsmParser::OperandMatchResultTy
2575 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2576 MCAsmParser &Parser = getParser();
2577 const AsmToken &Tok = Parser.getTok();
2579 // Can be either a #imm style literal or an option name
2580 bool Hash = Tok.is(AsmToken::Hash);
2581 if (Hash || Tok.is(AsmToken::Integer)) {
2582 // Immediate operand.
2584 Parser.Lex(); // Eat the '#'
2585 const MCExpr *ImmVal;
2586 SMLoc ExprLoc = getLoc();
2587 if (getParser().parseExpression(ImmVal))
2588 return MatchOperand_ParseFail;
2589 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2591 Error(ExprLoc, "immediate value expected for barrier operand");
2592 return MatchOperand_ParseFail;
2594 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2595 Error(ExprLoc, "barrier operand out of range");
2596 return MatchOperand_ParseFail;
2599 auto Mapper = AArch64DB::DBarrierMapper();
2600 StringRef Name = Mapper.toString(MCE->getValue(), Valid);
2601 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2602 ExprLoc, getContext()));
2603 return MatchOperand_Success;
2606 if (Tok.isNot(AsmToken::Identifier)) {
2607 TokError("invalid operand for instruction");
2608 return MatchOperand_ParseFail;
2612 auto Mapper = AArch64DB::DBarrierMapper();
2613 unsigned Opt = Mapper.fromString(Tok.getString(), Valid);
2615 TokError("invalid barrier option name");
2616 return MatchOperand_ParseFail;
2619 // The only valid named option for ISB is 'sy'
2620 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2621 TokError("'sy' or #imm operand expected");
2622 return MatchOperand_ParseFail;
2625 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2626 getLoc(), getContext()));
2627 Parser.Lex(); // Consume the option
2629 return MatchOperand_Success;
2632 AArch64AsmParser::OperandMatchResultTy
2633 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2634 MCAsmParser &Parser = getParser();
2635 const AsmToken &Tok = Parser.getTok();
2637 if (Tok.isNot(AsmToken::Identifier))
2638 return MatchOperand_NoMatch;
2641 auto MRSMapper = AArch64SysReg::MRSMapper();
2642 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2644 assert(IsKnown == (MRSReg != -1U) &&
2645 "register should be -1 if and only if it's unknown");
2647 auto MSRMapper = AArch64SysReg::MSRMapper();
2648 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2650 assert(IsKnown == (MSRReg != -1U) &&
2651 "register should be -1 if and only if it's unknown");
2653 auto PStateMapper = AArch64PState::PStateMapper();
2654 uint32_t PStateField = PStateMapper.fromString(Tok.getString(), IsKnown);
2655 assert(IsKnown == (PStateField != -1U) &&
2656 "register should be -1 if and only if it's unknown");
2658 Operands.push_back(AArch64Operand::CreateSysReg(
2659 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2660 Parser.Lex(); // Eat identifier
2662 return MatchOperand_Success;
2665 /// tryParseVectorRegister - Parse a vector register operand.
2666 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2667 MCAsmParser &Parser = getParser();
2668 if (Parser.getTok().isNot(AsmToken::Identifier))
2672 // Check for a vector register specifier first.
2674 int64_t Reg = tryMatchVectorRegister(Kind, false);
2678 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2679 // If there was an explicit qualifier, that goes on as a literal text
2683 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2685 // If there is an index specifier following the register, parse that too.
2686 if (Parser.getTok().is(AsmToken::LBrac)) {
2687 SMLoc SIdx = getLoc();
2688 Parser.Lex(); // Eat left bracket token.
2690 const MCExpr *ImmVal;
2691 if (getParser().parseExpression(ImmVal))
2693 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2695 TokError("immediate value expected for vector index");
2700 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2701 Error(E, "']' expected");
2705 Parser.Lex(); // Eat right bracket token.
2707 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2714 /// parseRegister - Parse a non-vector register operand.
2715 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2716 MCAsmParser &Parser = getParser();
2718 // Try for a vector register.
2719 if (!tryParseVectorRegister(Operands))
2722 // Try for a scalar register.
2723 int64_t Reg = tryParseRegister();
2727 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2729 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2730 // as a string token in the instruction itself.
2731 if (getLexer().getKind() == AsmToken::LBrac) {
2732 SMLoc LBracS = getLoc();
2734 const AsmToken &Tok = Parser.getTok();
2735 if (Tok.is(AsmToken::Integer)) {
2736 SMLoc IntS = getLoc();
2737 int64_t Val = Tok.getIntVal();
2740 if (getLexer().getKind() == AsmToken::RBrac) {
2741 SMLoc RBracS = getLoc();
2744 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2746 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2748 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2758 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2759 MCAsmParser &Parser = getParser();
2760 bool HasELFModifier = false;
2761 AArch64MCExpr::VariantKind RefKind;
2763 if (Parser.getTok().is(AsmToken::Colon)) {
2764 Parser.Lex(); // Eat ':"
2765 HasELFModifier = true;
2767 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2768 Error(Parser.getTok().getLoc(),
2769 "expect relocation specifier in operand after ':'");
2773 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2774 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2775 .Case("lo12", AArch64MCExpr::VK_LO12)
2776 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2777 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2778 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2779 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2780 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2781 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2782 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2783 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2784 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2785 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2786 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2787 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2788 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2789 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2790 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2791 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2792 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2793 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2794 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2795 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2796 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2797 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2798 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2799 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2800 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2801 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2802 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2803 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2804 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2805 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2806 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2807 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2808 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2809 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2810 .Default(AArch64MCExpr::VK_INVALID);
2812 if (RefKind == AArch64MCExpr::VK_INVALID) {
2813 Error(Parser.getTok().getLoc(),
2814 "expect relocation specifier in operand after ':'");
2818 Parser.Lex(); // Eat identifier
2820 if (Parser.getTok().isNot(AsmToken::Colon)) {
2821 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2824 Parser.Lex(); // Eat ':'
2827 if (getParser().parseExpression(ImmVal))
2831 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2836 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2837 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2838 MCAsmParser &Parser = getParser();
2839 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2841 Parser.Lex(); // Eat left bracket token.
2843 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2846 int64_t PrevReg = FirstReg;
2849 if (Parser.getTok().is(AsmToken::Minus)) {
2850 Parser.Lex(); // Eat the minus.
2852 SMLoc Loc = getLoc();
2854 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2857 // Any Kind suffices must match on all regs in the list.
2858 if (Kind != NextKind)
2859 return Error(Loc, "mismatched register size suffix");
2861 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2863 if (Space == 0 || Space > 3) {
2864 return Error(Loc, "invalid number of vectors");
2870 while (Parser.getTok().is(AsmToken::Comma)) {
2871 Parser.Lex(); // Eat the comma token.
2873 SMLoc Loc = getLoc();
2875 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2878 // Any Kind suffices must match on all regs in the list.
2879 if (Kind != NextKind)
2880 return Error(Loc, "mismatched register size suffix");
2882 // Registers must be incremental (with wraparound at 31)
2883 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2884 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2885 return Error(Loc, "registers must be sequential");
2892 if (Parser.getTok().isNot(AsmToken::RCurly))
2893 return Error(getLoc(), "'}' expected");
2894 Parser.Lex(); // Eat the '}' token.
2897 return Error(S, "invalid number of vectors");
2899 unsigned NumElements = 0;
2900 char ElementKind = 0;
2902 parseValidVectorKind(Kind, NumElements, ElementKind);
2904 Operands.push_back(AArch64Operand::CreateVectorList(
2905 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2907 // If there is an index specifier following the list, parse that too.
2908 if (Parser.getTok().is(AsmToken::LBrac)) {
2909 SMLoc SIdx = getLoc();
2910 Parser.Lex(); // Eat left bracket token.
2912 const MCExpr *ImmVal;
2913 if (getParser().parseExpression(ImmVal))
2915 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2917 TokError("immediate value expected for vector index");
2922 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2923 Error(E, "']' expected");
2927 Parser.Lex(); // Eat right bracket token.
2929 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2935 AArch64AsmParser::OperandMatchResultTy
2936 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2937 MCAsmParser &Parser = getParser();
2938 const AsmToken &Tok = Parser.getTok();
2939 if (!Tok.is(AsmToken::Identifier))
2940 return MatchOperand_NoMatch;
2942 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2944 MCContext &Ctx = getContext();
2945 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2946 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2947 return MatchOperand_NoMatch;
2950 Parser.Lex(); // Eat register
2952 if (Parser.getTok().isNot(AsmToken::Comma)) {
2954 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2955 return MatchOperand_Success;
2957 Parser.Lex(); // Eat comma.
2959 if (Parser.getTok().is(AsmToken::Hash))
2960 Parser.Lex(); // Eat hash
2962 if (Parser.getTok().isNot(AsmToken::Integer)) {
2963 Error(getLoc(), "index must be absent or #0");
2964 return MatchOperand_ParseFail;
2967 const MCExpr *ImmVal;
2968 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2969 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2970 Error(getLoc(), "index must be absent or #0");
2971 return MatchOperand_ParseFail;
2975 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2976 return MatchOperand_Success;
2979 /// parseOperand - Parse a arm instruction operand. For now this parses the
2980 /// operand regardless of the mnemonic.
2981 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2982 bool invertCondCode) {
2983 MCAsmParser &Parser = getParser();
2984 // Check if the current operand has a custom associated parser, if so, try to
2985 // custom parse the operand, or fallback to the general approach.
2986 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2987 if (ResTy == MatchOperand_Success)
2989 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2990 // there was a match, but an error occurred, in which case, just return that
2991 // the operand parsing failed.
2992 if (ResTy == MatchOperand_ParseFail)
2995 // Nothing custom, so do general case parsing.
2997 switch (getLexer().getKind()) {
3001 if (parseSymbolicImmVal(Expr))
3002 return Error(S, "invalid operand");
3004 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3005 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3008 case AsmToken::LBrac: {
3009 SMLoc Loc = Parser.getTok().getLoc();
3010 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3012 Parser.Lex(); // Eat '['
3014 // There's no comma after a '[', so we can parse the next operand
3016 return parseOperand(Operands, false, false);
3018 case AsmToken::LCurly:
3019 return parseVectorList(Operands);
3020 case AsmToken::Identifier: {
3021 // If we're expecting a Condition Code operand, then just parse that.
3023 return parseCondCode(Operands, invertCondCode);
3025 // If it's a register name, parse it.
3026 if (!parseRegister(Operands))
3029 // This could be an optional "shift" or "extend" operand.
3030 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3031 // We can only continue if no tokens were eaten.
3032 if (GotShift != MatchOperand_NoMatch)
3035 // This was not a register so parse other operands that start with an
3036 // identifier (like labels) as expressions and create them as immediates.
3037 const MCExpr *IdVal;
3039 if (getParser().parseExpression(IdVal))
3042 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3043 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3046 case AsmToken::Integer:
3047 case AsmToken::Real:
3048 case AsmToken::Hash: {
3049 // #42 -> immediate.
3051 if (getLexer().is(AsmToken::Hash))
3054 // Parse a negative sign
3055 bool isNegative = false;
3056 if (Parser.getTok().is(AsmToken::Minus)) {
3058 // We need to consume this token only when we have a Real, otherwise
3059 // we let parseSymbolicImmVal take care of it
3060 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3064 // The only Real that should come through here is a literal #0.0 for
3065 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3066 // so convert the value.
3067 const AsmToken &Tok = Parser.getTok();
3068 if (Tok.is(AsmToken::Real)) {
3069 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3070 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3071 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3072 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3073 Mnemonic != "fcmlt")
3074 return TokError("unexpected floating point literal");
3075 else if (IntVal != 0 || isNegative)
3076 return TokError("expected floating-point constant #0.0");
3077 Parser.Lex(); // Eat the token.
3080 AArch64Operand::CreateToken("#0", false, S, getContext()));
3082 AArch64Operand::CreateToken(".0", false, S, getContext()));
3086 const MCExpr *ImmVal;
3087 if (parseSymbolicImmVal(ImmVal))
3090 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3091 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3094 case AsmToken::Equal: {
3095 SMLoc Loc = Parser.getTok().getLoc();
3096 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3097 return Error(Loc, "unexpected token in operand");
3098 Parser.Lex(); // Eat '='
3099 const MCExpr *SubExprVal;
3100 if (getParser().parseExpression(SubExprVal))
3103 if (Operands.size() < 2 ||
3104 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3108 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3109 Operands[1]->getReg());
3111 MCContext& Ctx = getContext();
3112 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3113 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3114 if (isa<MCConstantExpr>(SubExprVal)) {
3115 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3116 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3117 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3121 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3122 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3123 Operands.push_back(AArch64Operand::CreateImm(
3124 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3126 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3127 ShiftAmt, true, S, E, Ctx));
3130 APInt Simm = APInt(64, Imm << ShiftAmt);
3131 // check if the immediate is an unsigned or signed 32-bit int for W regs
3132 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3133 return Error(Loc, "Immediate too large for register");
3135 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3136 const MCExpr *CPLoc =
3137 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3138 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3144 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3146 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3147 StringRef Name, SMLoc NameLoc,
3148 OperandVector &Operands) {
3149 MCAsmParser &Parser = getParser();
3150 Name = StringSwitch<StringRef>(Name.lower())
3151 .Case("beq", "b.eq")
3152 .Case("bne", "b.ne")
3153 .Case("bhs", "b.hs")
3154 .Case("bcs", "b.cs")
3155 .Case("blo", "b.lo")
3156 .Case("bcc", "b.cc")
3157 .Case("bmi", "b.mi")
3158 .Case("bpl", "b.pl")
3159 .Case("bvs", "b.vs")
3160 .Case("bvc", "b.vc")
3161 .Case("bhi", "b.hi")
3162 .Case("bls", "b.ls")
3163 .Case("bge", "b.ge")
3164 .Case("blt", "b.lt")
3165 .Case("bgt", "b.gt")
3166 .Case("ble", "b.le")
3167 .Case("bal", "b.al")
3168 .Case("bnv", "b.nv")
3171 // First check for the AArch64-specific .req directive.
3172 if (Parser.getTok().is(AsmToken::Identifier) &&
3173 Parser.getTok().getIdentifier() == ".req") {
3174 parseDirectiveReq(Name, NameLoc);
3175 // We always return 'error' for this, as we're done with this
3176 // statement and don't need to match the 'instruction."
3180 // Create the leading tokens for the mnemonic, split by '.' characters.
3181 size_t Start = 0, Next = Name.find('.');
3182 StringRef Head = Name.slice(Start, Next);
3184 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3185 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3186 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3187 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3188 Parser.eatToEndOfStatement();
3193 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3196 // Handle condition codes for a branch mnemonic
3197 if (Head == "b" && Next != StringRef::npos) {
3199 Next = Name.find('.', Start + 1);
3200 Head = Name.slice(Start + 1, Next);
3202 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3203 (Head.data() - Name.data()));
3204 AArch64CC::CondCode CC = parseCondCodeString(Head);
3205 if (CC == AArch64CC::Invalid)
3206 return Error(SuffixLoc, "invalid condition code");
3208 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3210 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3213 // Add the remaining tokens in the mnemonic.
3214 while (Next != StringRef::npos) {
3216 Next = Name.find('.', Start + 1);
3217 Head = Name.slice(Start, Next);
3218 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3219 (Head.data() - Name.data()) + 1);
3221 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3224 // Conditional compare instructions have a Condition Code operand, which needs
3225 // to be parsed and an immediate operand created.
3226 bool condCodeFourthOperand =
3227 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3228 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3229 Head == "csinc" || Head == "csinv" || Head == "csneg");
3231 // These instructions are aliases to some of the conditional select
3232 // instructions. However, the condition code is inverted in the aliased
3235 // FIXME: Is this the correct way to handle these? Or should the parser
3236 // generate the aliased instructions directly?
3237 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3238 bool condCodeThirdOperand =
3239 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3241 // Read the remaining operands.
3242 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3243 // Read the first operand.
3244 if (parseOperand(Operands, false, false)) {
3245 Parser.eatToEndOfStatement();
3250 while (getLexer().is(AsmToken::Comma)) {
3251 Parser.Lex(); // Eat the comma.
3253 // Parse and remember the operand.
3254 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3255 (N == 3 && condCodeThirdOperand) ||
3256 (N == 2 && condCodeSecondOperand),
3257 condCodeSecondOperand || condCodeThirdOperand)) {
3258 Parser.eatToEndOfStatement();
3262 // After successfully parsing some operands there are two special cases to
3263 // consider (i.e. notional operands not separated by commas). Both are due
3264 // to memory specifiers:
3265 // + An RBrac will end an address for load/store/prefetch
3266 // + An '!' will indicate a pre-indexed operation.
3268 // It's someone else's responsibility to make sure these tokens are sane
3269 // in the given context!
3270 if (Parser.getTok().is(AsmToken::RBrac)) {
3271 SMLoc Loc = Parser.getTok().getLoc();
3272 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3277 if (Parser.getTok().is(AsmToken::Exclaim)) {
3278 SMLoc Loc = Parser.getTok().getLoc();
3279 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3288 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3289 SMLoc Loc = Parser.getTok().getLoc();
3290 Parser.eatToEndOfStatement();
3291 return Error(Loc, "unexpected token in argument list");
3294 Parser.Lex(); // Consume the EndOfStatement
3298 // FIXME: This entire function is a giant hack to provide us with decent
3299 // operand range validation/diagnostics until TableGen/MC can be extended
3300 // to support autogeneration of this kind of validation.
3301 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3302 SmallVectorImpl<SMLoc> &Loc) {
3303 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3304 // Check for indexed addressing modes w/ the base register being the
3305 // same as a destination/source register or pair load where
3306 // the Rt == Rt2. All of those are undefined behaviour.
3307 switch (Inst.getOpcode()) {
3308 case AArch64::LDPSWpre:
3309 case AArch64::LDPWpost:
3310 case AArch64::LDPWpre:
3311 case AArch64::LDPXpost:
3312 case AArch64::LDPXpre: {
3313 unsigned Rt = Inst.getOperand(1).getReg();
3314 unsigned Rt2 = Inst.getOperand(2).getReg();
3315 unsigned Rn = Inst.getOperand(3).getReg();
3316 if (RI->isSubRegisterEq(Rn, Rt))
3317 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3318 "is also a destination");
3319 if (RI->isSubRegisterEq(Rn, Rt2))
3320 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3321 "is also a destination");
3324 case AArch64::LDPDi:
3325 case AArch64::LDPQi:
3326 case AArch64::LDPSi:
3327 case AArch64::LDPSWi:
3328 case AArch64::LDPWi:
3329 case AArch64::LDPXi: {
3330 unsigned Rt = Inst.getOperand(0).getReg();
3331 unsigned Rt2 = Inst.getOperand(1).getReg();
3333 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3336 case AArch64::LDPDpost:
3337 case AArch64::LDPDpre:
3338 case AArch64::LDPQpost:
3339 case AArch64::LDPQpre:
3340 case AArch64::LDPSpost:
3341 case AArch64::LDPSpre:
3342 case AArch64::LDPSWpost: {
3343 unsigned Rt = Inst.getOperand(1).getReg();
3344 unsigned Rt2 = Inst.getOperand(2).getReg();
3346 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3349 case AArch64::STPDpost:
3350 case AArch64::STPDpre:
3351 case AArch64::STPQpost:
3352 case AArch64::STPQpre:
3353 case AArch64::STPSpost:
3354 case AArch64::STPSpre:
3355 case AArch64::STPWpost:
3356 case AArch64::STPWpre:
3357 case AArch64::STPXpost:
3358 case AArch64::STPXpre: {
3359 unsigned Rt = Inst.getOperand(1).getReg();
3360 unsigned Rt2 = Inst.getOperand(2).getReg();
3361 unsigned Rn = Inst.getOperand(3).getReg();
3362 if (RI->isSubRegisterEq(Rn, Rt))
3363 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3364 "is also a source");
3365 if (RI->isSubRegisterEq(Rn, Rt2))
3366 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3367 "is also a source");
3370 case AArch64::LDRBBpre:
3371 case AArch64::LDRBpre:
3372 case AArch64::LDRHHpre:
3373 case AArch64::LDRHpre:
3374 case AArch64::LDRSBWpre:
3375 case AArch64::LDRSBXpre:
3376 case AArch64::LDRSHWpre:
3377 case AArch64::LDRSHXpre:
3378 case AArch64::LDRSWpre:
3379 case AArch64::LDRWpre:
3380 case AArch64::LDRXpre:
3381 case AArch64::LDRBBpost:
3382 case AArch64::LDRBpost:
3383 case AArch64::LDRHHpost:
3384 case AArch64::LDRHpost:
3385 case AArch64::LDRSBWpost:
3386 case AArch64::LDRSBXpost:
3387 case AArch64::LDRSHWpost:
3388 case AArch64::LDRSHXpost:
3389 case AArch64::LDRSWpost:
3390 case AArch64::LDRWpost:
3391 case AArch64::LDRXpost: {
3392 unsigned Rt = Inst.getOperand(1).getReg();
3393 unsigned Rn = Inst.getOperand(2).getReg();
3394 if (RI->isSubRegisterEq(Rn, Rt))
3395 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3396 "is also a source");
3399 case AArch64::STRBBpost:
3400 case AArch64::STRBpost:
3401 case AArch64::STRHHpost:
3402 case AArch64::STRHpost:
3403 case AArch64::STRWpost:
3404 case AArch64::STRXpost:
3405 case AArch64::STRBBpre:
3406 case AArch64::STRBpre:
3407 case AArch64::STRHHpre:
3408 case AArch64::STRHpre:
3409 case AArch64::STRWpre:
3410 case AArch64::STRXpre: {
3411 unsigned Rt = Inst.getOperand(1).getReg();
3412 unsigned Rn = Inst.getOperand(2).getReg();
3413 if (RI->isSubRegisterEq(Rn, Rt))
3414 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3415 "is also a source");
3420 // Now check immediate ranges. Separate from the above as there is overlap
3421 // in the instructions being checked and this keeps the nested conditionals
3423 switch (Inst.getOpcode()) {
3424 case AArch64::ADDSWri:
3425 case AArch64::ADDSXri:
3426 case AArch64::ADDWri:
3427 case AArch64::ADDXri:
3428 case AArch64::SUBSWri:
3429 case AArch64::SUBSXri:
3430 case AArch64::SUBWri:
3431 case AArch64::SUBXri: {
3432 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3433 // some slight duplication here.
3434 if (Inst.getOperand(2).isExpr()) {
3435 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3436 AArch64MCExpr::VariantKind ELFRefKind;
3437 MCSymbolRefExpr::VariantKind DarwinRefKind;
3439 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3440 return Error(Loc[2], "invalid immediate expression");
3443 // Only allow these with ADDXri.
3444 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3445 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3446 Inst.getOpcode() == AArch64::ADDXri)
3449 // Only allow these with ADDXri/ADDWri
3450 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3451 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3452 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3453 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3454 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3455 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3456 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3457 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3458 (Inst.getOpcode() == AArch64::ADDXri ||
3459 Inst.getOpcode() == AArch64::ADDWri))
3462 // Don't allow expressions in the immediate field otherwise
3463 return Error(Loc[2], "invalid immediate expression");
3472 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3474 case Match_MissingFeature:
3476 "instruction requires a CPU feature not currently enabled");
3477 case Match_InvalidOperand:
3478 return Error(Loc, "invalid operand for instruction");
3479 case Match_InvalidSuffix:
3480 return Error(Loc, "invalid type suffix for instruction");
3481 case Match_InvalidCondCode:
3482 return Error(Loc, "expected AArch64 condition code");
3483 case Match_AddSubRegExtendSmall:
3485 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3486 case Match_AddSubRegExtendLarge:
3488 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3489 case Match_AddSubSecondSource:
3491 "expected compatible register, symbol or integer in range [0, 4095]");
3492 case Match_LogicalSecondSource:
3493 return Error(Loc, "expected compatible register or logical immediate");
3494 case Match_InvalidMovImm32Shift:
3495 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3496 case Match_InvalidMovImm64Shift:
3497 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3498 case Match_AddSubRegShift32:
3500 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3501 case Match_AddSubRegShift64:
3503 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3504 case Match_InvalidFPImm:
3506 "expected compatible register or floating-point constant");
3507 case Match_InvalidMemoryIndexedSImm9:
3508 return Error(Loc, "index must be an integer in range [-256, 255].");
3509 case Match_InvalidMemoryIndexed4SImm7:
3510 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3511 case Match_InvalidMemoryIndexed8SImm7:
3512 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3513 case Match_InvalidMemoryIndexed16SImm7:
3514 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3515 case Match_InvalidMemoryWExtend8:
3517 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3518 case Match_InvalidMemoryWExtend16:
3520 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3521 case Match_InvalidMemoryWExtend32:
3523 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3524 case Match_InvalidMemoryWExtend64:
3526 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3527 case Match_InvalidMemoryWExtend128:
3529 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3530 case Match_InvalidMemoryXExtend8:
3532 "expected 'lsl' or 'sxtx' with optional shift of #0");
3533 case Match_InvalidMemoryXExtend16:
3535 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3536 case Match_InvalidMemoryXExtend32:
3538 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3539 case Match_InvalidMemoryXExtend64:
3541 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3542 case Match_InvalidMemoryXExtend128:
3544 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3545 case Match_InvalidMemoryIndexed1:
3546 return Error(Loc, "index must be an integer in range [0, 4095].");
3547 case Match_InvalidMemoryIndexed2:
3548 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3549 case Match_InvalidMemoryIndexed4:
3550 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3551 case Match_InvalidMemoryIndexed8:
3552 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3553 case Match_InvalidMemoryIndexed16:
3554 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3555 case Match_InvalidImm0_7:
3556 return Error(Loc, "immediate must be an integer in range [0, 7].");
3557 case Match_InvalidImm0_15:
3558 return Error(Loc, "immediate must be an integer in range [0, 15].");
3559 case Match_InvalidImm0_31:
3560 return Error(Loc, "immediate must be an integer in range [0, 31].");
3561 case Match_InvalidImm0_63:
3562 return Error(Loc, "immediate must be an integer in range [0, 63].");
3563 case Match_InvalidImm0_127:
3564 return Error(Loc, "immediate must be an integer in range [0, 127].");
3565 case Match_InvalidImm0_65535:
3566 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3567 case Match_InvalidImm1_8:
3568 return Error(Loc, "immediate must be an integer in range [1, 8].");
3569 case Match_InvalidImm1_16:
3570 return Error(Loc, "immediate must be an integer in range [1, 16].");
3571 case Match_InvalidImm1_32:
3572 return Error(Loc, "immediate must be an integer in range [1, 32].");
3573 case Match_InvalidImm1_64:
3574 return Error(Loc, "immediate must be an integer in range [1, 64].");
3575 case Match_InvalidIndex1:
3576 return Error(Loc, "expected lane specifier '[1]'");
3577 case Match_InvalidIndexB:
3578 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3579 case Match_InvalidIndexH:
3580 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3581 case Match_InvalidIndexS:
3582 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3583 case Match_InvalidIndexD:
3584 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3585 case Match_InvalidLabel:
3586 return Error(Loc, "expected label or encodable integer pc offset");
3588 return Error(Loc, "expected readable system register");
3590 return Error(Loc, "expected writable system register or pstate");
3591 case Match_MnemonicFail:
3592 return Error(Loc, "unrecognized instruction mnemonic");
3594 llvm_unreachable("unexpected error code!");
3598 static const char *getSubtargetFeatureName(uint64_t Val);
3600 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3601 OperandVector &Operands,
3603 uint64_t &ErrorInfo,
3604 bool MatchingInlineAsm) {
3605 assert(!Operands.empty() && "Unexpect empty operand list!");
3606 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3607 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3609 StringRef Tok = Op.getToken();
3610 unsigned NumOperands = Operands.size();
3612 if (NumOperands == 4 && Tok == "lsl") {
3613 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3614 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3615 if (Op2.isReg() && Op3.isImm()) {
3616 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3618 uint64_t Op3Val = Op3CE->getValue();
3619 uint64_t NewOp3Val = 0;
3620 uint64_t NewOp4Val = 0;
3621 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3623 NewOp3Val = (32 - Op3Val) & 0x1f;
3624 NewOp4Val = 31 - Op3Val;
3626 NewOp3Val = (64 - Op3Val) & 0x3f;
3627 NewOp4Val = 63 - Op3Val;
3630 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3631 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3633 Operands[0] = AArch64Operand::CreateToken(
3634 "ubfm", false, Op.getStartLoc(), getContext());
3635 Operands.push_back(AArch64Operand::CreateImm(
3636 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3637 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3638 Op3.getEndLoc(), getContext());
3641 } else if (NumOperands == 5) {
3642 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3643 // UBFIZ -> UBFM aliases.
3644 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3645 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3646 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3647 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3649 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3650 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3651 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3653 if (Op3CE && Op4CE) {
3654 uint64_t Op3Val = Op3CE->getValue();
3655 uint64_t Op4Val = Op4CE->getValue();
3657 uint64_t RegWidth = 0;
3658 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3664 if (Op3Val >= RegWidth)
3665 return Error(Op3.getStartLoc(),
3666 "expected integer in range [0, 31]");
3667 if (Op4Val < 1 || Op4Val > RegWidth)
3668 return Error(Op4.getStartLoc(),
3669 "expected integer in range [1, 32]");
3671 uint64_t NewOp3Val = 0;
3672 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3674 NewOp3Val = (32 - Op3Val) & 0x1f;
3676 NewOp3Val = (64 - Op3Val) & 0x3f;
3678 uint64_t NewOp4Val = Op4Val - 1;
3680 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3681 return Error(Op4.getStartLoc(),
3682 "requested insert overflows register");
3684 const MCExpr *NewOp3 =
3685 MCConstantExpr::Create(NewOp3Val, getContext());
3686 const MCExpr *NewOp4 =
3687 MCConstantExpr::Create(NewOp4Val, getContext());
3688 Operands[3] = AArch64Operand::CreateImm(
3689 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3690 Operands[4] = AArch64Operand::CreateImm(
3691 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3693 Operands[0] = AArch64Operand::CreateToken(
3694 "bfm", false, Op.getStartLoc(), getContext());
3695 else if (Tok == "sbfiz")
3696 Operands[0] = AArch64Operand::CreateToken(
3697 "sbfm", false, Op.getStartLoc(), getContext());
3698 else if (Tok == "ubfiz")
3699 Operands[0] = AArch64Operand::CreateToken(
3700 "ubfm", false, Op.getStartLoc(), getContext());
3702 llvm_unreachable("No valid mnemonic for alias?");
3706 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3707 // UBFX -> UBFM aliases.
3708 } else if (NumOperands == 5 &&
3709 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3710 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3711 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3712 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3714 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3715 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3716 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3718 if (Op3CE && Op4CE) {
3719 uint64_t Op3Val = Op3CE->getValue();
3720 uint64_t Op4Val = Op4CE->getValue();
3722 uint64_t RegWidth = 0;
3723 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3729 if (Op3Val >= RegWidth)
3730 return Error(Op3.getStartLoc(),
3731 "expected integer in range [0, 31]");
3732 if (Op4Val < 1 || Op4Val > RegWidth)
3733 return Error(Op4.getStartLoc(),
3734 "expected integer in range [1, 32]");
3736 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3738 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3739 return Error(Op4.getStartLoc(),
3740 "requested extract overflows register");
3742 const MCExpr *NewOp4 =
3743 MCConstantExpr::Create(NewOp4Val, getContext());
3744 Operands[4] = AArch64Operand::CreateImm(
3745 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3747 Operands[0] = AArch64Operand::CreateToken(
3748 "bfm", false, Op.getStartLoc(), getContext());
3749 else if (Tok == "sbfx")
3750 Operands[0] = AArch64Operand::CreateToken(
3751 "sbfm", false, Op.getStartLoc(), getContext());
3752 else if (Tok == "ubfx")
3753 Operands[0] = AArch64Operand::CreateToken(
3754 "ubfm", false, Op.getStartLoc(), getContext());
3756 llvm_unreachable("No valid mnemonic for alias?");
3761 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3762 // InstAlias can't quite handle this since the reg classes aren't
3764 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3765 // The source register can be Wn here, but the matcher expects a
3766 // GPR64. Twiddle it here if necessary.
3767 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3769 unsigned Reg = getXRegFromWReg(Op.getReg());
3770 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3771 Op.getEndLoc(), getContext());
3774 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3775 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3776 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3778 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3780 // The source register can be Wn here, but the matcher expects a
3781 // GPR64. Twiddle it here if necessary.
3782 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3784 unsigned Reg = getXRegFromWReg(Op.getReg());
3785 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3786 Op.getEndLoc(), getContext());
3790 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3791 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3792 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3794 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3796 // The source register can be Wn here, but the matcher expects a
3797 // GPR32. Twiddle it here if necessary.
3798 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3800 unsigned Reg = getWRegFromXReg(Op.getReg());
3801 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3802 Op.getEndLoc(), getContext());
3807 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3808 if (NumOperands == 3 && Tok == "fmov") {
3809 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3810 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3811 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3813 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3817 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3818 Op.getEndLoc(), getContext());
3823 // First try to match against the secondary set of tables containing the
3824 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3825 unsigned MatchResult =
3826 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3828 // If that fails, try against the alternate table containing long-form NEON:
3829 // "fadd v0.2s, v1.2s, v2.2s"
3830 if (MatchResult != Match_Success)
3832 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3834 switch (MatchResult) {
3835 case Match_Success: {
3836 // Perform range checking and other semantic validations
3837 SmallVector<SMLoc, 8> OperandLocs;
3838 NumOperands = Operands.size();
3839 for (unsigned i = 1; i < NumOperands; ++i)
3840 OperandLocs.push_back(Operands[i]->getStartLoc());
3841 if (validateInstruction(Inst, OperandLocs))
3845 Out.EmitInstruction(Inst, STI);
3848 case Match_MissingFeature: {
3849 assert(ErrorInfo && "Unknown missing feature!");
3850 // Special case the error message for the very common case where only
3851 // a single subtarget feature is missing (neon, e.g.).
3852 std::string Msg = "instruction requires:";
3854 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3855 if (ErrorInfo & Mask) {
3857 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3861 return Error(IDLoc, Msg);
3863 case Match_MnemonicFail:
3864 return showMatchError(IDLoc, MatchResult);
3865 case Match_InvalidOperand: {
3866 SMLoc ErrorLoc = IDLoc;
3867 if (ErrorInfo != ~0ULL) {
3868 if (ErrorInfo >= Operands.size())
3869 return Error(IDLoc, "too few operands for instruction");
3871 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3872 if (ErrorLoc == SMLoc())
3875 // If the match failed on a suffix token operand, tweak the diagnostic
3877 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3878 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3879 MatchResult = Match_InvalidSuffix;
3881 return showMatchError(ErrorLoc, MatchResult);
3883 case Match_InvalidMemoryIndexed1:
3884 case Match_InvalidMemoryIndexed2:
3885 case Match_InvalidMemoryIndexed4:
3886 case Match_InvalidMemoryIndexed8:
3887 case Match_InvalidMemoryIndexed16:
3888 case Match_InvalidCondCode:
3889 case Match_AddSubRegExtendSmall:
3890 case Match_AddSubRegExtendLarge:
3891 case Match_AddSubSecondSource:
3892 case Match_LogicalSecondSource:
3893 case Match_AddSubRegShift32:
3894 case Match_AddSubRegShift64:
3895 case Match_InvalidMovImm32Shift:
3896 case Match_InvalidMovImm64Shift:
3897 case Match_InvalidFPImm:
3898 case Match_InvalidMemoryWExtend8:
3899 case Match_InvalidMemoryWExtend16:
3900 case Match_InvalidMemoryWExtend32:
3901 case Match_InvalidMemoryWExtend64:
3902 case Match_InvalidMemoryWExtend128:
3903 case Match_InvalidMemoryXExtend8:
3904 case Match_InvalidMemoryXExtend16:
3905 case Match_InvalidMemoryXExtend32:
3906 case Match_InvalidMemoryXExtend64:
3907 case Match_InvalidMemoryXExtend128:
3908 case Match_InvalidMemoryIndexed4SImm7:
3909 case Match_InvalidMemoryIndexed8SImm7:
3910 case Match_InvalidMemoryIndexed16SImm7:
3911 case Match_InvalidMemoryIndexedSImm9:
3912 case Match_InvalidImm0_7:
3913 case Match_InvalidImm0_15:
3914 case Match_InvalidImm0_31:
3915 case Match_InvalidImm0_63:
3916 case Match_InvalidImm0_127:
3917 case Match_InvalidImm0_65535:
3918 case Match_InvalidImm1_8:
3919 case Match_InvalidImm1_16:
3920 case Match_InvalidImm1_32:
3921 case Match_InvalidImm1_64:
3922 case Match_InvalidIndex1:
3923 case Match_InvalidIndexB:
3924 case Match_InvalidIndexH:
3925 case Match_InvalidIndexS:
3926 case Match_InvalidIndexD:
3927 case Match_InvalidLabel:
3930 if (ErrorInfo >= Operands.size())
3931 return Error(IDLoc, "too few operands for instruction");
3932 // Any time we get here, there's nothing fancy to do. Just get the
3933 // operand SMLoc and display the diagnostic.
3934 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3935 if (ErrorLoc == SMLoc())
3937 return showMatchError(ErrorLoc, MatchResult);
3941 llvm_unreachable("Implement any new match types added!");
3944 /// ParseDirective parses the arm specific directives
3945 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3946 const MCObjectFileInfo::Environment Format =
3947 getContext().getObjectFileInfo()->getObjectFileType();
3948 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3949 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3951 StringRef IDVal = DirectiveID.getIdentifier();
3952 SMLoc Loc = DirectiveID.getLoc();
3953 if (IDVal == ".hword")
3954 return parseDirectiveWord(2, Loc);
3955 if (IDVal == ".word")
3956 return parseDirectiveWord(4, Loc);
3957 if (IDVal == ".xword")
3958 return parseDirectiveWord(8, Loc);
3959 if (IDVal == ".tlsdesccall")
3960 return parseDirectiveTLSDescCall(Loc);
3961 if (IDVal == ".ltorg" || IDVal == ".pool")
3962 return parseDirectiveLtorg(Loc);
3963 if (IDVal == ".unreq")
3964 return parseDirectiveUnreq(DirectiveID.getLoc());
3966 if (!IsMachO && !IsCOFF) {
3967 if (IDVal == ".inst")
3968 return parseDirectiveInst(Loc);
3971 return parseDirectiveLOH(IDVal, Loc);
3974 /// parseDirectiveWord
3975 /// ::= .word [ expression (, expression)* ]
3976 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3977 MCAsmParser &Parser = getParser();
3978 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3980 const MCExpr *Value;
3981 if (getParser().parseExpression(Value))
3984 getParser().getStreamer().EmitValue(Value, Size);
3986 if (getLexer().is(AsmToken::EndOfStatement))
3989 // FIXME: Improve diagnostic.
3990 if (getLexer().isNot(AsmToken::Comma))
3991 return Error(L, "unexpected token in directive");
4000 /// parseDirectiveInst
4001 /// ::= .inst opcode [, ...]
4002 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4003 MCAsmParser &Parser = getParser();
4004 if (getLexer().is(AsmToken::EndOfStatement)) {
4005 Parser.eatToEndOfStatement();
4006 Error(Loc, "expected expression following directive");
4013 if (getParser().parseExpression(Expr)) {
4014 Error(Loc, "expected expression");
4018 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4020 Error(Loc, "expected constant expression");
4024 getTargetStreamer().emitInst(Value->getValue());
4026 if (getLexer().is(AsmToken::EndOfStatement))
4029 if (getLexer().isNot(AsmToken::Comma)) {
4030 Error(Loc, "unexpected token in directive");
4034 Parser.Lex(); // Eat comma.
4041 // parseDirectiveTLSDescCall:
4042 // ::= .tlsdesccall symbol
4043 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4045 if (getParser().parseIdentifier(Name))
4046 return Error(L, "expected symbol after directive");
4048 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4049 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4050 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4053 Inst.setOpcode(AArch64::TLSDESCCALL);
4054 Inst.addOperand(MCOperand::CreateExpr(Expr));
4056 getParser().getStreamer().EmitInstruction(Inst, STI);
4060 /// ::= .loh <lohName | lohId> label1, ..., labelN
4061 /// The number of arguments depends on the loh identifier.
4062 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4063 if (IDVal != MCLOHDirectiveName())
4066 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4067 if (getParser().getTok().isNot(AsmToken::Integer))
4068 return TokError("expected an identifier or a number in directive");
4069 // We successfully get a numeric value for the identifier.
4070 // Check if it is valid.
4071 int64_t Id = getParser().getTok().getIntVal();
4072 if (Id <= -1U && !isValidMCLOHType(Id))
4073 return TokError("invalid numeric identifier in directive");
4074 Kind = (MCLOHType)Id;
4076 StringRef Name = getTok().getIdentifier();
4077 // We successfully parse an identifier.
4078 // Check if it is a recognized one.
4079 int Id = MCLOHNameToId(Name);
4082 return TokError("invalid identifier in directive");
4083 Kind = (MCLOHType)Id;
4085 // Consume the identifier.
4087 // Get the number of arguments of this LOH.
4088 int NbArgs = MCLOHIdToNbArgs(Kind);
4090 assert(NbArgs != -1 && "Invalid number of arguments");
4092 SmallVector<MCSymbol *, 3> Args;
4093 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4095 if (getParser().parseIdentifier(Name))
4096 return TokError("expected identifier in directive");
4097 Args.push_back(getContext().GetOrCreateSymbol(Name));
4099 if (Idx + 1 == NbArgs)
4101 if (getLexer().isNot(AsmToken::Comma))
4102 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4105 if (getLexer().isNot(AsmToken::EndOfStatement))
4106 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4108 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4112 /// parseDirectiveLtorg
4113 /// ::= .ltorg | .pool
4114 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4115 getTargetStreamer().emitCurrentConstantPool();
4119 /// parseDirectiveReq
4120 /// ::= name .req registername
4121 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4122 MCAsmParser &Parser = getParser();
4123 Parser.Lex(); // Eat the '.req' token.
4124 SMLoc SRegLoc = getLoc();
4125 unsigned RegNum = tryParseRegister();
4126 bool IsVector = false;
4128 if (RegNum == static_cast<unsigned>(-1)) {
4130 RegNum = tryMatchVectorRegister(Kind, false);
4131 if (!Kind.empty()) {
4132 Error(SRegLoc, "vector register without type specifier expected");
4138 if (RegNum == static_cast<unsigned>(-1)) {
4139 Parser.eatToEndOfStatement();
4140 Error(SRegLoc, "register name or alias expected");
4144 // Shouldn't be anything else.
4145 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4146 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4147 Parser.eatToEndOfStatement();
4151 Parser.Lex(); // Consume the EndOfStatement
4153 auto pair = std::make_pair(IsVector, RegNum);
4154 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4155 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4160 /// parseDirectiveUneq
4161 /// ::= .unreq registername
4162 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4163 MCAsmParser &Parser = getParser();
4164 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4165 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4166 Parser.eatToEndOfStatement();
4169 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4170 Parser.Lex(); // Eat the identifier.
4175 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4176 AArch64MCExpr::VariantKind &ELFRefKind,
4177 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4179 ELFRefKind = AArch64MCExpr::VK_INVALID;
4180 DarwinRefKind = MCSymbolRefExpr::VK_None;
4183 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4184 ELFRefKind = AE->getKind();
4185 Expr = AE->getSubExpr();
4188 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4190 // It's a simple symbol reference with no addend.
4191 DarwinRefKind = SE->getKind();
4195 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4199 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4202 DarwinRefKind = SE->getKind();
4204 if (BE->getOpcode() != MCBinaryExpr::Add &&
4205 BE->getOpcode() != MCBinaryExpr::Sub)
4208 // See if the addend is is a constant, otherwise there's more going
4209 // on here than we can deal with.
4210 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4214 Addend = AddendExpr->getValue();
4215 if (BE->getOpcode() == MCBinaryExpr::Sub)
4218 // It's some symbol reference + a constant addend, but really
4219 // shouldn't use both Darwin and ELF syntax.
4220 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4221 DarwinRefKind == MCSymbolRefExpr::VK_None;
4224 /// Force static initialization.
4225 extern "C" void LLVMInitializeAArch64AsmParser() {
4226 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4227 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4228 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4231 #define GET_REGISTER_MATCHER
4232 #define GET_SUBTARGET_FEATURE_NAME
4233 #define GET_MATCHER_IMPLEMENTATION
4234 #include "AArch64GenAsmMatcher.inc"
4236 // Define this matcher function after the auto-generated include so we
4237 // have the match class enum definitions.
4238 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4240 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4241 // If the kind is a token for a literal immediate, check if our asm
4242 // operand matches. This is for InstAliases which have a fixed-value
4243 // immediate in the syntax.
4244 int64_t ExpectedVal;
4247 return Match_InvalidOperand;
4289 return Match_InvalidOperand;
4290 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4292 return Match_InvalidOperand;
4293 if (CE->getValue() == ExpectedVal)
4294 return Match_Success;
4295 return Match_InvalidOperand;