1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
48 // Map of register aliases registers via the .req directive.
49 StringMap<std::pair<bool, unsigned> > RegisterReqs;
51 AArch64TargetStreamer &getTargetStreamer() {
52 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
53 return static_cast<AArch64TargetStreamer &>(TS);
56 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
58 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
59 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
60 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
61 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
62 int tryParseRegister();
63 int tryMatchVectorRegister(StringRef &Kind, bool expected);
64 bool parseRegister(OperandVector &Operands);
65 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
66 bool parseVectorList(OperandVector &Operands);
67 bool parseOperand(OperandVector &Operands, bool isCondCode,
70 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
71 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
72 bool showMatchError(SMLoc Loc, unsigned ErrCode);
74 bool parseDirectiveWord(unsigned Size, SMLoc L);
75 bool parseDirectiveInst(SMLoc L);
77 bool parseDirectiveTLSDescCall(SMLoc L);
79 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
80 bool parseDirectiveLtorg(SMLoc L);
82 bool parseDirectiveReq(StringRef Name, SMLoc L);
83 bool parseDirectiveUnreq(SMLoc L);
85 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
86 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
87 OperandVector &Operands, MCStreamer &Out,
89 bool MatchingInlineAsm) override;
90 /// @name Auto-generated Match Functions
93 #define GET_ASSEMBLER_HEADER
94 #include "AArch64GenAsmMatcher.inc"
98 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
99 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
100 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
102 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
103 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109 bool tryParseVectorRegister(OperandVector &Operands);
110 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
113 enum AArch64MatchResultTy {
114 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
115 #define GET_OPERAND_DIAGNOSTIC_TYPES
116 #include "AArch64GenAsmMatcher.inc"
118 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
119 const MCInstrInfo &MII, const MCTargetOptions &Options)
120 : MCTargetAsmParser(Options), STI(STI) {
121 MCAsmParserExtension::Initialize(Parser);
122 MCStreamer &S = getParser().getStreamer();
123 if (S.getTargetStreamer() == nullptr)
124 new AArch64TargetStreamer(S);
126 // Initialize the set of available features.
127 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
130 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
131 SMLoc NameLoc, OperandVector &Operands) override;
132 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
133 bool ParseDirective(AsmToken DirectiveID) override;
134 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
135 unsigned Kind) override;
137 static bool classifySymbolRef(const MCExpr *Expr,
138 AArch64MCExpr::VariantKind &ELFRefKind,
139 MCSymbolRefExpr::VariantKind &DarwinRefKind,
142 } // end anonymous namespace
146 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
148 class AArch64Operand : public MCParsedAsmOperand {
166 SMLoc StartLoc, EndLoc;
171 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
179 struct VectorListOp {
182 unsigned NumElements;
183 unsigned ElementKind;
186 struct VectorIndexOp {
194 struct ShiftedImmOp {
196 unsigned ShiftAmount;
200 AArch64CC::CondCode Code;
204 unsigned Val; // Encoded 8-bit representation.
208 unsigned Val; // Not the enum since not all values have names.
218 uint32_t PStateField;
231 struct ShiftExtendOp {
232 AArch64_AM::ShiftExtendType Type;
234 bool HasExplicitAmount;
244 struct VectorListOp VectorList;
245 struct VectorIndexOp VectorIndex;
247 struct ShiftedImmOp ShiftedImm;
248 struct CondCodeOp CondCode;
249 struct FPImmOp FPImm;
250 struct BarrierOp Barrier;
251 struct SysRegOp SysReg;
252 struct SysCRImmOp SysCRImm;
253 struct PrefetchOp Prefetch;
254 struct ShiftExtendOp ShiftExtend;
257 // Keep the MCContext around as the MCExprs may need manipulated during
258 // the add<>Operands() calls.
262 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
264 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
266 StartLoc = o.StartLoc;
276 ShiftedImm = o.ShiftedImm;
279 CondCode = o.CondCode;
291 VectorList = o.VectorList;
294 VectorIndex = o.VectorIndex;
300 SysCRImm = o.SysCRImm;
303 Prefetch = o.Prefetch;
306 ShiftExtend = o.ShiftExtend;
311 /// getStartLoc - Get the location of the first token of this operand.
312 SMLoc getStartLoc() const override { return StartLoc; }
313 /// getEndLoc - Get the location of the last token of this operand.
314 SMLoc getEndLoc() const override { return EndLoc; }
316 StringRef getToken() const {
317 assert(Kind == k_Token && "Invalid access!");
318 return StringRef(Tok.Data, Tok.Length);
321 bool isTokenSuffix() const {
322 assert(Kind == k_Token && "Invalid access!");
326 const MCExpr *getImm() const {
327 assert(Kind == k_Immediate && "Invalid access!");
331 const MCExpr *getShiftedImmVal() const {
332 assert(Kind == k_ShiftedImm && "Invalid access!");
333 return ShiftedImm.Val;
336 unsigned getShiftedImmShift() const {
337 assert(Kind == k_ShiftedImm && "Invalid access!");
338 return ShiftedImm.ShiftAmount;
341 AArch64CC::CondCode getCondCode() const {
342 assert(Kind == k_CondCode && "Invalid access!");
343 return CondCode.Code;
346 unsigned getFPImm() const {
347 assert(Kind == k_FPImm && "Invalid access!");
351 unsigned getBarrier() const {
352 assert(Kind == k_Barrier && "Invalid access!");
356 StringRef getBarrierName() const {
357 assert(Kind == k_Barrier && "Invalid access!");
358 return StringRef(Barrier.Data, Barrier.Length);
361 unsigned getReg() const override {
362 assert(Kind == k_Register && "Invalid access!");
366 unsigned getVectorListStart() const {
367 assert(Kind == k_VectorList && "Invalid access!");
368 return VectorList.RegNum;
371 unsigned getVectorListCount() const {
372 assert(Kind == k_VectorList && "Invalid access!");
373 return VectorList.Count;
376 unsigned getVectorIndex() const {
377 assert(Kind == k_VectorIndex && "Invalid access!");
378 return VectorIndex.Val;
381 StringRef getSysReg() const {
382 assert(Kind == k_SysReg && "Invalid access!");
383 return StringRef(SysReg.Data, SysReg.Length);
386 unsigned getSysCR() const {
387 assert(Kind == k_SysCR && "Invalid access!");
391 unsigned getPrefetch() const {
392 assert(Kind == k_Prefetch && "Invalid access!");
396 StringRef getPrefetchName() const {
397 assert(Kind == k_Prefetch && "Invalid access!");
398 return StringRef(Prefetch.Data, Prefetch.Length);
401 AArch64_AM::ShiftExtendType getShiftExtendType() const {
402 assert(Kind == k_ShiftExtend && "Invalid access!");
403 return ShiftExtend.Type;
406 unsigned getShiftExtendAmount() const {
407 assert(Kind == k_ShiftExtend && "Invalid access!");
408 return ShiftExtend.Amount;
411 bool hasShiftExtendAmount() const {
412 assert(Kind == k_ShiftExtend && "Invalid access!");
413 return ShiftExtend.HasExplicitAmount;
416 bool isImm() const override { return Kind == k_Immediate; }
417 bool isMem() const override { return false; }
418 bool isSImm9() const {
421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
424 int64_t Val = MCE->getValue();
425 return (Val >= -256 && Val < 256);
427 bool isSImm7s4() const {
430 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
433 int64_t Val = MCE->getValue();
434 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
436 bool isSImm7s8() const {
439 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
442 int64_t Val = MCE->getValue();
443 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
445 bool isSImm7s16() const {
448 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
451 int64_t Val = MCE->getValue();
452 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
455 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
456 AArch64MCExpr::VariantKind ELFRefKind;
457 MCSymbolRefExpr::VariantKind DarwinRefKind;
459 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
461 // If we don't understand the expression, assume the best and
462 // let the fixup and relocation code deal with it.
466 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
467 ELFRefKind == AArch64MCExpr::VK_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
472 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
474 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
475 // Note that we don't range-check the addend. It's adjusted modulo page
476 // size when converted, so there is no "out of range" condition when using
478 return Addend >= 0 && (Addend % Scale) == 0;
479 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
480 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
481 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
488 template <int Scale> bool isUImm12Offset() const {
492 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
494 return isSymbolicUImm12Offset(getImm(), Scale);
496 int64_t Val = MCE->getValue();
497 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
500 bool isImm0_1() const {
503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
506 int64_t Val = MCE->getValue();
507 return (Val >= 0 && Val < 2);
509 bool isImm0_7() const {
512 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 int64_t Val = MCE->getValue();
516 return (Val >= 0 && Val < 8);
518 bool isImm1_8() const {
521 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
524 int64_t Val = MCE->getValue();
525 return (Val > 0 && Val < 9);
527 bool isImm0_15() const {
530 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
533 int64_t Val = MCE->getValue();
534 return (Val >= 0 && Val < 16);
536 bool isImm1_16() const {
539 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
542 int64_t Val = MCE->getValue();
543 return (Val > 0 && Val < 17);
545 bool isImm0_31() const {
548 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551 int64_t Val = MCE->getValue();
552 return (Val >= 0 && Val < 32);
554 bool isImm1_31() const {
557 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
560 int64_t Val = MCE->getValue();
561 return (Val >= 1 && Val < 32);
563 bool isImm1_32() const {
566 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
569 int64_t Val = MCE->getValue();
570 return (Val >= 1 && Val < 33);
572 bool isImm0_63() const {
575 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578 int64_t Val = MCE->getValue();
579 return (Val >= 0 && Val < 64);
581 bool isImm1_63() const {
584 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
587 int64_t Val = MCE->getValue();
588 return (Val >= 1 && Val < 64);
590 bool isImm1_64() const {
593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
596 int64_t Val = MCE->getValue();
597 return (Val >= 1 && Val < 65);
599 bool isImm0_127() const {
602 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
605 int64_t Val = MCE->getValue();
606 return (Val >= 0 && Val < 128);
608 bool isImm0_255() const {
611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
614 int64_t Val = MCE->getValue();
615 return (Val >= 0 && Val < 256);
617 bool isImm0_65535() const {
620 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
623 int64_t Val = MCE->getValue();
624 return (Val >= 0 && Val < 65536);
626 bool isImm32_63() const {
629 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632 int64_t Val = MCE->getValue();
633 return (Val >= 32 && Val < 64);
635 bool isLogicalImm32() const {
638 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
641 int64_t Val = MCE->getValue();
642 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
645 return AArch64_AM::isLogicalImmediate(Val, 32);
647 bool isLogicalImm64() const {
650 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
653 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
655 bool isLogicalImm32Not() const {
658 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
661 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
662 return AArch64_AM::isLogicalImmediate(Val, 32);
664 bool isLogicalImm64Not() const {
667 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
670 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
672 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
673 bool isAddSubImm() const {
674 if (!isShiftedImm() && !isImm())
679 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
680 if (isShiftedImm()) {
681 unsigned Shift = ShiftedImm.ShiftAmount;
682 Expr = ShiftedImm.Val;
683 if (Shift != 0 && Shift != 12)
689 AArch64MCExpr::VariantKind ELFRefKind;
690 MCSymbolRefExpr::VariantKind DarwinRefKind;
692 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
693 DarwinRefKind, Addend)) {
694 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
695 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
696 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
697 || ELFRefKind == AArch64MCExpr::VK_LO12
698 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
699 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
700 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
701 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
702 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
703 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
704 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
707 // Otherwise it should be a real immediate in range:
708 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
709 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
711 bool isAddSubImmNeg() const {
712 if (!isShiftedImm() && !isImm())
717 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
718 if (isShiftedImm()) {
719 unsigned Shift = ShiftedImm.ShiftAmount;
720 Expr = ShiftedImm.Val;
721 if (Shift != 0 && Shift != 12)
726 // Otherwise it should be a real negative immediate in range:
727 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
728 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
730 bool isCondCode() const { return Kind == k_CondCode; }
731 bool isSIMDImmType10() const {
734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
739 bool isBranchTarget26() const {
742 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
745 int64_t Val = MCE->getValue();
748 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
750 bool isPCRelLabel19() const {
753 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
756 int64_t Val = MCE->getValue();
759 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
761 bool isBranchTarget14() const {
764 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
767 int64_t Val = MCE->getValue();
770 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
774 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
778 AArch64MCExpr::VariantKind ELFRefKind;
779 MCSymbolRefExpr::VariantKind DarwinRefKind;
781 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
782 DarwinRefKind, Addend)) {
785 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
788 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
789 if (ELFRefKind == AllowedModifiers[i])
796 bool isMovZSymbolG3() const {
797 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
800 bool isMovZSymbolG2() const {
801 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
802 AArch64MCExpr::VK_TPREL_G2,
803 AArch64MCExpr::VK_DTPREL_G2});
806 bool isMovZSymbolG1() const {
807 return isMovWSymbol({
808 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
809 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
810 AArch64MCExpr::VK_DTPREL_G1,
814 bool isMovZSymbolG0() const {
815 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
816 AArch64MCExpr::VK_TPREL_G0,
817 AArch64MCExpr::VK_DTPREL_G0});
820 bool isMovKSymbolG3() const {
821 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
824 bool isMovKSymbolG2() const {
825 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
828 bool isMovKSymbolG1() const {
829 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
830 AArch64MCExpr::VK_TPREL_G1_NC,
831 AArch64MCExpr::VK_DTPREL_G1_NC});
834 bool isMovKSymbolG0() const {
836 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
837 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
840 template<int RegWidth, int Shift>
841 bool isMOVZMovAlias() const {
842 if (!isImm()) return false;
844 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845 if (!CE) return false;
846 uint64_t Value = CE->getValue();
849 Value &= 0xffffffffULL;
851 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
852 if (Value == 0 && Shift != 0)
855 return (Value & ~(0xffffULL << Shift)) == 0;
858 template<int RegWidth, int Shift>
859 bool isMOVNMovAlias() const {
860 if (!isImm()) return false;
862 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
863 if (!CE) return false;
864 uint64_t Value = CE->getValue();
866 // MOVZ takes precedence over MOVN.
867 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
868 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
873 Value &= 0xffffffffULL;
875 return (Value & ~(0xffffULL << Shift)) == 0;
878 bool isFPImm() const { return Kind == k_FPImm; }
879 bool isBarrier() const { return Kind == k_Barrier; }
880 bool isSysReg() const { return Kind == k_SysReg; }
881 bool isMRSSystemRegister() const {
882 if (!isSysReg()) return false;
884 return SysReg.MRSReg != -1U;
886 bool isMSRSystemRegister() const {
887 if (!isSysReg()) return false;
888 return SysReg.MSRReg != -1U;
890 bool isSystemPStateFieldWithImm0_1() const {
891 if (!isSysReg()) return false;
892 return SysReg.PStateField == AArch64PState::PAN;
894 bool isSystemPStateFieldWithImm0_15() const {
895 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
896 return SysReg.PStateField != -1U;
898 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
899 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
900 bool isVectorRegLo() const {
901 return Kind == k_Register && Reg.isVector &&
902 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
905 bool isGPR32as64() const {
906 return Kind == k_Register && !Reg.isVector &&
907 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
909 bool isWSeqPair() const {
910 return Kind == k_Register && !Reg.isVector &&
911 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
914 bool isXSeqPair() const {
915 return Kind == k_Register && !Reg.isVector &&
916 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
920 bool isGPR64sp0() const {
921 return Kind == k_Register && !Reg.isVector &&
922 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
925 /// Is this a vector list with the type implicit (presumably attached to the
926 /// instruction itself)?
927 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
928 return Kind == k_VectorList && VectorList.Count == NumRegs &&
929 !VectorList.ElementKind;
932 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
933 bool isTypedVectorList() const {
934 if (Kind != k_VectorList)
936 if (VectorList.Count != NumRegs)
938 if (VectorList.ElementKind != ElementKind)
940 return VectorList.NumElements == NumElements;
943 bool isVectorIndex1() const {
944 return Kind == k_VectorIndex && VectorIndex.Val == 1;
946 bool isVectorIndexB() const {
947 return Kind == k_VectorIndex && VectorIndex.Val < 16;
949 bool isVectorIndexH() const {
950 return Kind == k_VectorIndex && VectorIndex.Val < 8;
952 bool isVectorIndexS() const {
953 return Kind == k_VectorIndex && VectorIndex.Val < 4;
955 bool isVectorIndexD() const {
956 return Kind == k_VectorIndex && VectorIndex.Val < 2;
958 bool isToken() const override { return Kind == k_Token; }
959 bool isTokenEqual(StringRef Str) const {
960 return Kind == k_Token && getToken() == Str;
962 bool isSysCR() const { return Kind == k_SysCR; }
963 bool isPrefetch() const { return Kind == k_Prefetch; }
964 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
965 bool isShifter() const {
966 if (!isShiftExtend())
969 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
970 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
971 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
972 ST == AArch64_AM::MSL);
974 bool isExtend() const {
975 if (!isShiftExtend())
978 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
979 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
980 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
981 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
982 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
983 ET == AArch64_AM::LSL) &&
984 getShiftExtendAmount() <= 4;
987 bool isExtend64() const {
990 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
991 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
992 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
994 bool isExtendLSL64() const {
997 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
998 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
999 ET == AArch64_AM::LSL) &&
1000 getShiftExtendAmount() <= 4;
1003 template<int Width> bool isMemXExtend() const {
1006 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1007 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1008 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1009 getShiftExtendAmount() == 0);
1012 template<int Width> bool isMemWExtend() const {
1015 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1016 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1017 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1018 getShiftExtendAmount() == 0);
1021 template <unsigned width>
1022 bool isArithmeticShifter() const {
1026 // An arithmetic shifter is LSL, LSR, or ASR.
1027 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1028 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1029 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1032 template <unsigned width>
1033 bool isLogicalShifter() const {
1037 // A logical shifter is LSL, LSR, ASR or ROR.
1038 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1039 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1040 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1041 getShiftExtendAmount() < width;
1044 bool isMovImm32Shifter() const {
1048 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1049 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1050 if (ST != AArch64_AM::LSL)
1052 uint64_t Val = getShiftExtendAmount();
1053 return (Val == 0 || Val == 16);
1056 bool isMovImm64Shifter() const {
1060 // A MOVi shifter is LSL of 0 or 16.
1061 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1062 if (ST != AArch64_AM::LSL)
1064 uint64_t Val = getShiftExtendAmount();
1065 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1068 bool isLogicalVecShifter() const {
1072 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1073 unsigned Shift = getShiftExtendAmount();
1074 return getShiftExtendType() == AArch64_AM::LSL &&
1075 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1078 bool isLogicalVecHalfWordShifter() const {
1079 if (!isLogicalVecShifter())
1082 // A logical vector shifter is a left shift by 0 or 8.
1083 unsigned Shift = getShiftExtendAmount();
1084 return getShiftExtendType() == AArch64_AM::LSL &&
1085 (Shift == 0 || Shift == 8);
1088 bool isMoveVecShifter() const {
1089 if (!isShiftExtend())
1092 // A logical vector shifter is a left shift by 8 or 16.
1093 unsigned Shift = getShiftExtendAmount();
1094 return getShiftExtendType() == AArch64_AM::MSL &&
1095 (Shift == 8 || Shift == 16);
1098 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1099 // to LDUR/STUR when the offset is not legal for the former but is for
1100 // the latter. As such, in addition to checking for being a legal unscaled
1101 // address, also check that it is not a legal scaled address. This avoids
1102 // ambiguity in the matcher.
1104 bool isSImm9OffsetFB() const {
1105 return isSImm9() && !isUImm12Offset<Width / 8>();
1108 bool isAdrpLabel() const {
1109 // Validation was handled during parsing, so we just sanity check that
1110 // something didn't go haywire.
1114 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1115 int64_t Val = CE->getValue();
1116 int64_t Min = - (4096 * (1LL << (21 - 1)));
1117 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1118 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1124 bool isAdrLabel() const {
1125 // Validation was handled during parsing, so we just sanity check that
1126 // something didn't go haywire.
1130 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1131 int64_t Val = CE->getValue();
1132 int64_t Min = - (1LL << (21 - 1));
1133 int64_t Max = ((1LL << (21 - 1)) - 1);
1134 return Val >= Min && Val <= Max;
1140 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1141 // Add as immediates when possible. Null MCExpr = 0.
1143 Inst.addOperand(MCOperand::createImm(0));
1144 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1145 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1147 Inst.addOperand(MCOperand::createExpr(Expr));
1150 void addRegOperands(MCInst &Inst, unsigned N) const {
1151 assert(N == 1 && "Invalid number of operands!");
1152 Inst.addOperand(MCOperand::createReg(getReg()));
1155 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1156 assert(N == 1 && "Invalid number of operands!");
1158 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1160 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1161 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1162 RI->getEncodingValue(getReg()));
1164 Inst.addOperand(MCOperand::createReg(Reg));
1167 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1170 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1171 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1174 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1177 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1178 Inst.addOperand(MCOperand::createReg(getReg()));
1181 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::createReg(getReg()));
1186 template <unsigned NumRegs>
1187 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1188 assert(N == 1 && "Invalid number of operands!");
1189 static const unsigned FirstRegs[] = { AArch64::D0,
1192 AArch64::D0_D1_D2_D3 };
1193 unsigned FirstReg = FirstRegs[NumRegs - 1];
1196 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1199 template <unsigned NumRegs>
1200 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1201 assert(N == 1 && "Invalid number of operands!");
1202 static const unsigned FirstRegs[] = { AArch64::Q0,
1205 AArch64::Q0_Q1_Q2_Q3 };
1206 unsigned FirstReg = FirstRegs[NumRegs - 1];
1209 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1212 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1213 assert(N == 1 && "Invalid number of operands!");
1214 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1217 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1218 assert(N == 1 && "Invalid number of operands!");
1219 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1222 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1227 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1228 assert(N == 1 && "Invalid number of operands!");
1229 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1232 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1233 assert(N == 1 && "Invalid number of operands!");
1234 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1237 void addImmOperands(MCInst &Inst, unsigned N) const {
1238 assert(N == 1 && "Invalid number of operands!");
1239 // If this is a pageoff symrefexpr with an addend, adjust the addend
1240 // to be only the page-offset portion. Otherwise, just add the expr
1242 addExpr(Inst, getImm());
1245 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1246 assert(N == 2 && "Invalid number of operands!");
1247 if (isShiftedImm()) {
1248 addExpr(Inst, getShiftedImmVal());
1249 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1251 addExpr(Inst, getImm());
1252 Inst.addOperand(MCOperand::createImm(0));
1256 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1257 assert(N == 2 && "Invalid number of operands!");
1259 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1260 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1261 int64_t Val = -CE->getValue();
1262 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1264 Inst.addOperand(MCOperand::createImm(Val));
1265 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1268 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1269 assert(N == 1 && "Invalid number of operands!");
1270 Inst.addOperand(MCOperand::createImm(getCondCode()));
1273 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1274 assert(N == 1 && "Invalid number of operands!");
1275 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1277 addExpr(Inst, getImm());
1279 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1282 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1283 addImmOperands(Inst, N);
1287 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1288 assert(N == 1 && "Invalid number of operands!");
1289 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::createExpr(getImm()));
1295 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1298 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1299 assert(N == 1 && "Invalid number of operands!");
1300 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1301 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1304 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1305 assert(N == 1 && "Invalid number of operands!");
1306 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1307 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1310 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1313 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1316 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1319 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1322 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1323 assert(N == 1 && "Invalid number of operands!");
1324 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1325 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1328 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1329 assert(N == 1 && "Invalid number of operands!");
1330 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1331 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1334 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1335 assert(N == 1 && "Invalid number of operands!");
1336 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1337 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1340 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1343 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1346 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1347 assert(N == 1 && "Invalid number of operands!");
1348 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1349 assert(MCE && "Invalid constant immediate operand!");
1350 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1353 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1354 assert(N == 1 && "Invalid number of operands!");
1355 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1356 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1359 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1360 assert(N == 1 && "Invalid number of operands!");
1361 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1362 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1365 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1366 assert(N == 1 && "Invalid number of operands!");
1367 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1368 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1371 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1374 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1377 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1378 assert(N == 1 && "Invalid number of operands!");
1379 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1380 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1383 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1384 assert(N == 1 && "Invalid number of operands!");
1385 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1386 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1389 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1390 assert(N == 1 && "Invalid number of operands!");
1391 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1392 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1395 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1396 assert(N == 1 && "Invalid number of operands!");
1397 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1398 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1401 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1402 assert(N == 1 && "Invalid number of operands!");
1403 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1404 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1407 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1408 assert(N == 1 && "Invalid number of operands!");
1409 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1410 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1413 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1414 assert(N == 1 && "Invalid number of operands!");
1415 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1417 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1418 Inst.addOperand(MCOperand::createImm(encoding));
1421 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1422 assert(N == 1 && "Invalid number of operands!");
1423 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1424 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1425 Inst.addOperand(MCOperand::createImm(encoding));
1428 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1429 assert(N == 1 && "Invalid number of operands!");
1430 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1431 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1432 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1433 Inst.addOperand(MCOperand::createImm(encoding));
1436 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1437 assert(N == 1 && "Invalid number of operands!");
1438 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1440 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1441 Inst.addOperand(MCOperand::createImm(encoding));
1444 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1445 assert(N == 1 && "Invalid number of operands!");
1446 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1447 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1448 Inst.addOperand(MCOperand::createImm(encoding));
1451 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1452 // Branch operands don't encode the low bits, so shift them off
1453 // here. If it's a label, however, just put it on directly as there's
1454 // not enough information now to do anything.
1455 assert(N == 1 && "Invalid number of operands!");
1456 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1458 addExpr(Inst, getImm());
1461 assert(MCE && "Invalid constant immediate operand!");
1462 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1465 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1466 // Branch operands don't encode the low bits, so shift them off
1467 // here. If it's a label, however, just put it on directly as there's
1468 // not enough information now to do anything.
1469 assert(N == 1 && "Invalid number of operands!");
1470 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1472 addExpr(Inst, getImm());
1475 assert(MCE && "Invalid constant immediate operand!");
1476 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1479 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1480 // Branch operands don't encode the low bits, so shift them off
1481 // here. If it's a label, however, just put it on directly as there's
1482 // not enough information now to do anything.
1483 assert(N == 1 && "Invalid number of operands!");
1484 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1486 addExpr(Inst, getImm());
1489 assert(MCE && "Invalid constant immediate operand!");
1490 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1493 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && "Invalid number of operands!");
1495 Inst.addOperand(MCOperand::createImm(getFPImm()));
1498 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1499 assert(N == 1 && "Invalid number of operands!");
1500 Inst.addOperand(MCOperand::createImm(getBarrier()));
1503 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1504 assert(N == 1 && "Invalid number of operands!");
1506 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1509 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1510 assert(N == 1 && "Invalid number of operands!");
1512 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1515 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1516 assert(N == 1 && "Invalid number of operands!");
1518 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1521 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1522 assert(N == 1 && "Invalid number of operands!");
1524 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1527 void addSysCROperands(MCInst &Inst, unsigned N) const {
1528 assert(N == 1 && "Invalid number of operands!");
1529 Inst.addOperand(MCOperand::createImm(getSysCR()));
1532 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1533 assert(N == 1 && "Invalid number of operands!");
1534 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1537 void addShifterOperands(MCInst &Inst, unsigned N) const {
1538 assert(N == 1 && "Invalid number of operands!");
1540 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1541 Inst.addOperand(MCOperand::createImm(Imm));
1544 void addExtendOperands(MCInst &Inst, unsigned N) const {
1545 assert(N == 1 && "Invalid number of operands!");
1546 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1547 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1548 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1549 Inst.addOperand(MCOperand::createImm(Imm));
1552 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1553 assert(N == 1 && "Invalid number of operands!");
1554 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1555 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1556 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1557 Inst.addOperand(MCOperand::createImm(Imm));
1560 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1561 assert(N == 2 && "Invalid number of operands!");
1562 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1563 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1564 Inst.addOperand(MCOperand::createImm(IsSigned));
1565 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1568 // For 8-bit load/store instructions with a register offset, both the
1569 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1570 // they're disambiguated by whether the shift was explicit or implicit rather
1572 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1573 assert(N == 2 && "Invalid number of operands!");
1574 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1575 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1576 Inst.addOperand(MCOperand::createImm(IsSigned));
1577 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1581 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1582 assert(N == 1 && "Invalid number of operands!");
1584 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1585 uint64_t Value = CE->getValue();
1586 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1590 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1591 assert(N == 1 && "Invalid number of operands!");
1593 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1594 uint64_t Value = CE->getValue();
1595 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1598 void print(raw_ostream &OS) const override;
1600 static std::unique_ptr<AArch64Operand>
1601 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1602 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1603 Op->Tok.Data = Str.data();
1604 Op->Tok.Length = Str.size();
1605 Op->Tok.IsSuffix = IsSuffix;
1611 static std::unique_ptr<AArch64Operand>
1612 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1613 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1614 Op->Reg.RegNum = RegNum;
1615 Op->Reg.isVector = isVector;
1621 static std::unique_ptr<AArch64Operand>
1622 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1623 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1624 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1625 Op->VectorList.RegNum = RegNum;
1626 Op->VectorList.Count = Count;
1627 Op->VectorList.NumElements = NumElements;
1628 Op->VectorList.ElementKind = ElementKind;
1634 static std::unique_ptr<AArch64Operand>
1635 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1636 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1637 Op->VectorIndex.Val = Idx;
1643 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1644 SMLoc E, MCContext &Ctx) {
1645 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1652 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1653 unsigned ShiftAmount,
1656 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1657 Op->ShiftedImm .Val = Val;
1658 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1664 static std::unique_ptr<AArch64Operand>
1665 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1667 Op->CondCode.Code = Code;
1673 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1675 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1676 Op->FPImm.Val = Val;
1682 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1686 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1687 Op->Barrier.Val = Val;
1688 Op->Barrier.Data = Str.data();
1689 Op->Barrier.Length = Str.size();
1695 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1698 uint32_t PStateField,
1700 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1701 Op->SysReg.Data = Str.data();
1702 Op->SysReg.Length = Str.size();
1703 Op->SysReg.MRSReg = MRSReg;
1704 Op->SysReg.MSRReg = MSRReg;
1705 Op->SysReg.PStateField = PStateField;
1711 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1712 SMLoc E, MCContext &Ctx) {
1713 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1714 Op->SysCRImm.Val = Val;
1720 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1724 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1725 Op->Prefetch.Val = Val;
1726 Op->Barrier.Data = Str.data();
1727 Op->Barrier.Length = Str.size();
1733 static std::unique_ptr<AArch64Operand>
1734 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1735 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1736 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1737 Op->ShiftExtend.Type = ShOp;
1738 Op->ShiftExtend.Amount = Val;
1739 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1746 } // end anonymous namespace.
1748 void AArch64Operand::print(raw_ostream &OS) const {
1751 OS << "<fpimm " << getFPImm() << "("
1752 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1755 StringRef Name = getBarrierName();
1757 OS << "<barrier " << Name << ">";
1759 OS << "<barrier invalid #" << getBarrier() << ">";
1765 case k_ShiftedImm: {
1766 unsigned Shift = getShiftedImmShift();
1767 OS << "<shiftedimm ";
1768 OS << *getShiftedImmVal();
1769 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1773 OS << "<condcode " << getCondCode() << ">";
1776 OS << "<register " << getReg() << ">";
1778 case k_VectorList: {
1779 OS << "<vectorlist ";
1780 unsigned Reg = getVectorListStart();
1781 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1782 OS << Reg + i << " ";
1787 OS << "<vectorindex " << getVectorIndex() << ">";
1790 OS << "<sysreg: " << getSysReg() << '>';
1793 OS << "'" << getToken() << "'";
1796 OS << "c" << getSysCR();
1799 StringRef Name = getPrefetchName();
1801 OS << "<prfop " << Name << ">";
1803 OS << "<prfop invalid #" << getPrefetch() << ">";
1806 case k_ShiftExtend: {
1807 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1808 << getShiftExtendAmount();
1809 if (!hasShiftExtendAmount())
1817 /// @name Auto-generated Match Functions
1820 static unsigned MatchRegisterName(StringRef Name);
1824 static unsigned matchVectorRegName(StringRef Name) {
1825 return StringSwitch<unsigned>(Name.lower())
1826 .Case("v0", AArch64::Q0)
1827 .Case("v1", AArch64::Q1)
1828 .Case("v2", AArch64::Q2)
1829 .Case("v3", AArch64::Q3)
1830 .Case("v4", AArch64::Q4)
1831 .Case("v5", AArch64::Q5)
1832 .Case("v6", AArch64::Q6)
1833 .Case("v7", AArch64::Q7)
1834 .Case("v8", AArch64::Q8)
1835 .Case("v9", AArch64::Q9)
1836 .Case("v10", AArch64::Q10)
1837 .Case("v11", AArch64::Q11)
1838 .Case("v12", AArch64::Q12)
1839 .Case("v13", AArch64::Q13)
1840 .Case("v14", AArch64::Q14)
1841 .Case("v15", AArch64::Q15)
1842 .Case("v16", AArch64::Q16)
1843 .Case("v17", AArch64::Q17)
1844 .Case("v18", AArch64::Q18)
1845 .Case("v19", AArch64::Q19)
1846 .Case("v20", AArch64::Q20)
1847 .Case("v21", AArch64::Q21)
1848 .Case("v22", AArch64::Q22)
1849 .Case("v23", AArch64::Q23)
1850 .Case("v24", AArch64::Q24)
1851 .Case("v25", AArch64::Q25)
1852 .Case("v26", AArch64::Q26)
1853 .Case("v27", AArch64::Q27)
1854 .Case("v28", AArch64::Q28)
1855 .Case("v29", AArch64::Q29)
1856 .Case("v30", AArch64::Q30)
1857 .Case("v31", AArch64::Q31)
1861 static bool isValidVectorKind(StringRef Name) {
1862 return StringSwitch<bool>(Name.lower())
1872 // Accept the width neutral ones, too, for verbose syntax. If those
1873 // aren't used in the right places, the token operand won't match so
1874 // all will work out.
1882 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1883 char &ElementKind) {
1884 assert(isValidVectorKind(Name));
1886 ElementKind = Name.lower()[Name.size() - 1];
1889 if (Name.size() == 2)
1892 // Parse the lane count
1893 Name = Name.drop_front();
1894 while (isdigit(Name.front())) {
1895 NumElements = 10 * NumElements + (Name.front() - '0');
1896 Name = Name.drop_front();
1900 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1902 StartLoc = getLoc();
1903 RegNo = tryParseRegister();
1904 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1905 return (RegNo == (unsigned)-1);
1908 // Matches a register name or register alias previously defined by '.req'
1909 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1911 unsigned RegNum = isVector ? matchVectorRegName(Name)
1912 : MatchRegisterName(Name);
1915 // Check for aliases registered via .req. Canonicalize to lower case.
1916 // That's more consistent since register names are case insensitive, and
1917 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1918 auto Entry = RegisterReqs.find(Name.lower());
1919 if (Entry == RegisterReqs.end())
1921 // set RegNum if the match is the right kind of register
1922 if (isVector == Entry->getValue().first)
1923 RegNum = Entry->getValue().second;
1928 /// tryParseRegister - Try to parse a register name. The token must be an
1929 /// Identifier when called, and if it is a register name the token is eaten and
1930 /// the register is added to the operand list.
1931 int AArch64AsmParser::tryParseRegister() {
1932 MCAsmParser &Parser = getParser();
1933 const AsmToken &Tok = Parser.getTok();
1934 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1936 std::string lowerCase = Tok.getString().lower();
1937 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1938 // Also handle a few aliases of registers.
1940 RegNum = StringSwitch<unsigned>(lowerCase)
1941 .Case("fp", AArch64::FP)
1942 .Case("lr", AArch64::LR)
1943 .Case("x31", AArch64::XZR)
1944 .Case("w31", AArch64::WZR)
1950 Parser.Lex(); // Eat identifier token.
1954 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1955 /// kind specifier. If it is a register specifier, eat the token and return it.
1956 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1957 MCAsmParser &Parser = getParser();
1958 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1959 TokError("vector register expected");
1963 StringRef Name = Parser.getTok().getString();
1964 // If there is a kind specifier, it's separated from the register name by
1966 size_t Start = 0, Next = Name.find('.');
1967 StringRef Head = Name.slice(Start, Next);
1968 unsigned RegNum = matchRegisterNameAlias(Head, true);
1971 if (Next != StringRef::npos) {
1972 Kind = Name.slice(Next, StringRef::npos);
1973 if (!isValidVectorKind(Kind)) {
1974 TokError("invalid vector kind qualifier");
1978 Parser.Lex(); // Eat the register token.
1983 TokError("vector register expected");
1987 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1988 AArch64AsmParser::OperandMatchResultTy
1989 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1990 MCAsmParser &Parser = getParser();
1993 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1994 Error(S, "Expected cN operand where 0 <= N <= 15");
1995 return MatchOperand_ParseFail;
1998 StringRef Tok = Parser.getTok().getIdentifier();
1999 if (Tok[0] != 'c' && Tok[0] != 'C') {
2000 Error(S, "Expected cN operand where 0 <= N <= 15");
2001 return MatchOperand_ParseFail;
2005 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2006 if (BadNum || CRNum > 15) {
2007 Error(S, "Expected cN operand where 0 <= N <= 15");
2008 return MatchOperand_ParseFail;
2011 Parser.Lex(); // Eat identifier token.
2013 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2014 return MatchOperand_Success;
2017 /// tryParsePrefetch - Try to parse a prefetch operand.
2018 AArch64AsmParser::OperandMatchResultTy
2019 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2020 MCAsmParser &Parser = getParser();
2022 const AsmToken &Tok = Parser.getTok();
2023 // Either an identifier for named values or a 5-bit immediate.
2024 bool Hash = Tok.is(AsmToken::Hash);
2025 if (Hash || Tok.is(AsmToken::Integer)) {
2027 Parser.Lex(); // Eat hash token.
2028 const MCExpr *ImmVal;
2029 if (getParser().parseExpression(ImmVal))
2030 return MatchOperand_ParseFail;
2032 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2034 TokError("immediate value expected for prefetch operand");
2035 return MatchOperand_ParseFail;
2037 unsigned prfop = MCE->getValue();
2039 TokError("prefetch operand out of range, [0,31] expected");
2040 return MatchOperand_ParseFail;
2044 auto Mapper = AArch64PRFM::PRFMMapper();
2046 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2047 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2049 return MatchOperand_Success;
2052 if (Tok.isNot(AsmToken::Identifier)) {
2053 TokError("pre-fetch hint expected");
2054 return MatchOperand_ParseFail;
2058 auto Mapper = AArch64PRFM::PRFMMapper();
2060 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2062 TokError("pre-fetch hint expected");
2063 return MatchOperand_ParseFail;
2066 Parser.Lex(); // Eat identifier token.
2067 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2069 return MatchOperand_Success;
2072 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2074 AArch64AsmParser::OperandMatchResultTy
2075 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2076 MCAsmParser &Parser = getParser();
2080 if (Parser.getTok().is(AsmToken::Hash)) {
2081 Parser.Lex(); // Eat hash token.
2084 if (parseSymbolicImmVal(Expr))
2085 return MatchOperand_ParseFail;
2087 AArch64MCExpr::VariantKind ELFRefKind;
2088 MCSymbolRefExpr::VariantKind DarwinRefKind;
2090 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2091 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2092 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2093 // No modifier was specified at all; this is the syntax for an ELF basic
2094 // ADRP relocation (unfortunately).
2096 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2097 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2098 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2100 Error(S, "gotpage label reference not allowed an addend");
2101 return MatchOperand_ParseFail;
2102 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2103 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2104 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2105 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2106 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2107 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2108 // The operand must be an @page or @gotpage qualified symbolref.
2109 Error(S, "page or gotpage label reference expected");
2110 return MatchOperand_ParseFail;
2114 // We have either a label reference possibly with addend or an immediate. The
2115 // addend is a raw value here. The linker will adjust it to only reference the
2117 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2118 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2120 return MatchOperand_Success;
2123 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2125 AArch64AsmParser::OperandMatchResultTy
2126 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2127 MCAsmParser &Parser = getParser();
2131 if (Parser.getTok().is(AsmToken::Hash)) {
2132 Parser.Lex(); // Eat hash token.
2135 if (getParser().parseExpression(Expr))
2136 return MatchOperand_ParseFail;
2138 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2139 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2141 return MatchOperand_Success;
2144 /// tryParseFPImm - A floating point immediate expression operand.
2145 AArch64AsmParser::OperandMatchResultTy
2146 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2147 MCAsmParser &Parser = getParser();
2151 if (Parser.getTok().is(AsmToken::Hash)) {
2152 Parser.Lex(); // Eat '#'
2156 // Handle negation, as that still comes through as a separate token.
2157 bool isNegative = false;
2158 if (Parser.getTok().is(AsmToken::Minus)) {
2162 const AsmToken &Tok = Parser.getTok();
2163 if (Tok.is(AsmToken::Real)) {
2164 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2166 RealVal.changeSign();
2168 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2169 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2170 Parser.Lex(); // Eat the token.
2171 // Check for out of range values. As an exception, we let Zero through,
2172 // as we handle that special case in post-processing before matching in
2173 // order to use the zero register for it.
2174 if (Val == -1 && !RealVal.isPosZero()) {
2175 TokError("expected compatible register or floating-point constant");
2176 return MatchOperand_ParseFail;
2178 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2179 return MatchOperand_Success;
2181 if (Tok.is(AsmToken::Integer)) {
2183 if (!isNegative && Tok.getString().startswith("0x")) {
2184 Val = Tok.getIntVal();
2185 if (Val > 255 || Val < 0) {
2186 TokError("encoded floating point value out of range");
2187 return MatchOperand_ParseFail;
2190 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2191 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2192 // If we had a '-' in front, toggle the sign bit.
2193 IntVal ^= (uint64_t)isNegative << 63;
2194 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2196 Parser.Lex(); // Eat the token.
2197 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2198 return MatchOperand_Success;
2202 return MatchOperand_NoMatch;
2204 TokError("invalid floating point immediate");
2205 return MatchOperand_ParseFail;
2208 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2209 AArch64AsmParser::OperandMatchResultTy
2210 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2211 MCAsmParser &Parser = getParser();
2214 if (Parser.getTok().is(AsmToken::Hash))
2215 Parser.Lex(); // Eat '#'
2216 else if (Parser.getTok().isNot(AsmToken::Integer))
2217 // Operand should start from # or should be integer, emit error otherwise.
2218 return MatchOperand_NoMatch;
2221 if (parseSymbolicImmVal(Imm))
2222 return MatchOperand_ParseFail;
2223 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2224 uint64_t ShiftAmount = 0;
2225 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2227 int64_t Val = MCE->getValue();
2228 if (Val > 0xfff && (Val & 0xfff) == 0) {
2229 Imm = MCConstantExpr::create(Val >> 12, getContext());
2233 SMLoc E = Parser.getTok().getLoc();
2234 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2236 return MatchOperand_Success;
2242 // The optional operand must be "lsl #N" where N is non-negative.
2243 if (!Parser.getTok().is(AsmToken::Identifier) ||
2244 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2245 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2246 return MatchOperand_ParseFail;
2252 if (Parser.getTok().is(AsmToken::Hash)) {
2256 if (Parser.getTok().isNot(AsmToken::Integer)) {
2257 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2258 return MatchOperand_ParseFail;
2261 int64_t ShiftAmount = Parser.getTok().getIntVal();
2263 if (ShiftAmount < 0) {
2264 Error(Parser.getTok().getLoc(), "positive shift amount required");
2265 return MatchOperand_ParseFail;
2267 Parser.Lex(); // Eat the number
2269 SMLoc E = Parser.getTok().getLoc();
2270 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2271 S, E, getContext()));
2272 return MatchOperand_Success;
2275 /// parseCondCodeString - Parse a Condition Code string.
2276 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2277 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2278 .Case("eq", AArch64CC::EQ)
2279 .Case("ne", AArch64CC::NE)
2280 .Case("cs", AArch64CC::HS)
2281 .Case("hs", AArch64CC::HS)
2282 .Case("cc", AArch64CC::LO)
2283 .Case("lo", AArch64CC::LO)
2284 .Case("mi", AArch64CC::MI)
2285 .Case("pl", AArch64CC::PL)
2286 .Case("vs", AArch64CC::VS)
2287 .Case("vc", AArch64CC::VC)
2288 .Case("hi", AArch64CC::HI)
2289 .Case("ls", AArch64CC::LS)
2290 .Case("ge", AArch64CC::GE)
2291 .Case("lt", AArch64CC::LT)
2292 .Case("gt", AArch64CC::GT)
2293 .Case("le", AArch64CC::LE)
2294 .Case("al", AArch64CC::AL)
2295 .Case("nv", AArch64CC::NV)
2296 .Default(AArch64CC::Invalid);
2300 /// parseCondCode - Parse a Condition Code operand.
2301 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2302 bool invertCondCode) {
2303 MCAsmParser &Parser = getParser();
2305 const AsmToken &Tok = Parser.getTok();
2306 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2308 StringRef Cond = Tok.getString();
2309 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2310 if (CC == AArch64CC::Invalid)
2311 return TokError("invalid condition code");
2312 Parser.Lex(); // Eat identifier token.
2314 if (invertCondCode) {
2315 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2316 return TokError("condition codes AL and NV are invalid for this instruction");
2317 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2321 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2325 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2326 /// them if present.
2327 AArch64AsmParser::OperandMatchResultTy
2328 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2329 MCAsmParser &Parser = getParser();
2330 const AsmToken &Tok = Parser.getTok();
2331 std::string LowerID = Tok.getString().lower();
2332 AArch64_AM::ShiftExtendType ShOp =
2333 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2334 .Case("lsl", AArch64_AM::LSL)
2335 .Case("lsr", AArch64_AM::LSR)
2336 .Case("asr", AArch64_AM::ASR)
2337 .Case("ror", AArch64_AM::ROR)
2338 .Case("msl", AArch64_AM::MSL)
2339 .Case("uxtb", AArch64_AM::UXTB)
2340 .Case("uxth", AArch64_AM::UXTH)
2341 .Case("uxtw", AArch64_AM::UXTW)
2342 .Case("uxtx", AArch64_AM::UXTX)
2343 .Case("sxtb", AArch64_AM::SXTB)
2344 .Case("sxth", AArch64_AM::SXTH)
2345 .Case("sxtw", AArch64_AM::SXTW)
2346 .Case("sxtx", AArch64_AM::SXTX)
2347 .Default(AArch64_AM::InvalidShiftExtend);
2349 if (ShOp == AArch64_AM::InvalidShiftExtend)
2350 return MatchOperand_NoMatch;
2352 SMLoc S = Tok.getLoc();
2355 bool Hash = getLexer().is(AsmToken::Hash);
2356 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2357 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2358 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2359 ShOp == AArch64_AM::MSL) {
2360 // We expect a number here.
2361 TokError("expected #imm after shift specifier");
2362 return MatchOperand_ParseFail;
2365 // "extend" type operatoins don't need an immediate, #0 is implicit.
2366 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2368 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2369 return MatchOperand_Success;
2373 Parser.Lex(); // Eat the '#'.
2375 // Make sure we do actually have a number or a parenthesized expression.
2376 SMLoc E = Parser.getTok().getLoc();
2377 if (!Parser.getTok().is(AsmToken::Integer) &&
2378 !Parser.getTok().is(AsmToken::LParen)) {
2379 Error(E, "expected integer shift amount");
2380 return MatchOperand_ParseFail;
2383 const MCExpr *ImmVal;
2384 if (getParser().parseExpression(ImmVal))
2385 return MatchOperand_ParseFail;
2387 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2389 Error(E, "expected constant '#imm' after shift specifier");
2390 return MatchOperand_ParseFail;
2393 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2394 Operands.push_back(AArch64Operand::CreateShiftExtend(
2395 ShOp, MCE->getValue(), true, S, E, getContext()));
2396 return MatchOperand_Success;
2399 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2400 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2401 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2402 OperandVector &Operands) {
2403 if (Name.find('.') != StringRef::npos)
2404 return TokError("invalid operand");
2408 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2410 MCAsmParser &Parser = getParser();
2411 const AsmToken &Tok = Parser.getTok();
2412 StringRef Op = Tok.getString();
2413 SMLoc S = Tok.getLoc();
2415 const MCExpr *Expr = nullptr;
2417 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2419 Expr = MCConstantExpr::create(op1, getContext()); \
2420 Operands.push_back( \
2421 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2422 Operands.push_back( \
2423 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2424 Operands.push_back( \
2425 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2426 Expr = MCConstantExpr::create(op2, getContext()); \
2427 Operands.push_back( \
2428 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2431 if (Mnemonic == "ic") {
2432 if (!Op.compare_lower("ialluis")) {
2433 // SYS #0, C7, C1, #0
2434 SYS_ALIAS(0, 7, 1, 0);
2435 } else if (!Op.compare_lower("iallu")) {
2436 // SYS #0, C7, C5, #0
2437 SYS_ALIAS(0, 7, 5, 0);
2438 } else if (!Op.compare_lower("ivau")) {
2439 // SYS #3, C7, C5, #1
2440 SYS_ALIAS(3, 7, 5, 1);
2442 return TokError("invalid operand for IC instruction");
2444 } else if (Mnemonic == "dc") {
2445 if (!Op.compare_lower("zva")) {
2446 // SYS #3, C7, C4, #1
2447 SYS_ALIAS(3, 7, 4, 1);
2448 } else if (!Op.compare_lower("ivac")) {
2449 // SYS #3, C7, C6, #1
2450 SYS_ALIAS(0, 7, 6, 1);
2451 } else if (!Op.compare_lower("isw")) {
2452 // SYS #0, C7, C6, #2
2453 SYS_ALIAS(0, 7, 6, 2);
2454 } else if (!Op.compare_lower("cvac")) {
2455 // SYS #3, C7, C10, #1
2456 SYS_ALIAS(3, 7, 10, 1);
2457 } else if (!Op.compare_lower("csw")) {
2458 // SYS #0, C7, C10, #2
2459 SYS_ALIAS(0, 7, 10, 2);
2460 } else if (!Op.compare_lower("cvau")) {
2461 // SYS #3, C7, C11, #1
2462 SYS_ALIAS(3, 7, 11, 1);
2463 } else if (!Op.compare_lower("civac")) {
2464 // SYS #3, C7, C14, #1
2465 SYS_ALIAS(3, 7, 14, 1);
2466 } else if (!Op.compare_lower("cisw")) {
2467 // SYS #0, C7, C14, #2
2468 SYS_ALIAS(0, 7, 14, 2);
2470 return TokError("invalid operand for DC instruction");
2472 } else if (Mnemonic == "at") {
2473 if (!Op.compare_lower("s1e1r")) {
2474 // SYS #0, C7, C8, #0
2475 SYS_ALIAS(0, 7, 8, 0);
2476 } else if (!Op.compare_lower("s1e2r")) {
2477 // SYS #4, C7, C8, #0
2478 SYS_ALIAS(4, 7, 8, 0);
2479 } else if (!Op.compare_lower("s1e3r")) {
2480 // SYS #6, C7, C8, #0
2481 SYS_ALIAS(6, 7, 8, 0);
2482 } else if (!Op.compare_lower("s1e1w")) {
2483 // SYS #0, C7, C8, #1
2484 SYS_ALIAS(0, 7, 8, 1);
2485 } else if (!Op.compare_lower("s1e2w")) {
2486 // SYS #4, C7, C8, #1
2487 SYS_ALIAS(4, 7, 8, 1);
2488 } else if (!Op.compare_lower("s1e3w")) {
2489 // SYS #6, C7, C8, #1
2490 SYS_ALIAS(6, 7, 8, 1);
2491 } else if (!Op.compare_lower("s1e0r")) {
2492 // SYS #0, C7, C8, #3
2493 SYS_ALIAS(0, 7, 8, 2);
2494 } else if (!Op.compare_lower("s1e0w")) {
2495 // SYS #0, C7, C8, #3
2496 SYS_ALIAS(0, 7, 8, 3);
2497 } else if (!Op.compare_lower("s12e1r")) {
2498 // SYS #4, C7, C8, #4
2499 SYS_ALIAS(4, 7, 8, 4);
2500 } else if (!Op.compare_lower("s12e1w")) {
2501 // SYS #4, C7, C8, #5
2502 SYS_ALIAS(4, 7, 8, 5);
2503 } else if (!Op.compare_lower("s12e0r")) {
2504 // SYS #4, C7, C8, #6
2505 SYS_ALIAS(4, 7, 8, 6);
2506 } else if (!Op.compare_lower("s12e0w")) {
2507 // SYS #4, C7, C8, #7
2508 SYS_ALIAS(4, 7, 8, 7);
2510 return TokError("invalid operand for AT instruction");
2512 } else if (Mnemonic == "tlbi") {
2513 if (!Op.compare_lower("vmalle1is")) {
2514 // SYS #0, C8, C3, #0
2515 SYS_ALIAS(0, 8, 3, 0);
2516 } else if (!Op.compare_lower("alle2is")) {
2517 // SYS #4, C8, C3, #0
2518 SYS_ALIAS(4, 8, 3, 0);
2519 } else if (!Op.compare_lower("alle3is")) {
2520 // SYS #6, C8, C3, #0
2521 SYS_ALIAS(6, 8, 3, 0);
2522 } else if (!Op.compare_lower("vae1is")) {
2523 // SYS #0, C8, C3, #1
2524 SYS_ALIAS(0, 8, 3, 1);
2525 } else if (!Op.compare_lower("vae2is")) {
2526 // SYS #4, C8, C3, #1
2527 SYS_ALIAS(4, 8, 3, 1);
2528 } else if (!Op.compare_lower("vae3is")) {
2529 // SYS #6, C8, C3, #1
2530 SYS_ALIAS(6, 8, 3, 1);
2531 } else if (!Op.compare_lower("aside1is")) {
2532 // SYS #0, C8, C3, #2
2533 SYS_ALIAS(0, 8, 3, 2);
2534 } else if (!Op.compare_lower("vaae1is")) {
2535 // SYS #0, C8, C3, #3
2536 SYS_ALIAS(0, 8, 3, 3);
2537 } else if (!Op.compare_lower("alle1is")) {
2538 // SYS #4, C8, C3, #4
2539 SYS_ALIAS(4, 8, 3, 4);
2540 } else if (!Op.compare_lower("vale1is")) {
2541 // SYS #0, C8, C3, #5
2542 SYS_ALIAS(0, 8, 3, 5);
2543 } else if (!Op.compare_lower("vaale1is")) {
2544 // SYS #0, C8, C3, #7
2545 SYS_ALIAS(0, 8, 3, 7);
2546 } else if (!Op.compare_lower("vmalle1")) {
2547 // SYS #0, C8, C7, #0
2548 SYS_ALIAS(0, 8, 7, 0);
2549 } else if (!Op.compare_lower("alle2")) {
2550 // SYS #4, C8, C7, #0
2551 SYS_ALIAS(4, 8, 7, 0);
2552 } else if (!Op.compare_lower("vale2is")) {
2553 // SYS #4, C8, C3, #5
2554 SYS_ALIAS(4, 8, 3, 5);
2555 } else if (!Op.compare_lower("vale3is")) {
2556 // SYS #6, C8, C3, #5
2557 SYS_ALIAS(6, 8, 3, 5);
2558 } else if (!Op.compare_lower("alle3")) {
2559 // SYS #6, C8, C7, #0
2560 SYS_ALIAS(6, 8, 7, 0);
2561 } else if (!Op.compare_lower("vae1")) {
2562 // SYS #0, C8, C7, #1
2563 SYS_ALIAS(0, 8, 7, 1);
2564 } else if (!Op.compare_lower("vae2")) {
2565 // SYS #4, C8, C7, #1
2566 SYS_ALIAS(4, 8, 7, 1);
2567 } else if (!Op.compare_lower("vae3")) {
2568 // SYS #6, C8, C7, #1
2569 SYS_ALIAS(6, 8, 7, 1);
2570 } else if (!Op.compare_lower("aside1")) {
2571 // SYS #0, C8, C7, #2
2572 SYS_ALIAS(0, 8, 7, 2);
2573 } else if (!Op.compare_lower("vaae1")) {
2574 // SYS #0, C8, C7, #3
2575 SYS_ALIAS(0, 8, 7, 3);
2576 } else if (!Op.compare_lower("alle1")) {
2577 // SYS #4, C8, C7, #4
2578 SYS_ALIAS(4, 8, 7, 4);
2579 } else if (!Op.compare_lower("vale1")) {
2580 // SYS #0, C8, C7, #5
2581 SYS_ALIAS(0, 8, 7, 5);
2582 } else if (!Op.compare_lower("vale2")) {
2583 // SYS #4, C8, C7, #5
2584 SYS_ALIAS(4, 8, 7, 5);
2585 } else if (!Op.compare_lower("vale3")) {
2586 // SYS #6, C8, C7, #5
2587 SYS_ALIAS(6, 8, 7, 5);
2588 } else if (!Op.compare_lower("vaale1")) {
2589 // SYS #0, C8, C7, #7
2590 SYS_ALIAS(0, 8, 7, 7);
2591 } else if (!Op.compare_lower("ipas2e1")) {
2592 // SYS #4, C8, C4, #1
2593 SYS_ALIAS(4, 8, 4, 1);
2594 } else if (!Op.compare_lower("ipas2le1")) {
2595 // SYS #4, C8, C4, #5
2596 SYS_ALIAS(4, 8, 4, 5);
2597 } else if (!Op.compare_lower("ipas2e1is")) {
2598 // SYS #4, C8, C4, #1
2599 SYS_ALIAS(4, 8, 0, 1);
2600 } else if (!Op.compare_lower("ipas2le1is")) {
2601 // SYS #4, C8, C4, #5
2602 SYS_ALIAS(4, 8, 0, 5);
2603 } else if (!Op.compare_lower("vmalls12e1")) {
2604 // SYS #4, C8, C7, #6
2605 SYS_ALIAS(4, 8, 7, 6);
2606 } else if (!Op.compare_lower("vmalls12e1is")) {
2607 // SYS #4, C8, C3, #6
2608 SYS_ALIAS(4, 8, 3, 6);
2610 return TokError("invalid operand for TLBI instruction");
2616 Parser.Lex(); // Eat operand.
2618 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2619 bool HasRegister = false;
2621 // Check for the optional register operand.
2622 if (getLexer().is(AsmToken::Comma)) {
2623 Parser.Lex(); // Eat comma.
2625 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2626 return TokError("expected register operand");
2631 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2632 Parser.eatToEndOfStatement();
2633 return TokError("unexpected token in argument list");
2636 if (ExpectRegister && !HasRegister) {
2637 return TokError("specified " + Mnemonic + " op requires a register");
2639 else if (!ExpectRegister && HasRegister) {
2640 return TokError("specified " + Mnemonic + " op does not use a register");
2643 Parser.Lex(); // Consume the EndOfStatement
2647 AArch64AsmParser::OperandMatchResultTy
2648 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2649 MCAsmParser &Parser = getParser();
2650 const AsmToken &Tok = Parser.getTok();
2652 // Can be either a #imm style literal or an option name
2653 bool Hash = Tok.is(AsmToken::Hash);
2654 if (Hash || Tok.is(AsmToken::Integer)) {
2655 // Immediate operand.
2657 Parser.Lex(); // Eat the '#'
2658 const MCExpr *ImmVal;
2659 SMLoc ExprLoc = getLoc();
2660 if (getParser().parseExpression(ImmVal))
2661 return MatchOperand_ParseFail;
2662 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2664 Error(ExprLoc, "immediate value expected for barrier operand");
2665 return MatchOperand_ParseFail;
2667 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2668 Error(ExprLoc, "barrier operand out of range");
2669 return MatchOperand_ParseFail;
2672 auto Mapper = AArch64DB::DBarrierMapper();
2674 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2675 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2676 ExprLoc, getContext()));
2677 return MatchOperand_Success;
2680 if (Tok.isNot(AsmToken::Identifier)) {
2681 TokError("invalid operand for instruction");
2682 return MatchOperand_ParseFail;
2686 auto Mapper = AArch64DB::DBarrierMapper();
2688 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2690 TokError("invalid barrier option name");
2691 return MatchOperand_ParseFail;
2694 // The only valid named option for ISB is 'sy'
2695 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2696 TokError("'sy' or #imm operand expected");
2697 return MatchOperand_ParseFail;
2700 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2701 getLoc(), getContext()));
2702 Parser.Lex(); // Consume the option
2704 return MatchOperand_Success;
2707 AArch64AsmParser::OperandMatchResultTy
2708 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2709 MCAsmParser &Parser = getParser();
2710 const AsmToken &Tok = Parser.getTok();
2712 if (Tok.isNot(AsmToken::Identifier))
2713 return MatchOperand_NoMatch;
2716 auto MRSMapper = AArch64SysReg::MRSMapper();
2717 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2719 assert(IsKnown == (MRSReg != -1U) &&
2720 "register should be -1 if and only if it's unknown");
2722 auto MSRMapper = AArch64SysReg::MSRMapper();
2723 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2725 assert(IsKnown == (MSRReg != -1U) &&
2726 "register should be -1 if and only if it's unknown");
2728 auto PStateMapper = AArch64PState::PStateMapper();
2729 uint32_t PStateField =
2730 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2731 assert(IsKnown == (PStateField != -1U) &&
2732 "register should be -1 if and only if it's unknown");
2734 Operands.push_back(AArch64Operand::CreateSysReg(
2735 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2736 Parser.Lex(); // Eat identifier
2738 return MatchOperand_Success;
2741 /// tryParseVectorRegister - Parse a vector register operand.
2742 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2743 MCAsmParser &Parser = getParser();
2744 if (Parser.getTok().isNot(AsmToken::Identifier))
2748 // Check for a vector register specifier first.
2750 int64_t Reg = tryMatchVectorRegister(Kind, false);
2754 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2755 // If there was an explicit qualifier, that goes on as a literal text
2759 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2761 // If there is an index specifier following the register, parse that too.
2762 if (Parser.getTok().is(AsmToken::LBrac)) {
2763 SMLoc SIdx = getLoc();
2764 Parser.Lex(); // Eat left bracket token.
2766 const MCExpr *ImmVal;
2767 if (getParser().parseExpression(ImmVal))
2769 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2771 TokError("immediate value expected for vector index");
2776 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2777 Error(E, "']' expected");
2781 Parser.Lex(); // Eat right bracket token.
2783 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2790 /// parseRegister - Parse a non-vector register operand.
2791 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2792 MCAsmParser &Parser = getParser();
2794 // Try for a vector register.
2795 if (!tryParseVectorRegister(Operands))
2798 // Try for a scalar register.
2799 int64_t Reg = tryParseRegister();
2803 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2805 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2806 // as a string token in the instruction itself.
2807 if (getLexer().getKind() == AsmToken::LBrac) {
2808 SMLoc LBracS = getLoc();
2810 const AsmToken &Tok = Parser.getTok();
2811 if (Tok.is(AsmToken::Integer)) {
2812 SMLoc IntS = getLoc();
2813 int64_t Val = Tok.getIntVal();
2816 if (getLexer().getKind() == AsmToken::RBrac) {
2817 SMLoc RBracS = getLoc();
2820 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2822 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2824 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2834 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2835 MCAsmParser &Parser = getParser();
2836 bool HasELFModifier = false;
2837 AArch64MCExpr::VariantKind RefKind;
2839 if (Parser.getTok().is(AsmToken::Colon)) {
2840 Parser.Lex(); // Eat ':"
2841 HasELFModifier = true;
2843 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2844 Error(Parser.getTok().getLoc(),
2845 "expect relocation specifier in operand after ':'");
2849 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2850 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2851 .Case("lo12", AArch64MCExpr::VK_LO12)
2852 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2853 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2854 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2855 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2856 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2857 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2858 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2859 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2860 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2861 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2862 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2863 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2864 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2865 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2866 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2867 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2868 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2869 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2870 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2871 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2872 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2873 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2874 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2875 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2876 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2877 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2878 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2879 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2880 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2881 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2882 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2883 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2884 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2885 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2886 .Default(AArch64MCExpr::VK_INVALID);
2888 if (RefKind == AArch64MCExpr::VK_INVALID) {
2889 Error(Parser.getTok().getLoc(),
2890 "expect relocation specifier in operand after ':'");
2894 Parser.Lex(); // Eat identifier
2896 if (Parser.getTok().isNot(AsmToken::Colon)) {
2897 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2900 Parser.Lex(); // Eat ':'
2903 if (getParser().parseExpression(ImmVal))
2907 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2912 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2913 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2914 MCAsmParser &Parser = getParser();
2915 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2917 Parser.Lex(); // Eat left bracket token.
2919 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2922 int64_t PrevReg = FirstReg;
2925 if (Parser.getTok().is(AsmToken::Minus)) {
2926 Parser.Lex(); // Eat the minus.
2928 SMLoc Loc = getLoc();
2930 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2933 // Any Kind suffices must match on all regs in the list.
2934 if (Kind != NextKind)
2935 return Error(Loc, "mismatched register size suffix");
2937 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2939 if (Space == 0 || Space > 3) {
2940 return Error(Loc, "invalid number of vectors");
2946 while (Parser.getTok().is(AsmToken::Comma)) {
2947 Parser.Lex(); // Eat the comma token.
2949 SMLoc Loc = getLoc();
2951 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2954 // Any Kind suffices must match on all regs in the list.
2955 if (Kind != NextKind)
2956 return Error(Loc, "mismatched register size suffix");
2958 // Registers must be incremental (with wraparound at 31)
2959 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2960 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2961 return Error(Loc, "registers must be sequential");
2968 if (Parser.getTok().isNot(AsmToken::RCurly))
2969 return Error(getLoc(), "'}' expected");
2970 Parser.Lex(); // Eat the '}' token.
2973 return Error(S, "invalid number of vectors");
2975 unsigned NumElements = 0;
2976 char ElementKind = 0;
2978 parseValidVectorKind(Kind, NumElements, ElementKind);
2980 Operands.push_back(AArch64Operand::CreateVectorList(
2981 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2983 // If there is an index specifier following the list, parse that too.
2984 if (Parser.getTok().is(AsmToken::LBrac)) {
2985 SMLoc SIdx = getLoc();
2986 Parser.Lex(); // Eat left bracket token.
2988 const MCExpr *ImmVal;
2989 if (getParser().parseExpression(ImmVal))
2991 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2993 TokError("immediate value expected for vector index");
2998 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2999 Error(E, "']' expected");
3003 Parser.Lex(); // Eat right bracket token.
3005 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3011 AArch64AsmParser::OperandMatchResultTy
3012 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3013 MCAsmParser &Parser = getParser();
3014 const AsmToken &Tok = Parser.getTok();
3015 if (!Tok.is(AsmToken::Identifier))
3016 return MatchOperand_NoMatch;
3018 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3020 MCContext &Ctx = getContext();
3021 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3022 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3023 return MatchOperand_NoMatch;
3026 Parser.Lex(); // Eat register
3028 if (Parser.getTok().isNot(AsmToken::Comma)) {
3030 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3031 return MatchOperand_Success;
3033 Parser.Lex(); // Eat comma.
3035 if (Parser.getTok().is(AsmToken::Hash))
3036 Parser.Lex(); // Eat hash
3038 if (Parser.getTok().isNot(AsmToken::Integer)) {
3039 Error(getLoc(), "index must be absent or #0");
3040 return MatchOperand_ParseFail;
3043 const MCExpr *ImmVal;
3044 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3045 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3046 Error(getLoc(), "index must be absent or #0");
3047 return MatchOperand_ParseFail;
3051 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3052 return MatchOperand_Success;
3055 /// parseOperand - Parse a arm instruction operand. For now this parses the
3056 /// operand regardless of the mnemonic.
3057 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3058 bool invertCondCode) {
3059 MCAsmParser &Parser = getParser();
3060 // Check if the current operand has a custom associated parser, if so, try to
3061 // custom parse the operand, or fallback to the general approach.
3062 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3063 if (ResTy == MatchOperand_Success)
3065 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3066 // there was a match, but an error occurred, in which case, just return that
3067 // the operand parsing failed.
3068 if (ResTy == MatchOperand_ParseFail)
3071 // Nothing custom, so do general case parsing.
3073 switch (getLexer().getKind()) {
3077 if (parseSymbolicImmVal(Expr))
3078 return Error(S, "invalid operand");
3080 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3081 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3084 case AsmToken::LBrac: {
3085 SMLoc Loc = Parser.getTok().getLoc();
3086 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3088 Parser.Lex(); // Eat '['
3090 // There's no comma after a '[', so we can parse the next operand
3092 return parseOperand(Operands, false, false);
3094 case AsmToken::LCurly:
3095 return parseVectorList(Operands);
3096 case AsmToken::Identifier: {
3097 // If we're expecting a Condition Code operand, then just parse that.
3099 return parseCondCode(Operands, invertCondCode);
3101 // If it's a register name, parse it.
3102 if (!parseRegister(Operands))
3105 // This could be an optional "shift" or "extend" operand.
3106 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3107 // We can only continue if no tokens were eaten.
3108 if (GotShift != MatchOperand_NoMatch)
3111 // This was not a register so parse other operands that start with an
3112 // identifier (like labels) as expressions and create them as immediates.
3113 const MCExpr *IdVal;
3115 if (getParser().parseExpression(IdVal))
3118 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3119 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3122 case AsmToken::Integer:
3123 case AsmToken::Real:
3124 case AsmToken::Hash: {
3125 // #42 -> immediate.
3127 if (getLexer().is(AsmToken::Hash))
3130 // Parse a negative sign
3131 bool isNegative = false;
3132 if (Parser.getTok().is(AsmToken::Minus)) {
3134 // We need to consume this token only when we have a Real, otherwise
3135 // we let parseSymbolicImmVal take care of it
3136 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3140 // The only Real that should come through here is a literal #0.0 for
3141 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3142 // so convert the value.
3143 const AsmToken &Tok = Parser.getTok();
3144 if (Tok.is(AsmToken::Real)) {
3145 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3146 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3147 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3148 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3149 Mnemonic != "fcmlt")
3150 return TokError("unexpected floating point literal");
3151 else if (IntVal != 0 || isNegative)
3152 return TokError("expected floating-point constant #0.0");
3153 Parser.Lex(); // Eat the token.
3156 AArch64Operand::CreateToken("#0", false, S, getContext()));
3158 AArch64Operand::CreateToken(".0", false, S, getContext()));
3162 const MCExpr *ImmVal;
3163 if (parseSymbolicImmVal(ImmVal))
3166 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3167 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3170 case AsmToken::Equal: {
3171 SMLoc Loc = Parser.getTok().getLoc();
3172 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3173 return Error(Loc, "unexpected token in operand");
3174 Parser.Lex(); // Eat '='
3175 const MCExpr *SubExprVal;
3176 if (getParser().parseExpression(SubExprVal))
3179 if (Operands.size() < 2 ||
3180 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3184 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3185 Operands[1]->getReg());
3187 MCContext& Ctx = getContext();
3188 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3189 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3190 if (isa<MCConstantExpr>(SubExprVal)) {
3191 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3192 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3193 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3197 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3198 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3199 Operands.push_back(AArch64Operand::CreateImm(
3200 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3202 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3203 ShiftAmt, true, S, E, Ctx));
3206 APInt Simm = APInt(64, Imm << ShiftAmt);
3207 // check if the immediate is an unsigned or signed 32-bit int for W regs
3208 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3209 return Error(Loc, "Immediate too large for register");
3211 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3212 const MCExpr *CPLoc =
3213 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3214 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3220 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3222 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3223 StringRef Name, SMLoc NameLoc,
3224 OperandVector &Operands) {
3225 MCAsmParser &Parser = getParser();
3226 Name = StringSwitch<StringRef>(Name.lower())
3227 .Case("beq", "b.eq")
3228 .Case("bne", "b.ne")
3229 .Case("bhs", "b.hs")
3230 .Case("bcs", "b.cs")
3231 .Case("blo", "b.lo")
3232 .Case("bcc", "b.cc")
3233 .Case("bmi", "b.mi")
3234 .Case("bpl", "b.pl")
3235 .Case("bvs", "b.vs")
3236 .Case("bvc", "b.vc")
3237 .Case("bhi", "b.hi")
3238 .Case("bls", "b.ls")
3239 .Case("bge", "b.ge")
3240 .Case("blt", "b.lt")
3241 .Case("bgt", "b.gt")
3242 .Case("ble", "b.le")
3243 .Case("bal", "b.al")
3244 .Case("bnv", "b.nv")
3247 // First check for the AArch64-specific .req directive.
3248 if (Parser.getTok().is(AsmToken::Identifier) &&
3249 Parser.getTok().getIdentifier() == ".req") {
3250 parseDirectiveReq(Name, NameLoc);
3251 // We always return 'error' for this, as we're done with this
3252 // statement and don't need to match the 'instruction."
3256 // Create the leading tokens for the mnemonic, split by '.' characters.
3257 size_t Start = 0, Next = Name.find('.');
3258 StringRef Head = Name.slice(Start, Next);
3260 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3261 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3262 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3263 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3264 Parser.eatToEndOfStatement();
3269 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3272 // Handle condition codes for a branch mnemonic
3273 if (Head == "b" && Next != StringRef::npos) {
3275 Next = Name.find('.', Start + 1);
3276 Head = Name.slice(Start + 1, Next);
3278 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3279 (Head.data() - Name.data()));
3280 AArch64CC::CondCode CC = parseCondCodeString(Head);
3281 if (CC == AArch64CC::Invalid)
3282 return Error(SuffixLoc, "invalid condition code");
3284 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3286 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3289 // Add the remaining tokens in the mnemonic.
3290 while (Next != StringRef::npos) {
3292 Next = Name.find('.', Start + 1);
3293 Head = Name.slice(Start, Next);
3294 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3295 (Head.data() - Name.data()) + 1);
3297 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3300 // Conditional compare instructions have a Condition Code operand, which needs
3301 // to be parsed and an immediate operand created.
3302 bool condCodeFourthOperand =
3303 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3304 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3305 Head == "csinc" || Head == "csinv" || Head == "csneg");
3307 // These instructions are aliases to some of the conditional select
3308 // instructions. However, the condition code is inverted in the aliased
3311 // FIXME: Is this the correct way to handle these? Or should the parser
3312 // generate the aliased instructions directly?
3313 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3314 bool condCodeThirdOperand =
3315 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3317 // Read the remaining operands.
3318 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3319 // Read the first operand.
3320 if (parseOperand(Operands, false, false)) {
3321 Parser.eatToEndOfStatement();
3326 while (getLexer().is(AsmToken::Comma)) {
3327 Parser.Lex(); // Eat the comma.
3329 // Parse and remember the operand.
3330 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3331 (N == 3 && condCodeThirdOperand) ||
3332 (N == 2 && condCodeSecondOperand),
3333 condCodeSecondOperand || condCodeThirdOperand)) {
3334 Parser.eatToEndOfStatement();
3338 // After successfully parsing some operands there are two special cases to
3339 // consider (i.e. notional operands not separated by commas). Both are due
3340 // to memory specifiers:
3341 // + An RBrac will end an address for load/store/prefetch
3342 // + An '!' will indicate a pre-indexed operation.
3344 // It's someone else's responsibility to make sure these tokens are sane
3345 // in the given context!
3346 if (Parser.getTok().is(AsmToken::RBrac)) {
3347 SMLoc Loc = Parser.getTok().getLoc();
3348 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3353 if (Parser.getTok().is(AsmToken::Exclaim)) {
3354 SMLoc Loc = Parser.getTok().getLoc();
3355 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3364 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3365 SMLoc Loc = Parser.getTok().getLoc();
3366 Parser.eatToEndOfStatement();
3367 return Error(Loc, "unexpected token in argument list");
3370 Parser.Lex(); // Consume the EndOfStatement
3374 // FIXME: This entire function is a giant hack to provide us with decent
3375 // operand range validation/diagnostics until TableGen/MC can be extended
3376 // to support autogeneration of this kind of validation.
3377 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3378 SmallVectorImpl<SMLoc> &Loc) {
3379 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3380 // Check for indexed addressing modes w/ the base register being the
3381 // same as a destination/source register or pair load where
3382 // the Rt == Rt2. All of those are undefined behaviour.
3383 switch (Inst.getOpcode()) {
3384 case AArch64::LDPSWpre:
3385 case AArch64::LDPWpost:
3386 case AArch64::LDPWpre:
3387 case AArch64::LDPXpost:
3388 case AArch64::LDPXpre: {
3389 unsigned Rt = Inst.getOperand(1).getReg();
3390 unsigned Rt2 = Inst.getOperand(2).getReg();
3391 unsigned Rn = Inst.getOperand(3).getReg();
3392 if (RI->isSubRegisterEq(Rn, Rt))
3393 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3394 "is also a destination");
3395 if (RI->isSubRegisterEq(Rn, Rt2))
3396 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3397 "is also a destination");
3400 case AArch64::LDPDi:
3401 case AArch64::LDPQi:
3402 case AArch64::LDPSi:
3403 case AArch64::LDPSWi:
3404 case AArch64::LDPWi:
3405 case AArch64::LDPXi: {
3406 unsigned Rt = Inst.getOperand(0).getReg();
3407 unsigned Rt2 = Inst.getOperand(1).getReg();
3409 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3412 case AArch64::LDPDpost:
3413 case AArch64::LDPDpre:
3414 case AArch64::LDPQpost:
3415 case AArch64::LDPQpre:
3416 case AArch64::LDPSpost:
3417 case AArch64::LDPSpre:
3418 case AArch64::LDPSWpost: {
3419 unsigned Rt = Inst.getOperand(1).getReg();
3420 unsigned Rt2 = Inst.getOperand(2).getReg();
3422 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3425 case AArch64::STPDpost:
3426 case AArch64::STPDpre:
3427 case AArch64::STPQpost:
3428 case AArch64::STPQpre:
3429 case AArch64::STPSpost:
3430 case AArch64::STPSpre:
3431 case AArch64::STPWpost:
3432 case AArch64::STPWpre:
3433 case AArch64::STPXpost:
3434 case AArch64::STPXpre: {
3435 unsigned Rt = Inst.getOperand(1).getReg();
3436 unsigned Rt2 = Inst.getOperand(2).getReg();
3437 unsigned Rn = Inst.getOperand(3).getReg();
3438 if (RI->isSubRegisterEq(Rn, Rt))
3439 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3440 "is also a source");
3441 if (RI->isSubRegisterEq(Rn, Rt2))
3442 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3443 "is also a source");
3446 case AArch64::LDRBBpre:
3447 case AArch64::LDRBpre:
3448 case AArch64::LDRHHpre:
3449 case AArch64::LDRHpre:
3450 case AArch64::LDRSBWpre:
3451 case AArch64::LDRSBXpre:
3452 case AArch64::LDRSHWpre:
3453 case AArch64::LDRSHXpre:
3454 case AArch64::LDRSWpre:
3455 case AArch64::LDRWpre:
3456 case AArch64::LDRXpre:
3457 case AArch64::LDRBBpost:
3458 case AArch64::LDRBpost:
3459 case AArch64::LDRHHpost:
3460 case AArch64::LDRHpost:
3461 case AArch64::LDRSBWpost:
3462 case AArch64::LDRSBXpost:
3463 case AArch64::LDRSHWpost:
3464 case AArch64::LDRSHXpost:
3465 case AArch64::LDRSWpost:
3466 case AArch64::LDRWpost:
3467 case AArch64::LDRXpost: {
3468 unsigned Rt = Inst.getOperand(1).getReg();
3469 unsigned Rn = Inst.getOperand(2).getReg();
3470 if (RI->isSubRegisterEq(Rn, Rt))
3471 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3472 "is also a source");
3475 case AArch64::STRBBpost:
3476 case AArch64::STRBpost:
3477 case AArch64::STRHHpost:
3478 case AArch64::STRHpost:
3479 case AArch64::STRWpost:
3480 case AArch64::STRXpost:
3481 case AArch64::STRBBpre:
3482 case AArch64::STRBpre:
3483 case AArch64::STRHHpre:
3484 case AArch64::STRHpre:
3485 case AArch64::STRWpre:
3486 case AArch64::STRXpre: {
3487 unsigned Rt = Inst.getOperand(1).getReg();
3488 unsigned Rn = Inst.getOperand(2).getReg();
3489 if (RI->isSubRegisterEq(Rn, Rt))
3490 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3491 "is also a source");
3496 // Now check immediate ranges. Separate from the above as there is overlap
3497 // in the instructions being checked and this keeps the nested conditionals
3499 switch (Inst.getOpcode()) {
3500 case AArch64::ADDSWri:
3501 case AArch64::ADDSXri:
3502 case AArch64::ADDWri:
3503 case AArch64::ADDXri:
3504 case AArch64::SUBSWri:
3505 case AArch64::SUBSXri:
3506 case AArch64::SUBWri:
3507 case AArch64::SUBXri: {
3508 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3509 // some slight duplication here.
3510 if (Inst.getOperand(2).isExpr()) {
3511 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3512 AArch64MCExpr::VariantKind ELFRefKind;
3513 MCSymbolRefExpr::VariantKind DarwinRefKind;
3515 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3516 return Error(Loc[2], "invalid immediate expression");
3519 // Only allow these with ADDXri.
3520 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3521 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3522 Inst.getOpcode() == AArch64::ADDXri)
3525 // Only allow these with ADDXri/ADDWri
3526 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3527 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3528 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3529 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3530 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3531 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3532 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3533 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3534 (Inst.getOpcode() == AArch64::ADDXri ||
3535 Inst.getOpcode() == AArch64::ADDWri))
3538 // Don't allow expressions in the immediate field otherwise
3539 return Error(Loc[2], "invalid immediate expression");
3548 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3550 case Match_MissingFeature:
3552 "instruction requires a CPU feature not currently enabled");
3553 case Match_InvalidOperand:
3554 return Error(Loc, "invalid operand for instruction");
3555 case Match_InvalidSuffix:
3556 return Error(Loc, "invalid type suffix for instruction");
3557 case Match_InvalidCondCode:
3558 return Error(Loc, "expected AArch64 condition code");
3559 case Match_AddSubRegExtendSmall:
3561 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3562 case Match_AddSubRegExtendLarge:
3564 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3565 case Match_AddSubSecondSource:
3567 "expected compatible register, symbol or integer in range [0, 4095]");
3568 case Match_LogicalSecondSource:
3569 return Error(Loc, "expected compatible register or logical immediate");
3570 case Match_InvalidMovImm32Shift:
3571 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3572 case Match_InvalidMovImm64Shift:
3573 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3574 case Match_AddSubRegShift32:
3576 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3577 case Match_AddSubRegShift64:
3579 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3580 case Match_InvalidFPImm:
3582 "expected compatible register or floating-point constant");
3583 case Match_InvalidMemoryIndexedSImm9:
3584 return Error(Loc, "index must be an integer in range [-256, 255].");
3585 case Match_InvalidMemoryIndexed4SImm7:
3586 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3587 case Match_InvalidMemoryIndexed8SImm7:
3588 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3589 case Match_InvalidMemoryIndexed16SImm7:
3590 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3591 case Match_InvalidMemoryWExtend8:
3593 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3594 case Match_InvalidMemoryWExtend16:
3596 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3597 case Match_InvalidMemoryWExtend32:
3599 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3600 case Match_InvalidMemoryWExtend64:
3602 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3603 case Match_InvalidMemoryWExtend128:
3605 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3606 case Match_InvalidMemoryXExtend8:
3608 "expected 'lsl' or 'sxtx' with optional shift of #0");
3609 case Match_InvalidMemoryXExtend16:
3611 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3612 case Match_InvalidMemoryXExtend32:
3614 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3615 case Match_InvalidMemoryXExtend64:
3617 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3618 case Match_InvalidMemoryXExtend128:
3620 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3621 case Match_InvalidMemoryIndexed1:
3622 return Error(Loc, "index must be an integer in range [0, 4095].");
3623 case Match_InvalidMemoryIndexed2:
3624 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3625 case Match_InvalidMemoryIndexed4:
3626 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3627 case Match_InvalidMemoryIndexed8:
3628 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3629 case Match_InvalidMemoryIndexed16:
3630 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3631 case Match_InvalidImm0_1:
3632 return Error(Loc, "immediate must be an integer in range [0, 1].");
3633 case Match_InvalidImm0_7:
3634 return Error(Loc, "immediate must be an integer in range [0, 7].");
3635 case Match_InvalidImm0_15:
3636 return Error(Loc, "immediate must be an integer in range [0, 15].");
3637 case Match_InvalidImm0_31:
3638 return Error(Loc, "immediate must be an integer in range [0, 31].");
3639 case Match_InvalidImm0_63:
3640 return Error(Loc, "immediate must be an integer in range [0, 63].");
3641 case Match_InvalidImm0_127:
3642 return Error(Loc, "immediate must be an integer in range [0, 127].");
3643 case Match_InvalidImm0_65535:
3644 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3645 case Match_InvalidImm1_8:
3646 return Error(Loc, "immediate must be an integer in range [1, 8].");
3647 case Match_InvalidImm1_16:
3648 return Error(Loc, "immediate must be an integer in range [1, 16].");
3649 case Match_InvalidImm1_32:
3650 return Error(Loc, "immediate must be an integer in range [1, 32].");
3651 case Match_InvalidImm1_64:
3652 return Error(Loc, "immediate must be an integer in range [1, 64].");
3653 case Match_InvalidIndex1:
3654 return Error(Loc, "expected lane specifier '[1]'");
3655 case Match_InvalidIndexB:
3656 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3657 case Match_InvalidIndexH:
3658 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3659 case Match_InvalidIndexS:
3660 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3661 case Match_InvalidIndexD:
3662 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3663 case Match_InvalidLabel:
3664 return Error(Loc, "expected label or encodable integer pc offset");
3666 return Error(Loc, "expected readable system register");
3668 return Error(Loc, "expected writable system register or pstate");
3669 case Match_MnemonicFail:
3670 return Error(Loc, "unrecognized instruction mnemonic");
3672 llvm_unreachable("unexpected error code!");
3676 static const char *getSubtargetFeatureName(uint64_t Val);
3678 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3679 OperandVector &Operands,
3681 uint64_t &ErrorInfo,
3682 bool MatchingInlineAsm) {
3683 assert(!Operands.empty() && "Unexpect empty operand list!");
3684 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3685 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3687 StringRef Tok = Op.getToken();
3688 unsigned NumOperands = Operands.size();
3690 if (NumOperands == 4 && Tok == "lsl") {
3691 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3692 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3693 if (Op2.isReg() && Op3.isImm()) {
3694 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3696 uint64_t Op3Val = Op3CE->getValue();
3697 uint64_t NewOp3Val = 0;
3698 uint64_t NewOp4Val = 0;
3699 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3701 NewOp3Val = (32 - Op3Val) & 0x1f;
3702 NewOp4Val = 31 - Op3Val;
3704 NewOp3Val = (64 - Op3Val) & 0x3f;
3705 NewOp4Val = 63 - Op3Val;
3708 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3709 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3711 Operands[0] = AArch64Operand::CreateToken(
3712 "ubfm", false, Op.getStartLoc(), getContext());
3713 Operands.push_back(AArch64Operand::CreateImm(
3714 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3715 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3716 Op3.getEndLoc(), getContext());
3719 } else if (NumOperands == 4 && Tok == "bfc") {
3720 // FIXME: Horrible hack to handle BFC->BFM alias.
3721 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3722 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3723 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3725 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3726 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3727 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3729 if (LSBCE && WidthCE) {
3730 uint64_t LSB = LSBCE->getValue();
3731 uint64_t Width = WidthCE->getValue();
3733 uint64_t RegWidth = 0;
3734 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3740 if (LSB >= RegWidth)
3741 return Error(LSBOp.getStartLoc(),
3742 "expected integer in range [0, 31]");
3743 if (Width < 1 || Width > RegWidth)
3744 return Error(WidthOp.getStartLoc(),
3745 "expected integer in range [1, 32]");
3749 ImmR = (32 - LSB) & 0x1f;
3751 ImmR = (64 - LSB) & 0x3f;
3753 uint64_t ImmS = Width - 1;
3755 if (ImmR != 0 && ImmS >= ImmR)
3756 return Error(WidthOp.getStartLoc(),
3757 "requested insert overflows register");
3759 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3760 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3761 Operands[0] = AArch64Operand::CreateToken(
3762 "bfm", false, Op.getStartLoc(), getContext());
3763 Operands[2] = AArch64Operand::CreateReg(
3764 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3765 SMLoc(), getContext());
3766 Operands[3] = AArch64Operand::CreateImm(
3767 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3768 Operands.emplace_back(
3769 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3770 WidthOp.getEndLoc(), getContext()));
3773 } else if (NumOperands == 5) {
3774 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3775 // UBFIZ -> UBFM aliases.
3776 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3777 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3778 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3779 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3781 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3782 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3783 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3785 if (Op3CE && Op4CE) {
3786 uint64_t Op3Val = Op3CE->getValue();
3787 uint64_t Op4Val = Op4CE->getValue();
3789 uint64_t RegWidth = 0;
3790 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3796 if (Op3Val >= RegWidth)
3797 return Error(Op3.getStartLoc(),
3798 "expected integer in range [0, 31]");
3799 if (Op4Val < 1 || Op4Val > RegWidth)
3800 return Error(Op4.getStartLoc(),
3801 "expected integer in range [1, 32]");
3803 uint64_t NewOp3Val = 0;
3805 NewOp3Val = (32 - Op3Val) & 0x1f;
3807 NewOp3Val = (64 - Op3Val) & 0x3f;
3809 uint64_t NewOp4Val = Op4Val - 1;
3811 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3812 return Error(Op4.getStartLoc(),
3813 "requested insert overflows register");
3815 const MCExpr *NewOp3 =
3816 MCConstantExpr::create(NewOp3Val, getContext());
3817 const MCExpr *NewOp4 =
3818 MCConstantExpr::create(NewOp4Val, getContext());
3819 Operands[3] = AArch64Operand::CreateImm(
3820 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3821 Operands[4] = AArch64Operand::CreateImm(
3822 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3824 Operands[0] = AArch64Operand::CreateToken(
3825 "bfm", false, Op.getStartLoc(), getContext());
3826 else if (Tok == "sbfiz")
3827 Operands[0] = AArch64Operand::CreateToken(
3828 "sbfm", false, Op.getStartLoc(), getContext());
3829 else if (Tok == "ubfiz")
3830 Operands[0] = AArch64Operand::CreateToken(
3831 "ubfm", false, Op.getStartLoc(), getContext());
3833 llvm_unreachable("No valid mnemonic for alias?");
3837 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3838 // UBFX -> UBFM aliases.
3839 } else if (NumOperands == 5 &&
3840 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3841 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3842 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3843 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3845 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3846 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3847 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3849 if (Op3CE && Op4CE) {
3850 uint64_t Op3Val = Op3CE->getValue();
3851 uint64_t Op4Val = Op4CE->getValue();
3853 uint64_t RegWidth = 0;
3854 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3860 if (Op3Val >= RegWidth)
3861 return Error(Op3.getStartLoc(),
3862 "expected integer in range [0, 31]");
3863 if (Op4Val < 1 || Op4Val > RegWidth)
3864 return Error(Op4.getStartLoc(),
3865 "expected integer in range [1, 32]");
3867 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3869 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3870 return Error(Op4.getStartLoc(),
3871 "requested extract overflows register");
3873 const MCExpr *NewOp4 =
3874 MCConstantExpr::create(NewOp4Val, getContext());
3875 Operands[4] = AArch64Operand::CreateImm(
3876 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3878 Operands[0] = AArch64Operand::CreateToken(
3879 "bfm", false, Op.getStartLoc(), getContext());
3880 else if (Tok == "sbfx")
3881 Operands[0] = AArch64Operand::CreateToken(
3882 "sbfm", false, Op.getStartLoc(), getContext());
3883 else if (Tok == "ubfx")
3884 Operands[0] = AArch64Operand::CreateToken(
3885 "ubfm", false, Op.getStartLoc(), getContext());
3887 llvm_unreachable("No valid mnemonic for alias?");
3892 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3893 // InstAlias can't quite handle this since the reg classes aren't
3895 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3896 // The source register can be Wn here, but the matcher expects a
3897 // GPR64. Twiddle it here if necessary.
3898 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3900 unsigned Reg = getXRegFromWReg(Op.getReg());
3901 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3902 Op.getEndLoc(), getContext());
3905 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3906 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3907 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3909 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3911 // The source register can be Wn here, but the matcher expects a
3912 // GPR64. Twiddle it here if necessary.
3913 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3915 unsigned Reg = getXRegFromWReg(Op.getReg());
3916 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3917 Op.getEndLoc(), getContext());
3921 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3922 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3923 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3925 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3927 // The source register can be Wn here, but the matcher expects a
3928 // GPR32. Twiddle it here if necessary.
3929 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3931 unsigned Reg = getWRegFromXReg(Op.getReg());
3932 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3933 Op.getEndLoc(), getContext());
3938 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3939 if (NumOperands == 3 && Tok == "fmov") {
3940 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3941 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3942 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3944 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3948 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3949 Op.getEndLoc(), getContext());
3954 // First try to match against the secondary set of tables containing the
3955 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3956 unsigned MatchResult =
3957 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3959 // If that fails, try against the alternate table containing long-form NEON:
3960 // "fadd v0.2s, v1.2s, v2.2s"
3961 if (MatchResult != Match_Success) {
3962 // But first, save the short-form match result: we can use it in case the
3963 // long-form match also fails.
3964 auto ShortFormNEONErrorInfo = ErrorInfo;
3965 auto ShortFormNEONMatchResult = MatchResult;
3968 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3970 // Now, both matches failed, and the long-form match failed on the mnemonic
3971 // suffix token operand. The short-form match failure is probably more
3972 // relevant: use it instead.
3973 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3974 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3975 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3976 MatchResult = ShortFormNEONMatchResult;
3977 ErrorInfo = ShortFormNEONErrorInfo;
3982 switch (MatchResult) {
3983 case Match_Success: {
3984 // Perform range checking and other semantic validations
3985 SmallVector<SMLoc, 8> OperandLocs;
3986 NumOperands = Operands.size();
3987 for (unsigned i = 1; i < NumOperands; ++i)
3988 OperandLocs.push_back(Operands[i]->getStartLoc());
3989 if (validateInstruction(Inst, OperandLocs))
3993 Out.EmitInstruction(Inst, STI);
3996 case Match_MissingFeature: {
3997 assert(ErrorInfo && "Unknown missing feature!");
3998 // Special case the error message for the very common case where only
3999 // a single subtarget feature is missing (neon, e.g.).
4000 std::string Msg = "instruction requires:";
4002 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4003 if (ErrorInfo & Mask) {
4005 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4009 return Error(IDLoc, Msg);
4011 case Match_MnemonicFail:
4012 return showMatchError(IDLoc, MatchResult);
4013 case Match_InvalidOperand: {
4014 SMLoc ErrorLoc = IDLoc;
4016 if (ErrorInfo != ~0ULL) {
4017 if (ErrorInfo >= Operands.size())
4018 return Error(IDLoc, "too few operands for instruction");
4020 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4021 if (ErrorLoc == SMLoc())
4024 // If the match failed on a suffix token operand, tweak the diagnostic
4026 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4027 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4028 MatchResult = Match_InvalidSuffix;
4030 return showMatchError(ErrorLoc, MatchResult);
4032 case Match_InvalidMemoryIndexed1:
4033 case Match_InvalidMemoryIndexed2:
4034 case Match_InvalidMemoryIndexed4:
4035 case Match_InvalidMemoryIndexed8:
4036 case Match_InvalidMemoryIndexed16:
4037 case Match_InvalidCondCode:
4038 case Match_AddSubRegExtendSmall:
4039 case Match_AddSubRegExtendLarge:
4040 case Match_AddSubSecondSource:
4041 case Match_LogicalSecondSource:
4042 case Match_AddSubRegShift32:
4043 case Match_AddSubRegShift64:
4044 case Match_InvalidMovImm32Shift:
4045 case Match_InvalidMovImm64Shift:
4046 case Match_InvalidFPImm:
4047 case Match_InvalidMemoryWExtend8:
4048 case Match_InvalidMemoryWExtend16:
4049 case Match_InvalidMemoryWExtend32:
4050 case Match_InvalidMemoryWExtend64:
4051 case Match_InvalidMemoryWExtend128:
4052 case Match_InvalidMemoryXExtend8:
4053 case Match_InvalidMemoryXExtend16:
4054 case Match_InvalidMemoryXExtend32:
4055 case Match_InvalidMemoryXExtend64:
4056 case Match_InvalidMemoryXExtend128:
4057 case Match_InvalidMemoryIndexed4SImm7:
4058 case Match_InvalidMemoryIndexed8SImm7:
4059 case Match_InvalidMemoryIndexed16SImm7:
4060 case Match_InvalidMemoryIndexedSImm9:
4061 case Match_InvalidImm0_1:
4062 case Match_InvalidImm0_7:
4063 case Match_InvalidImm0_15:
4064 case Match_InvalidImm0_31:
4065 case Match_InvalidImm0_63:
4066 case Match_InvalidImm0_127:
4067 case Match_InvalidImm0_65535:
4068 case Match_InvalidImm1_8:
4069 case Match_InvalidImm1_16:
4070 case Match_InvalidImm1_32:
4071 case Match_InvalidImm1_64:
4072 case Match_InvalidIndex1:
4073 case Match_InvalidIndexB:
4074 case Match_InvalidIndexH:
4075 case Match_InvalidIndexS:
4076 case Match_InvalidIndexD:
4077 case Match_InvalidLabel:
4080 if (ErrorInfo >= Operands.size())
4081 return Error(IDLoc, "too few operands for instruction");
4082 // Any time we get here, there's nothing fancy to do. Just get the
4083 // operand SMLoc and display the diagnostic.
4084 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4085 if (ErrorLoc == SMLoc())
4087 return showMatchError(ErrorLoc, MatchResult);
4091 llvm_unreachable("Implement any new match types added!");
4094 /// ParseDirective parses the arm specific directives
4095 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4096 const MCObjectFileInfo::Environment Format =
4097 getContext().getObjectFileInfo()->getObjectFileType();
4098 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4099 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4101 StringRef IDVal = DirectiveID.getIdentifier();
4102 SMLoc Loc = DirectiveID.getLoc();
4103 if (IDVal == ".hword")
4104 return parseDirectiveWord(2, Loc);
4105 if (IDVal == ".word")
4106 return parseDirectiveWord(4, Loc);
4107 if (IDVal == ".xword")
4108 return parseDirectiveWord(8, Loc);
4109 if (IDVal == ".tlsdesccall")
4110 return parseDirectiveTLSDescCall(Loc);
4111 if (IDVal == ".ltorg" || IDVal == ".pool")
4112 return parseDirectiveLtorg(Loc);
4113 if (IDVal == ".unreq")
4114 return parseDirectiveUnreq(Loc);
4116 if (!IsMachO && !IsCOFF) {
4117 if (IDVal == ".inst")
4118 return parseDirectiveInst(Loc);
4121 return parseDirectiveLOH(IDVal, Loc);
4124 /// parseDirectiveWord
4125 /// ::= .word [ expression (, expression)* ]
4126 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4127 MCAsmParser &Parser = getParser();
4128 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4130 const MCExpr *Value;
4131 if (getParser().parseExpression(Value))
4134 getParser().getStreamer().EmitValue(Value, Size);
4136 if (getLexer().is(AsmToken::EndOfStatement))
4139 // FIXME: Improve diagnostic.
4140 if (getLexer().isNot(AsmToken::Comma))
4141 return Error(L, "unexpected token in directive");
4150 /// parseDirectiveInst
4151 /// ::= .inst opcode [, ...]
4152 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4153 MCAsmParser &Parser = getParser();
4154 if (getLexer().is(AsmToken::EndOfStatement)) {
4155 Parser.eatToEndOfStatement();
4156 Error(Loc, "expected expression following directive");
4163 if (getParser().parseExpression(Expr)) {
4164 Error(Loc, "expected expression");
4168 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4170 Error(Loc, "expected constant expression");
4174 getTargetStreamer().emitInst(Value->getValue());
4176 if (getLexer().is(AsmToken::EndOfStatement))
4179 if (getLexer().isNot(AsmToken::Comma)) {
4180 Error(Loc, "unexpected token in directive");
4184 Parser.Lex(); // Eat comma.
4191 // parseDirectiveTLSDescCall:
4192 // ::= .tlsdesccall symbol
4193 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4195 if (getParser().parseIdentifier(Name))
4196 return Error(L, "expected symbol after directive");
4198 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4199 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4200 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4203 Inst.setOpcode(AArch64::TLSDESCCALL);
4204 Inst.addOperand(MCOperand::createExpr(Expr));
4206 getParser().getStreamer().EmitInstruction(Inst, STI);
4210 /// ::= .loh <lohName | lohId> label1, ..., labelN
4211 /// The number of arguments depends on the loh identifier.
4212 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4213 if (IDVal != MCLOHDirectiveName())
4216 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4217 if (getParser().getTok().isNot(AsmToken::Integer))
4218 return TokError("expected an identifier or a number in directive");
4219 // We successfully get a numeric value for the identifier.
4220 // Check if it is valid.
4221 int64_t Id = getParser().getTok().getIntVal();
4222 if (Id <= -1U && !isValidMCLOHType(Id))
4223 return TokError("invalid numeric identifier in directive");
4224 Kind = (MCLOHType)Id;
4226 StringRef Name = getTok().getIdentifier();
4227 // We successfully parse an identifier.
4228 // Check if it is a recognized one.
4229 int Id = MCLOHNameToId(Name);
4232 return TokError("invalid identifier in directive");
4233 Kind = (MCLOHType)Id;
4235 // Consume the identifier.
4237 // Get the number of arguments of this LOH.
4238 int NbArgs = MCLOHIdToNbArgs(Kind);
4240 assert(NbArgs != -1 && "Invalid number of arguments");
4242 SmallVector<MCSymbol *, 3> Args;
4243 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4245 if (getParser().parseIdentifier(Name))
4246 return TokError("expected identifier in directive");
4247 Args.push_back(getContext().getOrCreateSymbol(Name));
4249 if (Idx + 1 == NbArgs)
4251 if (getLexer().isNot(AsmToken::Comma))
4252 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4255 if (getLexer().isNot(AsmToken::EndOfStatement))
4256 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4258 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4262 /// parseDirectiveLtorg
4263 /// ::= .ltorg | .pool
4264 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4265 getTargetStreamer().emitCurrentConstantPool();
4269 /// parseDirectiveReq
4270 /// ::= name .req registername
4271 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4272 MCAsmParser &Parser = getParser();
4273 Parser.Lex(); // Eat the '.req' token.
4274 SMLoc SRegLoc = getLoc();
4275 unsigned RegNum = tryParseRegister();
4276 bool IsVector = false;
4278 if (RegNum == static_cast<unsigned>(-1)) {
4280 RegNum = tryMatchVectorRegister(Kind, false);
4281 if (!Kind.empty()) {
4282 Error(SRegLoc, "vector register without type specifier expected");
4288 if (RegNum == static_cast<unsigned>(-1)) {
4289 Parser.eatToEndOfStatement();
4290 Error(SRegLoc, "register name or alias expected");
4294 // Shouldn't be anything else.
4295 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4296 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4297 Parser.eatToEndOfStatement();
4301 Parser.Lex(); // Consume the EndOfStatement
4303 auto pair = std::make_pair(IsVector, RegNum);
4304 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4305 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4310 /// parseDirectiveUneq
4311 /// ::= .unreq registername
4312 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4313 MCAsmParser &Parser = getParser();
4314 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4315 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4316 Parser.eatToEndOfStatement();
4319 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4320 Parser.Lex(); // Eat the identifier.
4325 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4326 AArch64MCExpr::VariantKind &ELFRefKind,
4327 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4329 ELFRefKind = AArch64MCExpr::VK_INVALID;
4330 DarwinRefKind = MCSymbolRefExpr::VK_None;
4333 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4334 ELFRefKind = AE->getKind();
4335 Expr = AE->getSubExpr();
4338 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4340 // It's a simple symbol reference with no addend.
4341 DarwinRefKind = SE->getKind();
4345 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4349 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4352 DarwinRefKind = SE->getKind();
4354 if (BE->getOpcode() != MCBinaryExpr::Add &&
4355 BE->getOpcode() != MCBinaryExpr::Sub)
4358 // See if the addend is is a constant, otherwise there's more going
4359 // on here than we can deal with.
4360 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4364 Addend = AddendExpr->getValue();
4365 if (BE->getOpcode() == MCBinaryExpr::Sub)
4368 // It's some symbol reference + a constant addend, but really
4369 // shouldn't use both Darwin and ELF syntax.
4370 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4371 DarwinRefKind == MCSymbolRefExpr::VK_None;
4374 /// Force static initialization.
4375 extern "C" void LLVMInitializeAArch64AsmParser() {
4376 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4377 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4378 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4381 #define GET_REGISTER_MATCHER
4382 #define GET_SUBTARGET_FEATURE_NAME
4383 #define GET_MATCHER_IMPLEMENTATION
4384 #include "AArch64GenAsmMatcher.inc"
4386 // Define this matcher function after the auto-generated include so we
4387 // have the match class enum definitions.
4388 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4390 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4391 // If the kind is a token for a literal immediate, check if our asm
4392 // operand matches. This is for InstAliases which have a fixed-value
4393 // immediate in the syntax.
4394 int64_t ExpectedVal;
4397 return Match_InvalidOperand;
4439 return Match_InvalidOperand;
4440 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4442 return Match_InvalidOperand;
4443 if (CE->getValue() == ExpectedVal)
4444 return Match_Success;
4445 return Match_InvalidOperand;
4449 AArch64AsmParser::OperandMatchResultTy
4450 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4454 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4455 Error(S, "expected register");
4456 return MatchOperand_ParseFail;
4459 int FirstReg = tryParseRegister();
4460 if (FirstReg == -1) {
4461 return MatchOperand_ParseFail;
4463 const MCRegisterClass &WRegClass =
4464 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4465 const MCRegisterClass &XRegClass =
4466 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4468 bool isXReg = XRegClass.contains(FirstReg),
4469 isWReg = WRegClass.contains(FirstReg);
4470 if (!isXReg && !isWReg) {
4471 Error(S, "expected first even register of a "
4472 "consecutive same-size even/odd register pair");
4473 return MatchOperand_ParseFail;
4476 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4477 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4479 if (FirstEncoding & 0x1) {
4480 Error(S, "expected first even register of a "
4481 "consecutive same-size even/odd register pair");
4482 return MatchOperand_ParseFail;
4486 if (getParser().getTok().isNot(AsmToken::Comma)) {
4487 Error(M, "expected comma");
4488 return MatchOperand_ParseFail;
4494 int SecondReg = tryParseRegister();
4495 if (SecondReg ==-1) {
4496 return MatchOperand_ParseFail;
4499 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4500 (isXReg && !XRegClass.contains(SecondReg)) ||
4501 (isWReg && !WRegClass.contains(SecondReg))) {
4502 Error(E,"expected second odd register of a "
4503 "consecutive same-size even/odd register pair");
4504 return MatchOperand_ParseFail;
4509 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4510 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4512 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4513 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4516 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4519 return MatchOperand_Success;