1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
48 // Map of register aliases registers via the .req directive.
49 StringMap<std::pair<bool, unsigned> > RegisterReqs;
51 AArch64TargetStreamer &getTargetStreamer() {
52 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
53 return static_cast<AArch64TargetStreamer &>(TS);
56 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
58 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
59 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
60 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
61 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
62 int tryParseRegister();
63 int tryMatchVectorRegister(StringRef &Kind, bool expected);
64 bool parseRegister(OperandVector &Operands);
65 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
66 bool parseVectorList(OperandVector &Operands);
67 bool parseOperand(OperandVector &Operands, bool isCondCode,
70 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
71 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
72 bool showMatchError(SMLoc Loc, unsigned ErrCode);
74 bool parseDirectiveWord(unsigned Size, SMLoc L);
75 bool parseDirectiveInst(SMLoc L);
77 bool parseDirectiveTLSDescCall(SMLoc L);
79 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
80 bool parseDirectiveLtorg(SMLoc L);
82 bool parseDirectiveReq(StringRef Name, SMLoc L);
83 bool parseDirectiveUnreq(SMLoc L);
85 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
86 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
87 OperandVector &Operands, MCStreamer &Out,
89 bool MatchingInlineAsm) override;
90 /// @name Auto-generated Match Functions
93 #define GET_ASSEMBLER_HEADER
94 #include "AArch64GenAsmMatcher.inc"
98 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
99 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
100 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
102 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
103 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109 bool tryParseVectorRegister(OperandVector &Operands);
110 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
113 enum AArch64MatchResultTy {
114 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
115 #define GET_OPERAND_DIAGNOSTIC_TYPES
116 #include "AArch64GenAsmMatcher.inc"
118 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
119 const MCInstrInfo &MII, const MCTargetOptions &Options)
120 : MCTargetAsmParser(), STI(STI) {
121 MCAsmParserExtension::Initialize(Parser);
122 MCStreamer &S = getParser().getStreamer();
123 if (S.getTargetStreamer() == nullptr)
124 new AArch64TargetStreamer(S);
126 // Initialize the set of available features.
127 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
130 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
131 SMLoc NameLoc, OperandVector &Operands) override;
132 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
133 bool ParseDirective(AsmToken DirectiveID) override;
134 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
135 unsigned Kind) override;
137 static bool classifySymbolRef(const MCExpr *Expr,
138 AArch64MCExpr::VariantKind &ELFRefKind,
139 MCSymbolRefExpr::VariantKind &DarwinRefKind,
142 } // end anonymous namespace
146 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
148 class AArch64Operand : public MCParsedAsmOperand {
166 SMLoc StartLoc, EndLoc;
171 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
179 struct VectorListOp {
182 unsigned NumElements;
183 unsigned ElementKind;
186 struct VectorIndexOp {
194 struct ShiftedImmOp {
196 unsigned ShiftAmount;
200 AArch64CC::CondCode Code;
204 unsigned Val; // Encoded 8-bit representation.
208 unsigned Val; // Not the enum since not all values have names.
218 uint32_t PStateField;
231 struct ShiftExtendOp {
232 AArch64_AM::ShiftExtendType Type;
234 bool HasExplicitAmount;
244 struct VectorListOp VectorList;
245 struct VectorIndexOp VectorIndex;
247 struct ShiftedImmOp ShiftedImm;
248 struct CondCodeOp CondCode;
249 struct FPImmOp FPImm;
250 struct BarrierOp Barrier;
251 struct SysRegOp SysReg;
252 struct SysCRImmOp SysCRImm;
253 struct PrefetchOp Prefetch;
254 struct ShiftExtendOp ShiftExtend;
257 // Keep the MCContext around as the MCExprs may need manipulated during
258 // the add<>Operands() calls.
262 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
264 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
266 StartLoc = o.StartLoc;
276 ShiftedImm = o.ShiftedImm;
279 CondCode = o.CondCode;
291 VectorList = o.VectorList;
294 VectorIndex = o.VectorIndex;
300 SysCRImm = o.SysCRImm;
303 Prefetch = o.Prefetch;
306 ShiftExtend = o.ShiftExtend;
311 /// getStartLoc - Get the location of the first token of this operand.
312 SMLoc getStartLoc() const override { return StartLoc; }
313 /// getEndLoc - Get the location of the last token of this operand.
314 SMLoc getEndLoc() const override { return EndLoc; }
316 StringRef getToken() const {
317 assert(Kind == k_Token && "Invalid access!");
318 return StringRef(Tok.Data, Tok.Length);
321 bool isTokenSuffix() const {
322 assert(Kind == k_Token && "Invalid access!");
326 const MCExpr *getImm() const {
327 assert(Kind == k_Immediate && "Invalid access!");
331 const MCExpr *getShiftedImmVal() const {
332 assert(Kind == k_ShiftedImm && "Invalid access!");
333 return ShiftedImm.Val;
336 unsigned getShiftedImmShift() const {
337 assert(Kind == k_ShiftedImm && "Invalid access!");
338 return ShiftedImm.ShiftAmount;
341 AArch64CC::CondCode getCondCode() const {
342 assert(Kind == k_CondCode && "Invalid access!");
343 return CondCode.Code;
346 unsigned getFPImm() const {
347 assert(Kind == k_FPImm && "Invalid access!");
351 unsigned getBarrier() const {
352 assert(Kind == k_Barrier && "Invalid access!");
356 StringRef getBarrierName() const {
357 assert(Kind == k_Barrier && "Invalid access!");
358 return StringRef(Barrier.Data, Barrier.Length);
361 unsigned getReg() const override {
362 assert(Kind == k_Register && "Invalid access!");
366 unsigned getVectorListStart() const {
367 assert(Kind == k_VectorList && "Invalid access!");
368 return VectorList.RegNum;
371 unsigned getVectorListCount() const {
372 assert(Kind == k_VectorList && "Invalid access!");
373 return VectorList.Count;
376 unsigned getVectorIndex() const {
377 assert(Kind == k_VectorIndex && "Invalid access!");
378 return VectorIndex.Val;
381 StringRef getSysReg() const {
382 assert(Kind == k_SysReg && "Invalid access!");
383 return StringRef(SysReg.Data, SysReg.Length);
386 unsigned getSysCR() const {
387 assert(Kind == k_SysCR && "Invalid access!");
391 unsigned getPrefetch() const {
392 assert(Kind == k_Prefetch && "Invalid access!");
396 StringRef getPrefetchName() const {
397 assert(Kind == k_Prefetch && "Invalid access!");
398 return StringRef(Prefetch.Data, Prefetch.Length);
401 AArch64_AM::ShiftExtendType getShiftExtendType() const {
402 assert(Kind == k_ShiftExtend && "Invalid access!");
403 return ShiftExtend.Type;
406 unsigned getShiftExtendAmount() const {
407 assert(Kind == k_ShiftExtend && "Invalid access!");
408 return ShiftExtend.Amount;
411 bool hasShiftExtendAmount() const {
412 assert(Kind == k_ShiftExtend && "Invalid access!");
413 return ShiftExtend.HasExplicitAmount;
416 bool isImm() const override { return Kind == k_Immediate; }
417 bool isMem() const override { return false; }
418 bool isSImm9() const {
421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
424 int64_t Val = MCE->getValue();
425 return (Val >= -256 && Val < 256);
427 bool isSImm7s4() const {
430 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
433 int64_t Val = MCE->getValue();
434 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
436 bool isSImm7s8() const {
439 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
442 int64_t Val = MCE->getValue();
443 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
445 bool isSImm7s16() const {
448 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
451 int64_t Val = MCE->getValue();
452 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
455 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
456 AArch64MCExpr::VariantKind ELFRefKind;
457 MCSymbolRefExpr::VariantKind DarwinRefKind;
459 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
461 // If we don't understand the expression, assume the best and
462 // let the fixup and relocation code deal with it.
466 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
467 ELFRefKind == AArch64MCExpr::VK_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
472 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
474 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
475 // Note that we don't range-check the addend. It's adjusted modulo page
476 // size when converted, so there is no "out of range" condition when using
478 return Addend >= 0 && (Addend % Scale) == 0;
479 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
480 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
481 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
488 template <int Scale> bool isUImm12Offset() const {
492 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
494 return isSymbolicUImm12Offset(getImm(), Scale);
496 int64_t Val = MCE->getValue();
497 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
500 bool isImm0_7() const {
503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
506 int64_t Val = MCE->getValue();
507 return (Val >= 0 && Val < 8);
509 bool isImm1_8() const {
512 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 int64_t Val = MCE->getValue();
516 return (Val > 0 && Val < 9);
518 bool isImm0_15() const {
521 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
524 int64_t Val = MCE->getValue();
525 return (Val >= 0 && Val < 16);
527 bool isImm1_16() const {
530 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
533 int64_t Val = MCE->getValue();
534 return (Val > 0 && Val < 17);
536 bool isImm0_31() const {
539 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
542 int64_t Val = MCE->getValue();
543 return (Val >= 0 && Val < 32);
545 bool isImm1_31() const {
548 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551 int64_t Val = MCE->getValue();
552 return (Val >= 1 && Val < 32);
554 bool isImm1_32() const {
557 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
560 int64_t Val = MCE->getValue();
561 return (Val >= 1 && Val < 33);
563 bool isImm0_63() const {
566 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
569 int64_t Val = MCE->getValue();
570 return (Val >= 0 && Val < 64);
572 bool isImm1_63() const {
575 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578 int64_t Val = MCE->getValue();
579 return (Val >= 1 && Val < 64);
581 bool isImm1_64() const {
584 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
587 int64_t Val = MCE->getValue();
588 return (Val >= 1 && Val < 65);
590 bool isImm0_127() const {
593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
596 int64_t Val = MCE->getValue();
597 return (Val >= 0 && Val < 128);
599 bool isImm0_255() const {
602 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
605 int64_t Val = MCE->getValue();
606 return (Val >= 0 && Val < 256);
608 bool isImm0_65535() const {
611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
614 int64_t Val = MCE->getValue();
615 return (Val >= 0 && Val < 65536);
617 bool isImm32_63() const {
620 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
623 int64_t Val = MCE->getValue();
624 return (Val >= 32 && Val < 64);
626 bool isLogicalImm32() const {
629 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632 int64_t Val = MCE->getValue();
633 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
636 return AArch64_AM::isLogicalImmediate(Val, 32);
638 bool isLogicalImm64() const {
641 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
644 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
646 bool isLogicalImm32Not() const {
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
653 return AArch64_AM::isLogicalImmediate(Val, 32);
655 bool isLogicalImm64Not() const {
658 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
661 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
663 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
664 bool isAddSubImm() const {
665 if (!isShiftedImm() && !isImm())
670 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
671 if (isShiftedImm()) {
672 unsigned Shift = ShiftedImm.ShiftAmount;
673 Expr = ShiftedImm.Val;
674 if (Shift != 0 && Shift != 12)
680 AArch64MCExpr::VariantKind ELFRefKind;
681 MCSymbolRefExpr::VariantKind DarwinRefKind;
683 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
684 DarwinRefKind, Addend)) {
685 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
686 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
687 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
688 || ELFRefKind == AArch64MCExpr::VK_LO12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
690 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
691 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
693 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
694 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
695 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
698 // Otherwise it should be a real immediate in range:
699 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
700 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
702 bool isCondCode() const { return Kind == k_CondCode; }
703 bool isSIMDImmType10() const {
706 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
709 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
711 bool isBranchTarget26() const {
714 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
717 int64_t Val = MCE->getValue();
720 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
722 bool isPCRelLabel19() const {
725 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
728 int64_t Val = MCE->getValue();
731 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
733 bool isBranchTarget14() const {
736 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
739 int64_t Val = MCE->getValue();
742 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
746 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
750 AArch64MCExpr::VariantKind ELFRefKind;
751 MCSymbolRefExpr::VariantKind DarwinRefKind;
753 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
754 DarwinRefKind, Addend)) {
757 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
760 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
761 if (ELFRefKind == AllowedModifiers[i])
768 bool isMovZSymbolG3() const {
769 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
772 bool isMovZSymbolG2() const {
773 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
774 AArch64MCExpr::VK_TPREL_G2,
775 AArch64MCExpr::VK_DTPREL_G2});
778 bool isMovZSymbolG1() const {
779 return isMovWSymbol({
780 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
781 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
782 AArch64MCExpr::VK_DTPREL_G1,
786 bool isMovZSymbolG0() const {
787 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
788 AArch64MCExpr::VK_TPREL_G0,
789 AArch64MCExpr::VK_DTPREL_G0});
792 bool isMovKSymbolG3() const {
793 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
796 bool isMovKSymbolG2() const {
797 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
800 bool isMovKSymbolG1() const {
801 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
802 AArch64MCExpr::VK_TPREL_G1_NC,
803 AArch64MCExpr::VK_DTPREL_G1_NC});
806 bool isMovKSymbolG0() const {
808 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
809 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
812 template<int RegWidth, int Shift>
813 bool isMOVZMovAlias() const {
814 if (!isImm()) return false;
816 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
817 if (!CE) return false;
818 uint64_t Value = CE->getValue();
821 Value &= 0xffffffffULL;
823 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
824 if (Value == 0 && Shift != 0)
827 return (Value & ~(0xffffULL << Shift)) == 0;
830 template<int RegWidth, int Shift>
831 bool isMOVNMovAlias() const {
832 if (!isImm()) return false;
834 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
835 if (!CE) return false;
836 uint64_t Value = CE->getValue();
838 // MOVZ takes precedence over MOVN.
839 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
840 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
845 Value &= 0xffffffffULL;
847 return (Value & ~(0xffffULL << Shift)) == 0;
850 bool isFPImm() const { return Kind == k_FPImm; }
851 bool isBarrier() const { return Kind == k_Barrier; }
852 bool isSysReg() const { return Kind == k_SysReg; }
853 bool isMRSSystemRegister() const {
854 if (!isSysReg()) return false;
856 return SysReg.MRSReg != -1U;
858 bool isMSRSystemRegister() const {
859 if (!isSysReg()) return false;
861 return SysReg.MSRReg != -1U;
863 bool isSystemPStateField() const {
864 if (!isSysReg()) return false;
866 return SysReg.PStateField != -1U;
868 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
869 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
870 bool isVectorRegLo() const {
871 return Kind == k_Register && Reg.isVector &&
872 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
875 bool isGPR32as64() const {
876 return Kind == k_Register && !Reg.isVector &&
877 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
879 bool isWSeqPair() const {
880 return Kind == k_Register && !Reg.isVector &&
881 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
884 bool isXSeqPair() const {
885 return Kind == k_Register && !Reg.isVector &&
886 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
890 bool isGPR64sp0() const {
891 return Kind == k_Register && !Reg.isVector &&
892 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
895 /// Is this a vector list with the type implicit (presumably attached to the
896 /// instruction itself)?
897 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
898 return Kind == k_VectorList && VectorList.Count == NumRegs &&
899 !VectorList.ElementKind;
902 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
903 bool isTypedVectorList() const {
904 if (Kind != k_VectorList)
906 if (VectorList.Count != NumRegs)
908 if (VectorList.ElementKind != ElementKind)
910 return VectorList.NumElements == NumElements;
913 bool isVectorIndex1() const {
914 return Kind == k_VectorIndex && VectorIndex.Val == 1;
916 bool isVectorIndexB() const {
917 return Kind == k_VectorIndex && VectorIndex.Val < 16;
919 bool isVectorIndexH() const {
920 return Kind == k_VectorIndex && VectorIndex.Val < 8;
922 bool isVectorIndexS() const {
923 return Kind == k_VectorIndex && VectorIndex.Val < 4;
925 bool isVectorIndexD() const {
926 return Kind == k_VectorIndex && VectorIndex.Val < 2;
928 bool isToken() const override { return Kind == k_Token; }
929 bool isTokenEqual(StringRef Str) const {
930 return Kind == k_Token && getToken() == Str;
932 bool isSysCR() const { return Kind == k_SysCR; }
933 bool isPrefetch() const { return Kind == k_Prefetch; }
934 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
935 bool isShifter() const {
936 if (!isShiftExtend())
939 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
940 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
941 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
942 ST == AArch64_AM::MSL);
944 bool isExtend() const {
945 if (!isShiftExtend())
948 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
949 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
950 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
951 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
952 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
953 ET == AArch64_AM::LSL) &&
954 getShiftExtendAmount() <= 4;
957 bool isExtend64() const {
960 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
961 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
962 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
964 bool isExtendLSL64() const {
967 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
968 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
969 ET == AArch64_AM::LSL) &&
970 getShiftExtendAmount() <= 4;
973 template<int Width> bool isMemXExtend() const {
976 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
977 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
978 (getShiftExtendAmount() == Log2_32(Width / 8) ||
979 getShiftExtendAmount() == 0);
982 template<int Width> bool isMemWExtend() const {
985 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
986 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
987 (getShiftExtendAmount() == Log2_32(Width / 8) ||
988 getShiftExtendAmount() == 0);
991 template <unsigned width>
992 bool isArithmeticShifter() const {
996 // An arithmetic shifter is LSL, LSR, or ASR.
997 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
998 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
999 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1002 template <unsigned width>
1003 bool isLogicalShifter() const {
1007 // A logical shifter is LSL, LSR, ASR or ROR.
1008 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1009 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1010 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1011 getShiftExtendAmount() < width;
1014 bool isMovImm32Shifter() const {
1018 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1019 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1020 if (ST != AArch64_AM::LSL)
1022 uint64_t Val = getShiftExtendAmount();
1023 return (Val == 0 || Val == 16);
1026 bool isMovImm64Shifter() const {
1030 // A MOVi shifter is LSL of 0 or 16.
1031 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1032 if (ST != AArch64_AM::LSL)
1034 uint64_t Val = getShiftExtendAmount();
1035 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1038 bool isLogicalVecShifter() const {
1042 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1043 unsigned Shift = getShiftExtendAmount();
1044 return getShiftExtendType() == AArch64_AM::LSL &&
1045 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1048 bool isLogicalVecHalfWordShifter() const {
1049 if (!isLogicalVecShifter())
1052 // A logical vector shifter is a left shift by 0 or 8.
1053 unsigned Shift = getShiftExtendAmount();
1054 return getShiftExtendType() == AArch64_AM::LSL &&
1055 (Shift == 0 || Shift == 8);
1058 bool isMoveVecShifter() const {
1059 if (!isShiftExtend())
1062 // A logical vector shifter is a left shift by 8 or 16.
1063 unsigned Shift = getShiftExtendAmount();
1064 return getShiftExtendType() == AArch64_AM::MSL &&
1065 (Shift == 8 || Shift == 16);
1068 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1069 // to LDUR/STUR when the offset is not legal for the former but is for
1070 // the latter. As such, in addition to checking for being a legal unscaled
1071 // address, also check that it is not a legal scaled address. This avoids
1072 // ambiguity in the matcher.
1074 bool isSImm9OffsetFB() const {
1075 return isSImm9() && !isUImm12Offset<Width / 8>();
1078 bool isAdrpLabel() const {
1079 // Validation was handled during parsing, so we just sanity check that
1080 // something didn't go haywire.
1084 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1085 int64_t Val = CE->getValue();
1086 int64_t Min = - (4096 * (1LL << (21 - 1)));
1087 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1088 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1094 bool isAdrLabel() const {
1095 // Validation was handled during parsing, so we just sanity check that
1096 // something didn't go haywire.
1100 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1101 int64_t Val = CE->getValue();
1102 int64_t Min = - (1LL << (21 - 1));
1103 int64_t Max = ((1LL << (21 - 1)) - 1);
1104 return Val >= Min && Val <= Max;
1110 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1111 // Add as immediates when possible. Null MCExpr = 0.
1113 Inst.addOperand(MCOperand::createImm(0));
1114 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1115 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1117 Inst.addOperand(MCOperand::createExpr(Expr));
1120 void addRegOperands(MCInst &Inst, unsigned N) const {
1121 assert(N == 1 && "Invalid number of operands!");
1122 Inst.addOperand(MCOperand::createReg(getReg()));
1125 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1128 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1130 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1131 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1132 RI->getEncodingValue(getReg()));
1134 Inst.addOperand(MCOperand::createReg(Reg));
1137 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1138 assert(N == 1 && "Invalid number of operands!");
1140 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1141 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1144 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1147 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1148 Inst.addOperand(MCOperand::createReg(getReg()));
1151 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1152 assert(N == 1 && "Invalid number of operands!");
1153 Inst.addOperand(MCOperand::createReg(getReg()));
1156 template <unsigned NumRegs>
1157 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1158 assert(N == 1 && "Invalid number of operands!");
1159 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1160 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1161 unsigned FirstReg = FirstRegs[NumRegs - 1];
1164 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1167 template <unsigned NumRegs>
1168 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1169 assert(N == 1 && "Invalid number of operands!");
1170 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1171 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1172 unsigned FirstReg = FirstRegs[NumRegs - 1];
1175 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1178 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1179 assert(N == 1 && "Invalid number of operands!");
1180 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1183 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1184 assert(N == 1 && "Invalid number of operands!");
1185 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1188 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1189 assert(N == 1 && "Invalid number of operands!");
1190 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1193 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1194 assert(N == 1 && "Invalid number of operands!");
1195 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1198 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1199 assert(N == 1 && "Invalid number of operands!");
1200 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1203 void addImmOperands(MCInst &Inst, unsigned N) const {
1204 assert(N == 1 && "Invalid number of operands!");
1205 // If this is a pageoff symrefexpr with an addend, adjust the addend
1206 // to be only the page-offset portion. Otherwise, just add the expr
1208 addExpr(Inst, getImm());
1211 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1212 assert(N == 2 && "Invalid number of operands!");
1213 if (isShiftedImm()) {
1214 addExpr(Inst, getShiftedImmVal());
1215 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1217 addExpr(Inst, getImm());
1218 Inst.addOperand(MCOperand::createImm(0));
1222 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 Inst.addOperand(MCOperand::createImm(getCondCode()));
1227 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1228 assert(N == 1 && "Invalid number of operands!");
1229 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1231 addExpr(Inst, getImm());
1233 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1236 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1237 addImmOperands(Inst, N);
1241 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1242 assert(N == 1 && "Invalid number of operands!");
1243 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1246 Inst.addOperand(MCOperand::createExpr(getImm()));
1249 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1252 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1255 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1258 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1261 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1264 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1265 assert(N == 1 && "Invalid number of operands!");
1266 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1267 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1270 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1271 assert(N == 1 && "Invalid number of operands!");
1272 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1273 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1276 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1277 assert(N == 1 && "Invalid number of operands!");
1278 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1279 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1282 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1285 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1288 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1289 assert(N == 1 && "Invalid number of operands!");
1290 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1291 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1294 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1295 assert(N == 1 && "Invalid number of operands!");
1296 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1297 assert(MCE && "Invalid constant immediate operand!");
1298 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1301 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1307 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1331 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1337 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1340 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1343 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1346 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1349 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1352 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1355 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1356 assert(N == 1 && "Invalid number of operands!");
1357 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1358 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1361 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1362 assert(N == 1 && "Invalid number of operands!");
1363 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1365 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1366 Inst.addOperand(MCOperand::createImm(encoding));
1369 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1370 assert(N == 1 && "Invalid number of operands!");
1371 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1372 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1373 Inst.addOperand(MCOperand::createImm(encoding));
1376 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1377 assert(N == 1 && "Invalid number of operands!");
1378 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1379 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1380 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1381 Inst.addOperand(MCOperand::createImm(encoding));
1384 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1385 assert(N == 1 && "Invalid number of operands!");
1386 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1388 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1389 Inst.addOperand(MCOperand::createImm(encoding));
1392 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1393 assert(N == 1 && "Invalid number of operands!");
1394 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1395 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1396 Inst.addOperand(MCOperand::createImm(encoding));
1399 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1400 // Branch operands don't encode the low bits, so shift them off
1401 // here. If it's a label, however, just put it on directly as there's
1402 // not enough information now to do anything.
1403 assert(N == 1 && "Invalid number of operands!");
1404 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1406 addExpr(Inst, getImm());
1409 assert(MCE && "Invalid constant immediate operand!");
1410 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1413 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1414 // Branch operands don't encode the low bits, so shift them off
1415 // here. If it's a label, however, just put it on directly as there's
1416 // not enough information now to do anything.
1417 assert(N == 1 && "Invalid number of operands!");
1418 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1420 addExpr(Inst, getImm());
1423 assert(MCE && "Invalid constant immediate operand!");
1424 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1427 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1428 // Branch operands don't encode the low bits, so shift them off
1429 // here. If it's a label, however, just put it on directly as there's
1430 // not enough information now to do anything.
1431 assert(N == 1 && "Invalid number of operands!");
1432 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1434 addExpr(Inst, getImm());
1437 assert(MCE && "Invalid constant immediate operand!");
1438 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1441 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1442 assert(N == 1 && "Invalid number of operands!");
1443 Inst.addOperand(MCOperand::createImm(getFPImm()));
1446 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1447 assert(N == 1 && "Invalid number of operands!");
1448 Inst.addOperand(MCOperand::createImm(getBarrier()));
1451 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1452 assert(N == 1 && "Invalid number of operands!");
1454 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1457 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1458 assert(N == 1 && "Invalid number of operands!");
1460 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1463 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1464 assert(N == 1 && "Invalid number of operands!");
1466 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1469 void addSysCROperands(MCInst &Inst, unsigned N) const {
1470 assert(N == 1 && "Invalid number of operands!");
1471 Inst.addOperand(MCOperand::createImm(getSysCR()));
1474 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1476 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1479 void addShifterOperands(MCInst &Inst, unsigned N) const {
1480 assert(N == 1 && "Invalid number of operands!");
1482 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1483 Inst.addOperand(MCOperand::createImm(Imm));
1486 void addExtendOperands(MCInst &Inst, unsigned N) const {
1487 assert(N == 1 && "Invalid number of operands!");
1488 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1489 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1490 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1491 Inst.addOperand(MCOperand::createImm(Imm));
1494 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1496 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1497 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1498 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1499 Inst.addOperand(MCOperand::createImm(Imm));
1502 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1503 assert(N == 2 && "Invalid number of operands!");
1504 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1505 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1506 Inst.addOperand(MCOperand::createImm(IsSigned));
1507 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1510 // For 8-bit load/store instructions with a register offset, both the
1511 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1512 // they're disambiguated by whether the shift was explicit or implicit rather
1514 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1515 assert(N == 2 && "Invalid number of operands!");
1516 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1517 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1518 Inst.addOperand(MCOperand::createImm(IsSigned));
1519 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1523 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1524 assert(N == 1 && "Invalid number of operands!");
1526 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1527 uint64_t Value = CE->getValue();
1528 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1532 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1533 assert(N == 1 && "Invalid number of operands!");
1535 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1536 uint64_t Value = CE->getValue();
1537 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1540 void print(raw_ostream &OS) const override;
1542 static std::unique_ptr<AArch64Operand>
1543 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1544 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1545 Op->Tok.Data = Str.data();
1546 Op->Tok.Length = Str.size();
1547 Op->Tok.IsSuffix = IsSuffix;
1553 static std::unique_ptr<AArch64Operand>
1554 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1555 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1556 Op->Reg.RegNum = RegNum;
1557 Op->Reg.isVector = isVector;
1563 static std::unique_ptr<AArch64Operand>
1564 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1565 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1566 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1567 Op->VectorList.RegNum = RegNum;
1568 Op->VectorList.Count = Count;
1569 Op->VectorList.NumElements = NumElements;
1570 Op->VectorList.ElementKind = ElementKind;
1576 static std::unique_ptr<AArch64Operand>
1577 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1578 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1579 Op->VectorIndex.Val = Idx;
1585 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1586 SMLoc E, MCContext &Ctx) {
1587 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1594 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1595 unsigned ShiftAmount,
1598 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1599 Op->ShiftedImm .Val = Val;
1600 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1606 static std::unique_ptr<AArch64Operand>
1607 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1608 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1609 Op->CondCode.Code = Code;
1615 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1617 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1618 Op->FPImm.Val = Val;
1624 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1628 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1629 Op->Barrier.Val = Val;
1630 Op->Barrier.Data = Str.data();
1631 Op->Barrier.Length = Str.size();
1637 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1640 uint32_t PStateField,
1642 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1643 Op->SysReg.Data = Str.data();
1644 Op->SysReg.Length = Str.size();
1645 Op->SysReg.MRSReg = MRSReg;
1646 Op->SysReg.MSRReg = MSRReg;
1647 Op->SysReg.PStateField = PStateField;
1653 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1654 SMLoc E, MCContext &Ctx) {
1655 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1656 Op->SysCRImm.Val = Val;
1662 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1666 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1667 Op->Prefetch.Val = Val;
1668 Op->Barrier.Data = Str.data();
1669 Op->Barrier.Length = Str.size();
1675 static std::unique_ptr<AArch64Operand>
1676 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1677 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1678 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1679 Op->ShiftExtend.Type = ShOp;
1680 Op->ShiftExtend.Amount = Val;
1681 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1688 } // end anonymous namespace.
1690 void AArch64Operand::print(raw_ostream &OS) const {
1693 OS << "<fpimm " << getFPImm() << "("
1694 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1697 StringRef Name = getBarrierName();
1699 OS << "<barrier " << Name << ">";
1701 OS << "<barrier invalid #" << getBarrier() << ">";
1707 case k_ShiftedImm: {
1708 unsigned Shift = getShiftedImmShift();
1709 OS << "<shiftedimm ";
1710 OS << *getShiftedImmVal();
1711 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1715 OS << "<condcode " << getCondCode() << ">";
1718 OS << "<register " << getReg() << ">";
1720 case k_VectorList: {
1721 OS << "<vectorlist ";
1722 unsigned Reg = getVectorListStart();
1723 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1724 OS << Reg + i << " ";
1729 OS << "<vectorindex " << getVectorIndex() << ">";
1732 OS << "<sysreg: " << getSysReg() << '>';
1735 OS << "'" << getToken() << "'";
1738 OS << "c" << getSysCR();
1741 StringRef Name = getPrefetchName();
1743 OS << "<prfop " << Name << ">";
1745 OS << "<prfop invalid #" << getPrefetch() << ">";
1748 case k_ShiftExtend: {
1749 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1750 << getShiftExtendAmount();
1751 if (!hasShiftExtendAmount())
1759 /// @name Auto-generated Match Functions
1762 static unsigned MatchRegisterName(StringRef Name);
1766 static unsigned matchVectorRegName(StringRef Name) {
1767 return StringSwitch<unsigned>(Name.lower())
1768 .Case("v0", AArch64::Q0)
1769 .Case("v1", AArch64::Q1)
1770 .Case("v2", AArch64::Q2)
1771 .Case("v3", AArch64::Q3)
1772 .Case("v4", AArch64::Q4)
1773 .Case("v5", AArch64::Q5)
1774 .Case("v6", AArch64::Q6)
1775 .Case("v7", AArch64::Q7)
1776 .Case("v8", AArch64::Q8)
1777 .Case("v9", AArch64::Q9)
1778 .Case("v10", AArch64::Q10)
1779 .Case("v11", AArch64::Q11)
1780 .Case("v12", AArch64::Q12)
1781 .Case("v13", AArch64::Q13)
1782 .Case("v14", AArch64::Q14)
1783 .Case("v15", AArch64::Q15)
1784 .Case("v16", AArch64::Q16)
1785 .Case("v17", AArch64::Q17)
1786 .Case("v18", AArch64::Q18)
1787 .Case("v19", AArch64::Q19)
1788 .Case("v20", AArch64::Q20)
1789 .Case("v21", AArch64::Q21)
1790 .Case("v22", AArch64::Q22)
1791 .Case("v23", AArch64::Q23)
1792 .Case("v24", AArch64::Q24)
1793 .Case("v25", AArch64::Q25)
1794 .Case("v26", AArch64::Q26)
1795 .Case("v27", AArch64::Q27)
1796 .Case("v28", AArch64::Q28)
1797 .Case("v29", AArch64::Q29)
1798 .Case("v30", AArch64::Q30)
1799 .Case("v31", AArch64::Q31)
1803 static bool isValidVectorKind(StringRef Name) {
1804 return StringSwitch<bool>(Name.lower())
1814 // Accept the width neutral ones, too, for verbose syntax. If those
1815 // aren't used in the right places, the token operand won't match so
1816 // all will work out.
1824 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1825 char &ElementKind) {
1826 assert(isValidVectorKind(Name));
1828 ElementKind = Name.lower()[Name.size() - 1];
1831 if (Name.size() == 2)
1834 // Parse the lane count
1835 Name = Name.drop_front();
1836 while (isdigit(Name.front())) {
1837 NumElements = 10 * NumElements + (Name.front() - '0');
1838 Name = Name.drop_front();
1842 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1844 StartLoc = getLoc();
1845 RegNo = tryParseRegister();
1846 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1847 return (RegNo == (unsigned)-1);
1850 // Matches a register name or register alias previously defined by '.req'
1851 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1853 unsigned RegNum = isVector ? matchVectorRegName(Name)
1854 : MatchRegisterName(Name);
1857 // Check for aliases registered via .req. Canonicalize to lower case.
1858 // That's more consistent since register names are case insensitive, and
1859 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1860 auto Entry = RegisterReqs.find(Name.lower());
1861 if (Entry == RegisterReqs.end())
1863 // set RegNum if the match is the right kind of register
1864 if (isVector == Entry->getValue().first)
1865 RegNum = Entry->getValue().second;
1870 /// tryParseRegister - Try to parse a register name. The token must be an
1871 /// Identifier when called, and if it is a register name the token is eaten and
1872 /// the register is added to the operand list.
1873 int AArch64AsmParser::tryParseRegister() {
1874 MCAsmParser &Parser = getParser();
1875 const AsmToken &Tok = Parser.getTok();
1876 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1878 std::string lowerCase = Tok.getString().lower();
1879 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1880 // Also handle a few aliases of registers.
1882 RegNum = StringSwitch<unsigned>(lowerCase)
1883 .Case("fp", AArch64::FP)
1884 .Case("lr", AArch64::LR)
1885 .Case("x31", AArch64::XZR)
1886 .Case("w31", AArch64::WZR)
1892 Parser.Lex(); // Eat identifier token.
1896 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1897 /// kind specifier. If it is a register specifier, eat the token and return it.
1898 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1899 MCAsmParser &Parser = getParser();
1900 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1901 TokError("vector register expected");
1905 StringRef Name = Parser.getTok().getString();
1906 // If there is a kind specifier, it's separated from the register name by
1908 size_t Start = 0, Next = Name.find('.');
1909 StringRef Head = Name.slice(Start, Next);
1910 unsigned RegNum = matchRegisterNameAlias(Head, true);
1913 if (Next != StringRef::npos) {
1914 Kind = Name.slice(Next, StringRef::npos);
1915 if (!isValidVectorKind(Kind)) {
1916 TokError("invalid vector kind qualifier");
1920 Parser.Lex(); // Eat the register token.
1925 TokError("vector register expected");
1929 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1930 AArch64AsmParser::OperandMatchResultTy
1931 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1932 MCAsmParser &Parser = getParser();
1935 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1936 Error(S, "Expected cN operand where 0 <= N <= 15");
1937 return MatchOperand_ParseFail;
1940 StringRef Tok = Parser.getTok().getIdentifier();
1941 if (Tok[0] != 'c' && Tok[0] != 'C') {
1942 Error(S, "Expected cN operand where 0 <= N <= 15");
1943 return MatchOperand_ParseFail;
1947 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1948 if (BadNum || CRNum > 15) {
1949 Error(S, "Expected cN operand where 0 <= N <= 15");
1950 return MatchOperand_ParseFail;
1953 Parser.Lex(); // Eat identifier token.
1955 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1956 return MatchOperand_Success;
1959 /// tryParsePrefetch - Try to parse a prefetch operand.
1960 AArch64AsmParser::OperandMatchResultTy
1961 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1962 MCAsmParser &Parser = getParser();
1964 const AsmToken &Tok = Parser.getTok();
1965 // Either an identifier for named values or a 5-bit immediate.
1966 bool Hash = Tok.is(AsmToken::Hash);
1967 if (Hash || Tok.is(AsmToken::Integer)) {
1969 Parser.Lex(); // Eat hash token.
1970 const MCExpr *ImmVal;
1971 if (getParser().parseExpression(ImmVal))
1972 return MatchOperand_ParseFail;
1974 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1976 TokError("immediate value expected for prefetch operand");
1977 return MatchOperand_ParseFail;
1979 unsigned prfop = MCE->getValue();
1981 TokError("prefetch operand out of range, [0,31] expected");
1982 return MatchOperand_ParseFail;
1986 auto Mapper = AArch64PRFM::PRFMMapper();
1988 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
1989 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
1991 return MatchOperand_Success;
1994 if (Tok.isNot(AsmToken::Identifier)) {
1995 TokError("pre-fetch hint expected");
1996 return MatchOperand_ParseFail;
2000 auto Mapper = AArch64PRFM::PRFMMapper();
2002 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2004 TokError("pre-fetch hint expected");
2005 return MatchOperand_ParseFail;
2008 Parser.Lex(); // Eat identifier token.
2009 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2011 return MatchOperand_Success;
2014 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2016 AArch64AsmParser::OperandMatchResultTy
2017 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2018 MCAsmParser &Parser = getParser();
2022 if (Parser.getTok().is(AsmToken::Hash)) {
2023 Parser.Lex(); // Eat hash token.
2026 if (parseSymbolicImmVal(Expr))
2027 return MatchOperand_ParseFail;
2029 AArch64MCExpr::VariantKind ELFRefKind;
2030 MCSymbolRefExpr::VariantKind DarwinRefKind;
2032 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2033 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2034 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2035 // No modifier was specified at all; this is the syntax for an ELF basic
2036 // ADRP relocation (unfortunately).
2038 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2039 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2040 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2042 Error(S, "gotpage label reference not allowed an addend");
2043 return MatchOperand_ParseFail;
2044 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2045 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2046 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2047 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2048 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2049 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2050 // The operand must be an @page or @gotpage qualified symbolref.
2051 Error(S, "page or gotpage label reference expected");
2052 return MatchOperand_ParseFail;
2056 // We have either a label reference possibly with addend or an immediate. The
2057 // addend is a raw value here. The linker will adjust it to only reference the
2059 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2060 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2062 return MatchOperand_Success;
2065 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2067 AArch64AsmParser::OperandMatchResultTy
2068 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2069 MCAsmParser &Parser = getParser();
2073 if (Parser.getTok().is(AsmToken::Hash)) {
2074 Parser.Lex(); // Eat hash token.
2077 if (getParser().parseExpression(Expr))
2078 return MatchOperand_ParseFail;
2080 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2081 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2083 return MatchOperand_Success;
2086 /// tryParseFPImm - A floating point immediate expression operand.
2087 AArch64AsmParser::OperandMatchResultTy
2088 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2089 MCAsmParser &Parser = getParser();
2093 if (Parser.getTok().is(AsmToken::Hash)) {
2094 Parser.Lex(); // Eat '#'
2098 // Handle negation, as that still comes through as a separate token.
2099 bool isNegative = false;
2100 if (Parser.getTok().is(AsmToken::Minus)) {
2104 const AsmToken &Tok = Parser.getTok();
2105 if (Tok.is(AsmToken::Real)) {
2106 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2108 RealVal.changeSign();
2110 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2111 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2112 Parser.Lex(); // Eat the token.
2113 // Check for out of range values. As an exception, we let Zero through,
2114 // as we handle that special case in post-processing before matching in
2115 // order to use the zero register for it.
2116 if (Val == -1 && !RealVal.isPosZero()) {
2117 TokError("expected compatible register or floating-point constant");
2118 return MatchOperand_ParseFail;
2120 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2121 return MatchOperand_Success;
2123 if (Tok.is(AsmToken::Integer)) {
2125 if (!isNegative && Tok.getString().startswith("0x")) {
2126 Val = Tok.getIntVal();
2127 if (Val > 255 || Val < 0) {
2128 TokError("encoded floating point value out of range");
2129 return MatchOperand_ParseFail;
2132 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2133 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2134 // If we had a '-' in front, toggle the sign bit.
2135 IntVal ^= (uint64_t)isNegative << 63;
2136 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2138 Parser.Lex(); // Eat the token.
2139 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2140 return MatchOperand_Success;
2144 return MatchOperand_NoMatch;
2146 TokError("invalid floating point immediate");
2147 return MatchOperand_ParseFail;
2150 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2151 AArch64AsmParser::OperandMatchResultTy
2152 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2153 MCAsmParser &Parser = getParser();
2156 if (Parser.getTok().is(AsmToken::Hash))
2157 Parser.Lex(); // Eat '#'
2158 else if (Parser.getTok().isNot(AsmToken::Integer))
2159 // Operand should start from # or should be integer, emit error otherwise.
2160 return MatchOperand_NoMatch;
2163 if (parseSymbolicImmVal(Imm))
2164 return MatchOperand_ParseFail;
2165 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2166 uint64_t ShiftAmount = 0;
2167 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2169 int64_t Val = MCE->getValue();
2170 if (Val > 0xfff && (Val & 0xfff) == 0) {
2171 Imm = MCConstantExpr::create(Val >> 12, getContext());
2175 SMLoc E = Parser.getTok().getLoc();
2176 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2178 return MatchOperand_Success;
2184 // The optional operand must be "lsl #N" where N is non-negative.
2185 if (!Parser.getTok().is(AsmToken::Identifier) ||
2186 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2187 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2188 return MatchOperand_ParseFail;
2194 if (Parser.getTok().is(AsmToken::Hash)) {
2198 if (Parser.getTok().isNot(AsmToken::Integer)) {
2199 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2200 return MatchOperand_ParseFail;
2203 int64_t ShiftAmount = Parser.getTok().getIntVal();
2205 if (ShiftAmount < 0) {
2206 Error(Parser.getTok().getLoc(), "positive shift amount required");
2207 return MatchOperand_ParseFail;
2209 Parser.Lex(); // Eat the number
2211 SMLoc E = Parser.getTok().getLoc();
2212 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2213 S, E, getContext()));
2214 return MatchOperand_Success;
2217 /// parseCondCodeString - Parse a Condition Code string.
2218 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2219 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2220 .Case("eq", AArch64CC::EQ)
2221 .Case("ne", AArch64CC::NE)
2222 .Case("cs", AArch64CC::HS)
2223 .Case("hs", AArch64CC::HS)
2224 .Case("cc", AArch64CC::LO)
2225 .Case("lo", AArch64CC::LO)
2226 .Case("mi", AArch64CC::MI)
2227 .Case("pl", AArch64CC::PL)
2228 .Case("vs", AArch64CC::VS)
2229 .Case("vc", AArch64CC::VC)
2230 .Case("hi", AArch64CC::HI)
2231 .Case("ls", AArch64CC::LS)
2232 .Case("ge", AArch64CC::GE)
2233 .Case("lt", AArch64CC::LT)
2234 .Case("gt", AArch64CC::GT)
2235 .Case("le", AArch64CC::LE)
2236 .Case("al", AArch64CC::AL)
2237 .Case("nv", AArch64CC::NV)
2238 .Default(AArch64CC::Invalid);
2242 /// parseCondCode - Parse a Condition Code operand.
2243 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2244 bool invertCondCode) {
2245 MCAsmParser &Parser = getParser();
2247 const AsmToken &Tok = Parser.getTok();
2248 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2250 StringRef Cond = Tok.getString();
2251 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2252 if (CC == AArch64CC::Invalid)
2253 return TokError("invalid condition code");
2254 Parser.Lex(); // Eat identifier token.
2256 if (invertCondCode) {
2257 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2258 return TokError("condition codes AL and NV are invalid for this instruction");
2259 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2263 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2267 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2268 /// them if present.
2269 AArch64AsmParser::OperandMatchResultTy
2270 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2271 MCAsmParser &Parser = getParser();
2272 const AsmToken &Tok = Parser.getTok();
2273 std::string LowerID = Tok.getString().lower();
2274 AArch64_AM::ShiftExtendType ShOp =
2275 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2276 .Case("lsl", AArch64_AM::LSL)
2277 .Case("lsr", AArch64_AM::LSR)
2278 .Case("asr", AArch64_AM::ASR)
2279 .Case("ror", AArch64_AM::ROR)
2280 .Case("msl", AArch64_AM::MSL)
2281 .Case("uxtb", AArch64_AM::UXTB)
2282 .Case("uxth", AArch64_AM::UXTH)
2283 .Case("uxtw", AArch64_AM::UXTW)
2284 .Case("uxtx", AArch64_AM::UXTX)
2285 .Case("sxtb", AArch64_AM::SXTB)
2286 .Case("sxth", AArch64_AM::SXTH)
2287 .Case("sxtw", AArch64_AM::SXTW)
2288 .Case("sxtx", AArch64_AM::SXTX)
2289 .Default(AArch64_AM::InvalidShiftExtend);
2291 if (ShOp == AArch64_AM::InvalidShiftExtend)
2292 return MatchOperand_NoMatch;
2294 SMLoc S = Tok.getLoc();
2297 bool Hash = getLexer().is(AsmToken::Hash);
2298 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2299 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2300 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2301 ShOp == AArch64_AM::MSL) {
2302 // We expect a number here.
2303 TokError("expected #imm after shift specifier");
2304 return MatchOperand_ParseFail;
2307 // "extend" type operatoins don't need an immediate, #0 is implicit.
2308 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2310 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2311 return MatchOperand_Success;
2315 Parser.Lex(); // Eat the '#'.
2317 // Make sure we do actually have a number or a parenthesized expression.
2318 SMLoc E = Parser.getTok().getLoc();
2319 if (!Parser.getTok().is(AsmToken::Integer) &&
2320 !Parser.getTok().is(AsmToken::LParen)) {
2321 Error(E, "expected integer shift amount");
2322 return MatchOperand_ParseFail;
2325 const MCExpr *ImmVal;
2326 if (getParser().parseExpression(ImmVal))
2327 return MatchOperand_ParseFail;
2329 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2331 Error(E, "expected constant '#imm' after shift specifier");
2332 return MatchOperand_ParseFail;
2335 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2336 Operands.push_back(AArch64Operand::CreateShiftExtend(
2337 ShOp, MCE->getValue(), true, S, E, getContext()));
2338 return MatchOperand_Success;
2341 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2342 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2343 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2344 OperandVector &Operands) {
2345 if (Name.find('.') != StringRef::npos)
2346 return TokError("invalid operand");
2350 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2352 MCAsmParser &Parser = getParser();
2353 const AsmToken &Tok = Parser.getTok();
2354 StringRef Op = Tok.getString();
2355 SMLoc S = Tok.getLoc();
2357 const MCExpr *Expr = nullptr;
2359 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2361 Expr = MCConstantExpr::create(op1, getContext()); \
2362 Operands.push_back( \
2363 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2364 Operands.push_back( \
2365 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2366 Operands.push_back( \
2367 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2368 Expr = MCConstantExpr::create(op2, getContext()); \
2369 Operands.push_back( \
2370 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2373 if (Mnemonic == "ic") {
2374 if (!Op.compare_lower("ialluis")) {
2375 // SYS #0, C7, C1, #0
2376 SYS_ALIAS(0, 7, 1, 0);
2377 } else if (!Op.compare_lower("iallu")) {
2378 // SYS #0, C7, C5, #0
2379 SYS_ALIAS(0, 7, 5, 0);
2380 } else if (!Op.compare_lower("ivau")) {
2381 // SYS #3, C7, C5, #1
2382 SYS_ALIAS(3, 7, 5, 1);
2384 return TokError("invalid operand for IC instruction");
2386 } else if (Mnemonic == "dc") {
2387 if (!Op.compare_lower("zva")) {
2388 // SYS #3, C7, C4, #1
2389 SYS_ALIAS(3, 7, 4, 1);
2390 } else if (!Op.compare_lower("ivac")) {
2391 // SYS #3, C7, C6, #1
2392 SYS_ALIAS(0, 7, 6, 1);
2393 } else if (!Op.compare_lower("isw")) {
2394 // SYS #0, C7, C6, #2
2395 SYS_ALIAS(0, 7, 6, 2);
2396 } else if (!Op.compare_lower("cvac")) {
2397 // SYS #3, C7, C10, #1
2398 SYS_ALIAS(3, 7, 10, 1);
2399 } else if (!Op.compare_lower("csw")) {
2400 // SYS #0, C7, C10, #2
2401 SYS_ALIAS(0, 7, 10, 2);
2402 } else if (!Op.compare_lower("cvau")) {
2403 // SYS #3, C7, C11, #1
2404 SYS_ALIAS(3, 7, 11, 1);
2405 } else if (!Op.compare_lower("civac")) {
2406 // SYS #3, C7, C14, #1
2407 SYS_ALIAS(3, 7, 14, 1);
2408 } else if (!Op.compare_lower("cisw")) {
2409 // SYS #0, C7, C14, #2
2410 SYS_ALIAS(0, 7, 14, 2);
2412 return TokError("invalid operand for DC instruction");
2414 } else if (Mnemonic == "at") {
2415 if (!Op.compare_lower("s1e1r")) {
2416 // SYS #0, C7, C8, #0
2417 SYS_ALIAS(0, 7, 8, 0);
2418 } else if (!Op.compare_lower("s1e2r")) {
2419 // SYS #4, C7, C8, #0
2420 SYS_ALIAS(4, 7, 8, 0);
2421 } else if (!Op.compare_lower("s1e3r")) {
2422 // SYS #6, C7, C8, #0
2423 SYS_ALIAS(6, 7, 8, 0);
2424 } else if (!Op.compare_lower("s1e1w")) {
2425 // SYS #0, C7, C8, #1
2426 SYS_ALIAS(0, 7, 8, 1);
2427 } else if (!Op.compare_lower("s1e2w")) {
2428 // SYS #4, C7, C8, #1
2429 SYS_ALIAS(4, 7, 8, 1);
2430 } else if (!Op.compare_lower("s1e3w")) {
2431 // SYS #6, C7, C8, #1
2432 SYS_ALIAS(6, 7, 8, 1);
2433 } else if (!Op.compare_lower("s1e0r")) {
2434 // SYS #0, C7, C8, #3
2435 SYS_ALIAS(0, 7, 8, 2);
2436 } else if (!Op.compare_lower("s1e0w")) {
2437 // SYS #0, C7, C8, #3
2438 SYS_ALIAS(0, 7, 8, 3);
2439 } else if (!Op.compare_lower("s12e1r")) {
2440 // SYS #4, C7, C8, #4
2441 SYS_ALIAS(4, 7, 8, 4);
2442 } else if (!Op.compare_lower("s12e1w")) {
2443 // SYS #4, C7, C8, #5
2444 SYS_ALIAS(4, 7, 8, 5);
2445 } else if (!Op.compare_lower("s12e0r")) {
2446 // SYS #4, C7, C8, #6
2447 SYS_ALIAS(4, 7, 8, 6);
2448 } else if (!Op.compare_lower("s12e0w")) {
2449 // SYS #4, C7, C8, #7
2450 SYS_ALIAS(4, 7, 8, 7);
2452 return TokError("invalid operand for AT instruction");
2454 } else if (Mnemonic == "tlbi") {
2455 if (!Op.compare_lower("vmalle1is")) {
2456 // SYS #0, C8, C3, #0
2457 SYS_ALIAS(0, 8, 3, 0);
2458 } else if (!Op.compare_lower("alle2is")) {
2459 // SYS #4, C8, C3, #0
2460 SYS_ALIAS(4, 8, 3, 0);
2461 } else if (!Op.compare_lower("alle3is")) {
2462 // SYS #6, C8, C3, #0
2463 SYS_ALIAS(6, 8, 3, 0);
2464 } else if (!Op.compare_lower("vae1is")) {
2465 // SYS #0, C8, C3, #1
2466 SYS_ALIAS(0, 8, 3, 1);
2467 } else if (!Op.compare_lower("vae2is")) {
2468 // SYS #4, C8, C3, #1
2469 SYS_ALIAS(4, 8, 3, 1);
2470 } else if (!Op.compare_lower("vae3is")) {
2471 // SYS #6, C8, C3, #1
2472 SYS_ALIAS(6, 8, 3, 1);
2473 } else if (!Op.compare_lower("aside1is")) {
2474 // SYS #0, C8, C3, #2
2475 SYS_ALIAS(0, 8, 3, 2);
2476 } else if (!Op.compare_lower("vaae1is")) {
2477 // SYS #0, C8, C3, #3
2478 SYS_ALIAS(0, 8, 3, 3);
2479 } else if (!Op.compare_lower("alle1is")) {
2480 // SYS #4, C8, C3, #4
2481 SYS_ALIAS(4, 8, 3, 4);
2482 } else if (!Op.compare_lower("vale1is")) {
2483 // SYS #0, C8, C3, #5
2484 SYS_ALIAS(0, 8, 3, 5);
2485 } else if (!Op.compare_lower("vaale1is")) {
2486 // SYS #0, C8, C3, #7
2487 SYS_ALIAS(0, 8, 3, 7);
2488 } else if (!Op.compare_lower("vmalle1")) {
2489 // SYS #0, C8, C7, #0
2490 SYS_ALIAS(0, 8, 7, 0);
2491 } else if (!Op.compare_lower("alle2")) {
2492 // SYS #4, C8, C7, #0
2493 SYS_ALIAS(4, 8, 7, 0);
2494 } else if (!Op.compare_lower("vale2is")) {
2495 // SYS #4, C8, C3, #5
2496 SYS_ALIAS(4, 8, 3, 5);
2497 } else if (!Op.compare_lower("vale3is")) {
2498 // SYS #6, C8, C3, #5
2499 SYS_ALIAS(6, 8, 3, 5);
2500 } else if (!Op.compare_lower("alle3")) {
2501 // SYS #6, C8, C7, #0
2502 SYS_ALIAS(6, 8, 7, 0);
2503 } else if (!Op.compare_lower("vae1")) {
2504 // SYS #0, C8, C7, #1
2505 SYS_ALIAS(0, 8, 7, 1);
2506 } else if (!Op.compare_lower("vae2")) {
2507 // SYS #4, C8, C7, #1
2508 SYS_ALIAS(4, 8, 7, 1);
2509 } else if (!Op.compare_lower("vae3")) {
2510 // SYS #6, C8, C7, #1
2511 SYS_ALIAS(6, 8, 7, 1);
2512 } else if (!Op.compare_lower("aside1")) {
2513 // SYS #0, C8, C7, #2
2514 SYS_ALIAS(0, 8, 7, 2);
2515 } else if (!Op.compare_lower("vaae1")) {
2516 // SYS #0, C8, C7, #3
2517 SYS_ALIAS(0, 8, 7, 3);
2518 } else if (!Op.compare_lower("alle1")) {
2519 // SYS #4, C8, C7, #4
2520 SYS_ALIAS(4, 8, 7, 4);
2521 } else if (!Op.compare_lower("vale1")) {
2522 // SYS #0, C8, C7, #5
2523 SYS_ALIAS(0, 8, 7, 5);
2524 } else if (!Op.compare_lower("vale2")) {
2525 // SYS #4, C8, C7, #5
2526 SYS_ALIAS(4, 8, 7, 5);
2527 } else if (!Op.compare_lower("vale3")) {
2528 // SYS #6, C8, C7, #5
2529 SYS_ALIAS(6, 8, 7, 5);
2530 } else if (!Op.compare_lower("vaale1")) {
2531 // SYS #0, C8, C7, #7
2532 SYS_ALIAS(0, 8, 7, 7);
2533 } else if (!Op.compare_lower("ipas2e1")) {
2534 // SYS #4, C8, C4, #1
2535 SYS_ALIAS(4, 8, 4, 1);
2536 } else if (!Op.compare_lower("ipas2le1")) {
2537 // SYS #4, C8, C4, #5
2538 SYS_ALIAS(4, 8, 4, 5);
2539 } else if (!Op.compare_lower("ipas2e1is")) {
2540 // SYS #4, C8, C4, #1
2541 SYS_ALIAS(4, 8, 0, 1);
2542 } else if (!Op.compare_lower("ipas2le1is")) {
2543 // SYS #4, C8, C4, #5
2544 SYS_ALIAS(4, 8, 0, 5);
2545 } else if (!Op.compare_lower("vmalls12e1")) {
2546 // SYS #4, C8, C7, #6
2547 SYS_ALIAS(4, 8, 7, 6);
2548 } else if (!Op.compare_lower("vmalls12e1is")) {
2549 // SYS #4, C8, C3, #6
2550 SYS_ALIAS(4, 8, 3, 6);
2552 return TokError("invalid operand for TLBI instruction");
2558 Parser.Lex(); // Eat operand.
2560 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2561 bool HasRegister = false;
2563 // Check for the optional register operand.
2564 if (getLexer().is(AsmToken::Comma)) {
2565 Parser.Lex(); // Eat comma.
2567 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2568 return TokError("expected register operand");
2573 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2574 Parser.eatToEndOfStatement();
2575 return TokError("unexpected token in argument list");
2578 if (ExpectRegister && !HasRegister) {
2579 return TokError("specified " + Mnemonic + " op requires a register");
2581 else if (!ExpectRegister && HasRegister) {
2582 return TokError("specified " + Mnemonic + " op does not use a register");
2585 Parser.Lex(); // Consume the EndOfStatement
2589 AArch64AsmParser::OperandMatchResultTy
2590 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2591 MCAsmParser &Parser = getParser();
2592 const AsmToken &Tok = Parser.getTok();
2594 // Can be either a #imm style literal or an option name
2595 bool Hash = Tok.is(AsmToken::Hash);
2596 if (Hash || Tok.is(AsmToken::Integer)) {
2597 // Immediate operand.
2599 Parser.Lex(); // Eat the '#'
2600 const MCExpr *ImmVal;
2601 SMLoc ExprLoc = getLoc();
2602 if (getParser().parseExpression(ImmVal))
2603 return MatchOperand_ParseFail;
2604 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2606 Error(ExprLoc, "immediate value expected for barrier operand");
2607 return MatchOperand_ParseFail;
2609 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2610 Error(ExprLoc, "barrier operand out of range");
2611 return MatchOperand_ParseFail;
2614 auto Mapper = AArch64DB::DBarrierMapper();
2616 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2617 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2618 ExprLoc, getContext()));
2619 return MatchOperand_Success;
2622 if (Tok.isNot(AsmToken::Identifier)) {
2623 TokError("invalid operand for instruction");
2624 return MatchOperand_ParseFail;
2628 auto Mapper = AArch64DB::DBarrierMapper();
2630 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2632 TokError("invalid barrier option name");
2633 return MatchOperand_ParseFail;
2636 // The only valid named option for ISB is 'sy'
2637 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2638 TokError("'sy' or #imm operand expected");
2639 return MatchOperand_ParseFail;
2642 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2643 getLoc(), getContext()));
2644 Parser.Lex(); // Consume the option
2646 return MatchOperand_Success;
2649 AArch64AsmParser::OperandMatchResultTy
2650 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2651 MCAsmParser &Parser = getParser();
2652 const AsmToken &Tok = Parser.getTok();
2654 if (Tok.isNot(AsmToken::Identifier))
2655 return MatchOperand_NoMatch;
2658 auto MRSMapper = AArch64SysReg::MRSMapper();
2659 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2661 assert(IsKnown == (MRSReg != -1U) &&
2662 "register should be -1 if and only if it's unknown");
2664 auto MSRMapper = AArch64SysReg::MSRMapper();
2665 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2667 assert(IsKnown == (MSRReg != -1U) &&
2668 "register should be -1 if and only if it's unknown");
2670 auto PStateMapper = AArch64PState::PStateMapper();
2671 uint32_t PStateField =
2672 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2673 assert(IsKnown == (PStateField != -1U) &&
2674 "register should be -1 if and only if it's unknown");
2676 Operands.push_back(AArch64Operand::CreateSysReg(
2677 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2678 Parser.Lex(); // Eat identifier
2680 return MatchOperand_Success;
2683 /// tryParseVectorRegister - Parse a vector register operand.
2684 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2685 MCAsmParser &Parser = getParser();
2686 if (Parser.getTok().isNot(AsmToken::Identifier))
2690 // Check for a vector register specifier first.
2692 int64_t Reg = tryMatchVectorRegister(Kind, false);
2696 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2697 // If there was an explicit qualifier, that goes on as a literal text
2701 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2703 // If there is an index specifier following the register, parse that too.
2704 if (Parser.getTok().is(AsmToken::LBrac)) {
2705 SMLoc SIdx = getLoc();
2706 Parser.Lex(); // Eat left bracket token.
2708 const MCExpr *ImmVal;
2709 if (getParser().parseExpression(ImmVal))
2711 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2713 TokError("immediate value expected for vector index");
2718 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2719 Error(E, "']' expected");
2723 Parser.Lex(); // Eat right bracket token.
2725 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2732 /// parseRegister - Parse a non-vector register operand.
2733 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2734 MCAsmParser &Parser = getParser();
2736 // Try for a vector register.
2737 if (!tryParseVectorRegister(Operands))
2740 // Try for a scalar register.
2741 int64_t Reg = tryParseRegister();
2745 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2747 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2748 // as a string token in the instruction itself.
2749 if (getLexer().getKind() == AsmToken::LBrac) {
2750 SMLoc LBracS = getLoc();
2752 const AsmToken &Tok = Parser.getTok();
2753 if (Tok.is(AsmToken::Integer)) {
2754 SMLoc IntS = getLoc();
2755 int64_t Val = Tok.getIntVal();
2758 if (getLexer().getKind() == AsmToken::RBrac) {
2759 SMLoc RBracS = getLoc();
2762 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2764 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2766 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2776 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2777 MCAsmParser &Parser = getParser();
2778 bool HasELFModifier = false;
2779 AArch64MCExpr::VariantKind RefKind;
2781 if (Parser.getTok().is(AsmToken::Colon)) {
2782 Parser.Lex(); // Eat ':"
2783 HasELFModifier = true;
2785 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2786 Error(Parser.getTok().getLoc(),
2787 "expect relocation specifier in operand after ':'");
2791 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2792 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2793 .Case("lo12", AArch64MCExpr::VK_LO12)
2794 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2795 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2796 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2797 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2798 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2799 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2800 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2801 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2802 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2803 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2804 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2805 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2806 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2807 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2808 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2809 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2810 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2811 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2812 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2813 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2814 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2815 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2816 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2817 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2818 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2819 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2820 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2821 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2822 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2823 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2824 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2825 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2826 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2827 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2828 .Default(AArch64MCExpr::VK_INVALID);
2830 if (RefKind == AArch64MCExpr::VK_INVALID) {
2831 Error(Parser.getTok().getLoc(),
2832 "expect relocation specifier in operand after ':'");
2836 Parser.Lex(); // Eat identifier
2838 if (Parser.getTok().isNot(AsmToken::Colon)) {
2839 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2842 Parser.Lex(); // Eat ':'
2845 if (getParser().parseExpression(ImmVal))
2849 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2854 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2855 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2856 MCAsmParser &Parser = getParser();
2857 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2859 Parser.Lex(); // Eat left bracket token.
2861 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2864 int64_t PrevReg = FirstReg;
2867 if (Parser.getTok().is(AsmToken::Minus)) {
2868 Parser.Lex(); // Eat the minus.
2870 SMLoc Loc = getLoc();
2872 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2875 // Any Kind suffices must match on all regs in the list.
2876 if (Kind != NextKind)
2877 return Error(Loc, "mismatched register size suffix");
2879 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2881 if (Space == 0 || Space > 3) {
2882 return Error(Loc, "invalid number of vectors");
2888 while (Parser.getTok().is(AsmToken::Comma)) {
2889 Parser.Lex(); // Eat the comma token.
2891 SMLoc Loc = getLoc();
2893 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2896 // Any Kind suffices must match on all regs in the list.
2897 if (Kind != NextKind)
2898 return Error(Loc, "mismatched register size suffix");
2900 // Registers must be incremental (with wraparound at 31)
2901 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2902 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2903 return Error(Loc, "registers must be sequential");
2910 if (Parser.getTok().isNot(AsmToken::RCurly))
2911 return Error(getLoc(), "'}' expected");
2912 Parser.Lex(); // Eat the '}' token.
2915 return Error(S, "invalid number of vectors");
2917 unsigned NumElements = 0;
2918 char ElementKind = 0;
2920 parseValidVectorKind(Kind, NumElements, ElementKind);
2922 Operands.push_back(AArch64Operand::CreateVectorList(
2923 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2925 // If there is an index specifier following the list, parse that too.
2926 if (Parser.getTok().is(AsmToken::LBrac)) {
2927 SMLoc SIdx = getLoc();
2928 Parser.Lex(); // Eat left bracket token.
2930 const MCExpr *ImmVal;
2931 if (getParser().parseExpression(ImmVal))
2933 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2935 TokError("immediate value expected for vector index");
2940 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2941 Error(E, "']' expected");
2945 Parser.Lex(); // Eat right bracket token.
2947 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2953 AArch64AsmParser::OperandMatchResultTy
2954 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2955 MCAsmParser &Parser = getParser();
2956 const AsmToken &Tok = Parser.getTok();
2957 if (!Tok.is(AsmToken::Identifier))
2958 return MatchOperand_NoMatch;
2960 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2962 MCContext &Ctx = getContext();
2963 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2964 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2965 return MatchOperand_NoMatch;
2968 Parser.Lex(); // Eat register
2970 if (Parser.getTok().isNot(AsmToken::Comma)) {
2972 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2973 return MatchOperand_Success;
2975 Parser.Lex(); // Eat comma.
2977 if (Parser.getTok().is(AsmToken::Hash))
2978 Parser.Lex(); // Eat hash
2980 if (Parser.getTok().isNot(AsmToken::Integer)) {
2981 Error(getLoc(), "index must be absent or #0");
2982 return MatchOperand_ParseFail;
2985 const MCExpr *ImmVal;
2986 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2987 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2988 Error(getLoc(), "index must be absent or #0");
2989 return MatchOperand_ParseFail;
2993 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2994 return MatchOperand_Success;
2997 /// parseOperand - Parse a arm instruction operand. For now this parses the
2998 /// operand regardless of the mnemonic.
2999 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3000 bool invertCondCode) {
3001 MCAsmParser &Parser = getParser();
3002 // Check if the current operand has a custom associated parser, if so, try to
3003 // custom parse the operand, or fallback to the general approach.
3004 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3005 if (ResTy == MatchOperand_Success)
3007 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3008 // there was a match, but an error occurred, in which case, just return that
3009 // the operand parsing failed.
3010 if (ResTy == MatchOperand_ParseFail)
3013 // Nothing custom, so do general case parsing.
3015 switch (getLexer().getKind()) {
3019 if (parseSymbolicImmVal(Expr))
3020 return Error(S, "invalid operand");
3022 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3023 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3026 case AsmToken::LBrac: {
3027 SMLoc Loc = Parser.getTok().getLoc();
3028 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3030 Parser.Lex(); // Eat '['
3032 // There's no comma after a '[', so we can parse the next operand
3034 return parseOperand(Operands, false, false);
3036 case AsmToken::LCurly:
3037 return parseVectorList(Operands);
3038 case AsmToken::Identifier: {
3039 // If we're expecting a Condition Code operand, then just parse that.
3041 return parseCondCode(Operands, invertCondCode);
3043 // If it's a register name, parse it.
3044 if (!parseRegister(Operands))
3047 // This could be an optional "shift" or "extend" operand.
3048 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3049 // We can only continue if no tokens were eaten.
3050 if (GotShift != MatchOperand_NoMatch)
3053 // This was not a register so parse other operands that start with an
3054 // identifier (like labels) as expressions and create them as immediates.
3055 const MCExpr *IdVal;
3057 if (getParser().parseExpression(IdVal))
3060 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3061 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3064 case AsmToken::Integer:
3065 case AsmToken::Real:
3066 case AsmToken::Hash: {
3067 // #42 -> immediate.
3069 if (getLexer().is(AsmToken::Hash))
3072 // Parse a negative sign
3073 bool isNegative = false;
3074 if (Parser.getTok().is(AsmToken::Minus)) {
3076 // We need to consume this token only when we have a Real, otherwise
3077 // we let parseSymbolicImmVal take care of it
3078 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3082 // The only Real that should come through here is a literal #0.0 for
3083 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3084 // so convert the value.
3085 const AsmToken &Tok = Parser.getTok();
3086 if (Tok.is(AsmToken::Real)) {
3087 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3088 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3089 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3090 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3091 Mnemonic != "fcmlt")
3092 return TokError("unexpected floating point literal");
3093 else if (IntVal != 0 || isNegative)
3094 return TokError("expected floating-point constant #0.0");
3095 Parser.Lex(); // Eat the token.
3098 AArch64Operand::CreateToken("#0", false, S, getContext()));
3100 AArch64Operand::CreateToken(".0", false, S, getContext()));
3104 const MCExpr *ImmVal;
3105 if (parseSymbolicImmVal(ImmVal))
3108 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3109 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3112 case AsmToken::Equal: {
3113 SMLoc Loc = Parser.getTok().getLoc();
3114 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3115 return Error(Loc, "unexpected token in operand");
3116 Parser.Lex(); // Eat '='
3117 const MCExpr *SubExprVal;
3118 if (getParser().parseExpression(SubExprVal))
3121 if (Operands.size() < 2 ||
3122 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3126 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3127 Operands[1]->getReg());
3129 MCContext& Ctx = getContext();
3130 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3131 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3132 if (isa<MCConstantExpr>(SubExprVal)) {
3133 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3134 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3135 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3139 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3140 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3141 Operands.push_back(AArch64Operand::CreateImm(
3142 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3144 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3145 ShiftAmt, true, S, E, Ctx));
3148 APInt Simm = APInt(64, Imm << ShiftAmt);
3149 // check if the immediate is an unsigned or signed 32-bit int for W regs
3150 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3151 return Error(Loc, "Immediate too large for register");
3153 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3154 const MCExpr *CPLoc =
3155 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3156 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3162 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3164 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3165 StringRef Name, SMLoc NameLoc,
3166 OperandVector &Operands) {
3167 MCAsmParser &Parser = getParser();
3168 Name = StringSwitch<StringRef>(Name.lower())
3169 .Case("beq", "b.eq")
3170 .Case("bne", "b.ne")
3171 .Case("bhs", "b.hs")
3172 .Case("bcs", "b.cs")
3173 .Case("blo", "b.lo")
3174 .Case("bcc", "b.cc")
3175 .Case("bmi", "b.mi")
3176 .Case("bpl", "b.pl")
3177 .Case("bvs", "b.vs")
3178 .Case("bvc", "b.vc")
3179 .Case("bhi", "b.hi")
3180 .Case("bls", "b.ls")
3181 .Case("bge", "b.ge")
3182 .Case("blt", "b.lt")
3183 .Case("bgt", "b.gt")
3184 .Case("ble", "b.le")
3185 .Case("bal", "b.al")
3186 .Case("bnv", "b.nv")
3189 // First check for the AArch64-specific .req directive.
3190 if (Parser.getTok().is(AsmToken::Identifier) &&
3191 Parser.getTok().getIdentifier() == ".req") {
3192 parseDirectiveReq(Name, NameLoc);
3193 // We always return 'error' for this, as we're done with this
3194 // statement and don't need to match the 'instruction."
3198 // Create the leading tokens for the mnemonic, split by '.' characters.
3199 size_t Start = 0, Next = Name.find('.');
3200 StringRef Head = Name.slice(Start, Next);
3202 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3203 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3204 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3205 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3206 Parser.eatToEndOfStatement();
3211 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3214 // Handle condition codes for a branch mnemonic
3215 if (Head == "b" && Next != StringRef::npos) {
3217 Next = Name.find('.', Start + 1);
3218 Head = Name.slice(Start + 1, Next);
3220 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3221 (Head.data() - Name.data()));
3222 AArch64CC::CondCode CC = parseCondCodeString(Head);
3223 if (CC == AArch64CC::Invalid)
3224 return Error(SuffixLoc, "invalid condition code");
3226 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3228 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3231 // Add the remaining tokens in the mnemonic.
3232 while (Next != StringRef::npos) {
3234 Next = Name.find('.', Start + 1);
3235 Head = Name.slice(Start, Next);
3236 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3237 (Head.data() - Name.data()) + 1);
3239 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3242 // Conditional compare instructions have a Condition Code operand, which needs
3243 // to be parsed and an immediate operand created.
3244 bool condCodeFourthOperand =
3245 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3246 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3247 Head == "csinc" || Head == "csinv" || Head == "csneg");
3249 // These instructions are aliases to some of the conditional select
3250 // instructions. However, the condition code is inverted in the aliased
3253 // FIXME: Is this the correct way to handle these? Or should the parser
3254 // generate the aliased instructions directly?
3255 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3256 bool condCodeThirdOperand =
3257 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3259 // Read the remaining operands.
3260 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3261 // Read the first operand.
3262 if (parseOperand(Operands, false, false)) {
3263 Parser.eatToEndOfStatement();
3268 while (getLexer().is(AsmToken::Comma)) {
3269 Parser.Lex(); // Eat the comma.
3271 // Parse and remember the operand.
3272 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3273 (N == 3 && condCodeThirdOperand) ||
3274 (N == 2 && condCodeSecondOperand),
3275 condCodeSecondOperand || condCodeThirdOperand)) {
3276 Parser.eatToEndOfStatement();
3280 // After successfully parsing some operands there are two special cases to
3281 // consider (i.e. notional operands not separated by commas). Both are due
3282 // to memory specifiers:
3283 // + An RBrac will end an address for load/store/prefetch
3284 // + An '!' will indicate a pre-indexed operation.
3286 // It's someone else's responsibility to make sure these tokens are sane
3287 // in the given context!
3288 if (Parser.getTok().is(AsmToken::RBrac)) {
3289 SMLoc Loc = Parser.getTok().getLoc();
3290 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3295 if (Parser.getTok().is(AsmToken::Exclaim)) {
3296 SMLoc Loc = Parser.getTok().getLoc();
3297 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3306 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3307 SMLoc Loc = Parser.getTok().getLoc();
3308 Parser.eatToEndOfStatement();
3309 return Error(Loc, "unexpected token in argument list");
3312 Parser.Lex(); // Consume the EndOfStatement
3316 // FIXME: This entire function is a giant hack to provide us with decent
3317 // operand range validation/diagnostics until TableGen/MC can be extended
3318 // to support autogeneration of this kind of validation.
3319 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3320 SmallVectorImpl<SMLoc> &Loc) {
3321 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3322 // Check for indexed addressing modes w/ the base register being the
3323 // same as a destination/source register or pair load where
3324 // the Rt == Rt2. All of those are undefined behaviour.
3325 switch (Inst.getOpcode()) {
3326 case AArch64::LDPSWpre:
3327 case AArch64::LDPWpost:
3328 case AArch64::LDPWpre:
3329 case AArch64::LDPXpost:
3330 case AArch64::LDPXpre: {
3331 unsigned Rt = Inst.getOperand(1).getReg();
3332 unsigned Rt2 = Inst.getOperand(2).getReg();
3333 unsigned Rn = Inst.getOperand(3).getReg();
3334 if (RI->isSubRegisterEq(Rn, Rt))
3335 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3336 "is also a destination");
3337 if (RI->isSubRegisterEq(Rn, Rt2))
3338 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3339 "is also a destination");
3342 case AArch64::LDPDi:
3343 case AArch64::LDPQi:
3344 case AArch64::LDPSi:
3345 case AArch64::LDPSWi:
3346 case AArch64::LDPWi:
3347 case AArch64::LDPXi: {
3348 unsigned Rt = Inst.getOperand(0).getReg();
3349 unsigned Rt2 = Inst.getOperand(1).getReg();
3351 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3354 case AArch64::LDPDpost:
3355 case AArch64::LDPDpre:
3356 case AArch64::LDPQpost:
3357 case AArch64::LDPQpre:
3358 case AArch64::LDPSpost:
3359 case AArch64::LDPSpre:
3360 case AArch64::LDPSWpost: {
3361 unsigned Rt = Inst.getOperand(1).getReg();
3362 unsigned Rt2 = Inst.getOperand(2).getReg();
3364 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3367 case AArch64::STPDpost:
3368 case AArch64::STPDpre:
3369 case AArch64::STPQpost:
3370 case AArch64::STPQpre:
3371 case AArch64::STPSpost:
3372 case AArch64::STPSpre:
3373 case AArch64::STPWpost:
3374 case AArch64::STPWpre:
3375 case AArch64::STPXpost:
3376 case AArch64::STPXpre: {
3377 unsigned Rt = Inst.getOperand(1).getReg();
3378 unsigned Rt2 = Inst.getOperand(2).getReg();
3379 unsigned Rn = Inst.getOperand(3).getReg();
3380 if (RI->isSubRegisterEq(Rn, Rt))
3381 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3382 "is also a source");
3383 if (RI->isSubRegisterEq(Rn, Rt2))
3384 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3385 "is also a source");
3388 case AArch64::LDRBBpre:
3389 case AArch64::LDRBpre:
3390 case AArch64::LDRHHpre:
3391 case AArch64::LDRHpre:
3392 case AArch64::LDRSBWpre:
3393 case AArch64::LDRSBXpre:
3394 case AArch64::LDRSHWpre:
3395 case AArch64::LDRSHXpre:
3396 case AArch64::LDRSWpre:
3397 case AArch64::LDRWpre:
3398 case AArch64::LDRXpre:
3399 case AArch64::LDRBBpost:
3400 case AArch64::LDRBpost:
3401 case AArch64::LDRHHpost:
3402 case AArch64::LDRHpost:
3403 case AArch64::LDRSBWpost:
3404 case AArch64::LDRSBXpost:
3405 case AArch64::LDRSHWpost:
3406 case AArch64::LDRSHXpost:
3407 case AArch64::LDRSWpost:
3408 case AArch64::LDRWpost:
3409 case AArch64::LDRXpost: {
3410 unsigned Rt = Inst.getOperand(1).getReg();
3411 unsigned Rn = Inst.getOperand(2).getReg();
3412 if (RI->isSubRegisterEq(Rn, Rt))
3413 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3414 "is also a source");
3417 case AArch64::STRBBpost:
3418 case AArch64::STRBpost:
3419 case AArch64::STRHHpost:
3420 case AArch64::STRHpost:
3421 case AArch64::STRWpost:
3422 case AArch64::STRXpost:
3423 case AArch64::STRBBpre:
3424 case AArch64::STRBpre:
3425 case AArch64::STRHHpre:
3426 case AArch64::STRHpre:
3427 case AArch64::STRWpre:
3428 case AArch64::STRXpre: {
3429 unsigned Rt = Inst.getOperand(1).getReg();
3430 unsigned Rn = Inst.getOperand(2).getReg();
3431 if (RI->isSubRegisterEq(Rn, Rt))
3432 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3433 "is also a source");
3438 // Now check immediate ranges. Separate from the above as there is overlap
3439 // in the instructions being checked and this keeps the nested conditionals
3441 switch (Inst.getOpcode()) {
3442 case AArch64::ADDSWri:
3443 case AArch64::ADDSXri:
3444 case AArch64::ADDWri:
3445 case AArch64::ADDXri:
3446 case AArch64::SUBSWri:
3447 case AArch64::SUBSXri:
3448 case AArch64::SUBWri:
3449 case AArch64::SUBXri: {
3450 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3451 // some slight duplication here.
3452 if (Inst.getOperand(2).isExpr()) {
3453 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3454 AArch64MCExpr::VariantKind ELFRefKind;
3455 MCSymbolRefExpr::VariantKind DarwinRefKind;
3457 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3458 return Error(Loc[2], "invalid immediate expression");
3461 // Only allow these with ADDXri.
3462 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3463 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3464 Inst.getOpcode() == AArch64::ADDXri)
3467 // Only allow these with ADDXri/ADDWri
3468 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3469 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3470 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3471 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3472 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3473 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3474 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3475 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3476 (Inst.getOpcode() == AArch64::ADDXri ||
3477 Inst.getOpcode() == AArch64::ADDWri))
3480 // Don't allow expressions in the immediate field otherwise
3481 return Error(Loc[2], "invalid immediate expression");
3490 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3492 case Match_MissingFeature:
3494 "instruction requires a CPU feature not currently enabled");
3495 case Match_InvalidOperand:
3496 return Error(Loc, "invalid operand for instruction");
3497 case Match_InvalidSuffix:
3498 return Error(Loc, "invalid type suffix for instruction");
3499 case Match_InvalidCondCode:
3500 return Error(Loc, "expected AArch64 condition code");
3501 case Match_AddSubRegExtendSmall:
3503 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3504 case Match_AddSubRegExtendLarge:
3506 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3507 case Match_AddSubSecondSource:
3509 "expected compatible register, symbol or integer in range [0, 4095]");
3510 case Match_LogicalSecondSource:
3511 return Error(Loc, "expected compatible register or logical immediate");
3512 case Match_InvalidMovImm32Shift:
3513 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3514 case Match_InvalidMovImm64Shift:
3515 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3516 case Match_AddSubRegShift32:
3518 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3519 case Match_AddSubRegShift64:
3521 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3522 case Match_InvalidFPImm:
3524 "expected compatible register or floating-point constant");
3525 case Match_InvalidMemoryIndexedSImm9:
3526 return Error(Loc, "index must be an integer in range [-256, 255].");
3527 case Match_InvalidMemoryIndexed4SImm7:
3528 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3529 case Match_InvalidMemoryIndexed8SImm7:
3530 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3531 case Match_InvalidMemoryIndexed16SImm7:
3532 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3533 case Match_InvalidMemoryWExtend8:
3535 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3536 case Match_InvalidMemoryWExtend16:
3538 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3539 case Match_InvalidMemoryWExtend32:
3541 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3542 case Match_InvalidMemoryWExtend64:
3544 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3545 case Match_InvalidMemoryWExtend128:
3547 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3548 case Match_InvalidMemoryXExtend8:
3550 "expected 'lsl' or 'sxtx' with optional shift of #0");
3551 case Match_InvalidMemoryXExtend16:
3553 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3554 case Match_InvalidMemoryXExtend32:
3556 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3557 case Match_InvalidMemoryXExtend64:
3559 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3560 case Match_InvalidMemoryXExtend128:
3562 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3563 case Match_InvalidMemoryIndexed1:
3564 return Error(Loc, "index must be an integer in range [0, 4095].");
3565 case Match_InvalidMemoryIndexed2:
3566 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3567 case Match_InvalidMemoryIndexed4:
3568 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3569 case Match_InvalidMemoryIndexed8:
3570 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3571 case Match_InvalidMemoryIndexed16:
3572 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3573 case Match_InvalidImm0_7:
3574 return Error(Loc, "immediate must be an integer in range [0, 7].");
3575 case Match_InvalidImm0_15:
3576 return Error(Loc, "immediate must be an integer in range [0, 15].");
3577 case Match_InvalidImm0_31:
3578 return Error(Loc, "immediate must be an integer in range [0, 31].");
3579 case Match_InvalidImm0_63:
3580 return Error(Loc, "immediate must be an integer in range [0, 63].");
3581 case Match_InvalidImm0_127:
3582 return Error(Loc, "immediate must be an integer in range [0, 127].");
3583 case Match_InvalidImm0_65535:
3584 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3585 case Match_InvalidImm1_8:
3586 return Error(Loc, "immediate must be an integer in range [1, 8].");
3587 case Match_InvalidImm1_16:
3588 return Error(Loc, "immediate must be an integer in range [1, 16].");
3589 case Match_InvalidImm1_32:
3590 return Error(Loc, "immediate must be an integer in range [1, 32].");
3591 case Match_InvalidImm1_64:
3592 return Error(Loc, "immediate must be an integer in range [1, 64].");
3593 case Match_InvalidIndex1:
3594 return Error(Loc, "expected lane specifier '[1]'");
3595 case Match_InvalidIndexB:
3596 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3597 case Match_InvalidIndexH:
3598 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3599 case Match_InvalidIndexS:
3600 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3601 case Match_InvalidIndexD:
3602 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3603 case Match_InvalidLabel:
3604 return Error(Loc, "expected label or encodable integer pc offset");
3606 return Error(Loc, "expected readable system register");
3608 return Error(Loc, "expected writable system register or pstate");
3609 case Match_MnemonicFail:
3610 return Error(Loc, "unrecognized instruction mnemonic");
3612 llvm_unreachable("unexpected error code!");
3616 static const char *getSubtargetFeatureName(uint64_t Val);
3618 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3619 OperandVector &Operands,
3621 uint64_t &ErrorInfo,
3622 bool MatchingInlineAsm) {
3623 assert(!Operands.empty() && "Unexpect empty operand list!");
3624 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3625 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3627 StringRef Tok = Op.getToken();
3628 unsigned NumOperands = Operands.size();
3630 if (NumOperands == 4 && Tok == "lsl") {
3631 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3632 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3633 if (Op2.isReg() && Op3.isImm()) {
3634 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3636 uint64_t Op3Val = Op3CE->getValue();
3637 uint64_t NewOp3Val = 0;
3638 uint64_t NewOp4Val = 0;
3639 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3641 NewOp3Val = (32 - Op3Val) & 0x1f;
3642 NewOp4Val = 31 - Op3Val;
3644 NewOp3Val = (64 - Op3Val) & 0x3f;
3645 NewOp4Val = 63 - Op3Val;
3648 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3649 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3651 Operands[0] = AArch64Operand::CreateToken(
3652 "ubfm", false, Op.getStartLoc(), getContext());
3653 Operands.push_back(AArch64Operand::CreateImm(
3654 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3655 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3656 Op3.getEndLoc(), getContext());
3659 } else if (NumOperands == 4 && Tok == "bfc") {
3660 // FIXME: Horrible hack to handle BFC->BFM alias.
3661 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3662 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3663 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3665 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3666 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3667 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3669 if (LSBCE && WidthCE) {
3670 uint64_t LSB = LSBCE->getValue();
3671 uint64_t Width = WidthCE->getValue();
3673 uint64_t RegWidth = 0;
3674 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3680 if (LSB >= RegWidth)
3681 return Error(LSBOp.getStartLoc(),
3682 "expected integer in range [0, 31]");
3683 if (Width < 1 || Width > RegWidth)
3684 return Error(WidthOp.getStartLoc(),
3685 "expected integer in range [1, 32]");
3689 ImmR = (32 - LSB) & 0x1f;
3691 ImmR = (64 - LSB) & 0x3f;
3693 uint64_t ImmS = Width - 1;
3695 if (ImmR != 0 && ImmS >= ImmR)
3696 return Error(WidthOp.getStartLoc(),
3697 "requested insert overflows register");
3699 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3700 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3701 Operands[0] = AArch64Operand::CreateToken(
3702 "bfm", false, Op.getStartLoc(), getContext());
3703 Operands[2] = AArch64Operand::CreateReg(
3704 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3705 SMLoc(), getContext());
3706 Operands[3] = AArch64Operand::CreateImm(
3707 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3708 Operands.emplace_back(
3709 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3710 WidthOp.getEndLoc(), getContext()));
3713 } else if (NumOperands == 5) {
3714 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3715 // UBFIZ -> UBFM aliases.
3716 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3717 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3718 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3719 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3721 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3722 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3723 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3725 if (Op3CE && Op4CE) {
3726 uint64_t Op3Val = Op3CE->getValue();
3727 uint64_t Op4Val = Op4CE->getValue();
3729 uint64_t RegWidth = 0;
3730 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3736 if (Op3Val >= RegWidth)
3737 return Error(Op3.getStartLoc(),
3738 "expected integer in range [0, 31]");
3739 if (Op4Val < 1 || Op4Val > RegWidth)
3740 return Error(Op4.getStartLoc(),
3741 "expected integer in range [1, 32]");
3743 uint64_t NewOp3Val = 0;
3745 NewOp3Val = (32 - Op3Val) & 0x1f;
3747 NewOp3Val = (64 - Op3Val) & 0x3f;
3749 uint64_t NewOp4Val = Op4Val - 1;
3751 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3752 return Error(Op4.getStartLoc(),
3753 "requested insert overflows register");
3755 const MCExpr *NewOp3 =
3756 MCConstantExpr::create(NewOp3Val, getContext());
3757 const MCExpr *NewOp4 =
3758 MCConstantExpr::create(NewOp4Val, getContext());
3759 Operands[3] = AArch64Operand::CreateImm(
3760 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3761 Operands[4] = AArch64Operand::CreateImm(
3762 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3764 Operands[0] = AArch64Operand::CreateToken(
3765 "bfm", false, Op.getStartLoc(), getContext());
3766 else if (Tok == "sbfiz")
3767 Operands[0] = AArch64Operand::CreateToken(
3768 "sbfm", false, Op.getStartLoc(), getContext());
3769 else if (Tok == "ubfiz")
3770 Operands[0] = AArch64Operand::CreateToken(
3771 "ubfm", false, Op.getStartLoc(), getContext());
3773 llvm_unreachable("No valid mnemonic for alias?");
3777 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3778 // UBFX -> UBFM aliases.
3779 } else if (NumOperands == 5 &&
3780 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3781 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3782 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3783 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3785 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3786 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3787 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3789 if (Op3CE && Op4CE) {
3790 uint64_t Op3Val = Op3CE->getValue();
3791 uint64_t Op4Val = Op4CE->getValue();
3793 uint64_t RegWidth = 0;
3794 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3800 if (Op3Val >= RegWidth)
3801 return Error(Op3.getStartLoc(),
3802 "expected integer in range [0, 31]");
3803 if (Op4Val < 1 || Op4Val > RegWidth)
3804 return Error(Op4.getStartLoc(),
3805 "expected integer in range [1, 32]");
3807 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3809 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3810 return Error(Op4.getStartLoc(),
3811 "requested extract overflows register");
3813 const MCExpr *NewOp4 =
3814 MCConstantExpr::create(NewOp4Val, getContext());
3815 Operands[4] = AArch64Operand::CreateImm(
3816 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3818 Operands[0] = AArch64Operand::CreateToken(
3819 "bfm", false, Op.getStartLoc(), getContext());
3820 else if (Tok == "sbfx")
3821 Operands[0] = AArch64Operand::CreateToken(
3822 "sbfm", false, Op.getStartLoc(), getContext());
3823 else if (Tok == "ubfx")
3824 Operands[0] = AArch64Operand::CreateToken(
3825 "ubfm", false, Op.getStartLoc(), getContext());
3827 llvm_unreachable("No valid mnemonic for alias?");
3832 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3833 // InstAlias can't quite handle this since the reg classes aren't
3835 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3836 // The source register can be Wn here, but the matcher expects a
3837 // GPR64. Twiddle it here if necessary.
3838 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3840 unsigned Reg = getXRegFromWReg(Op.getReg());
3841 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3842 Op.getEndLoc(), getContext());
3845 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3846 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3847 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3849 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3851 // The source register can be Wn here, but the matcher expects a
3852 // GPR64. Twiddle it here if necessary.
3853 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3855 unsigned Reg = getXRegFromWReg(Op.getReg());
3856 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3857 Op.getEndLoc(), getContext());
3861 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3862 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3863 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3865 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3867 // The source register can be Wn here, but the matcher expects a
3868 // GPR32. Twiddle it here if necessary.
3869 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3871 unsigned Reg = getWRegFromXReg(Op.getReg());
3872 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3873 Op.getEndLoc(), getContext());
3878 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3879 if (NumOperands == 3 && Tok == "fmov") {
3880 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3881 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3882 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3884 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3888 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3889 Op.getEndLoc(), getContext());
3894 // First try to match against the secondary set of tables containing the
3895 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3896 unsigned MatchResult =
3897 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3899 // If that fails, try against the alternate table containing long-form NEON:
3900 // "fadd v0.2s, v1.2s, v2.2s"
3901 if (MatchResult != Match_Success)
3903 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3905 switch (MatchResult) {
3906 case Match_Success: {
3907 // Perform range checking and other semantic validations
3908 SmallVector<SMLoc, 8> OperandLocs;
3909 NumOperands = Operands.size();
3910 for (unsigned i = 1; i < NumOperands; ++i)
3911 OperandLocs.push_back(Operands[i]->getStartLoc());
3912 if (validateInstruction(Inst, OperandLocs))
3916 Out.EmitInstruction(Inst, STI);
3919 case Match_MissingFeature: {
3920 assert(ErrorInfo && "Unknown missing feature!");
3921 // Special case the error message for the very common case where only
3922 // a single subtarget feature is missing (neon, e.g.).
3923 std::string Msg = "instruction requires:";
3925 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3926 if (ErrorInfo & Mask) {
3928 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3932 return Error(IDLoc, Msg);
3934 case Match_MnemonicFail:
3935 return showMatchError(IDLoc, MatchResult);
3936 case Match_InvalidOperand: {
3937 SMLoc ErrorLoc = IDLoc;
3938 if (ErrorInfo != ~0ULL) {
3939 if (ErrorInfo >= Operands.size())
3940 return Error(IDLoc, "too few operands for instruction");
3942 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3943 if (ErrorLoc == SMLoc())
3946 // If the match failed on a suffix token operand, tweak the diagnostic
3948 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3949 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3950 MatchResult = Match_InvalidSuffix;
3952 return showMatchError(ErrorLoc, MatchResult);
3954 case Match_InvalidMemoryIndexed1:
3955 case Match_InvalidMemoryIndexed2:
3956 case Match_InvalidMemoryIndexed4:
3957 case Match_InvalidMemoryIndexed8:
3958 case Match_InvalidMemoryIndexed16:
3959 case Match_InvalidCondCode:
3960 case Match_AddSubRegExtendSmall:
3961 case Match_AddSubRegExtendLarge:
3962 case Match_AddSubSecondSource:
3963 case Match_LogicalSecondSource:
3964 case Match_AddSubRegShift32:
3965 case Match_AddSubRegShift64:
3966 case Match_InvalidMovImm32Shift:
3967 case Match_InvalidMovImm64Shift:
3968 case Match_InvalidFPImm:
3969 case Match_InvalidMemoryWExtend8:
3970 case Match_InvalidMemoryWExtend16:
3971 case Match_InvalidMemoryWExtend32:
3972 case Match_InvalidMemoryWExtend64:
3973 case Match_InvalidMemoryWExtend128:
3974 case Match_InvalidMemoryXExtend8:
3975 case Match_InvalidMemoryXExtend16:
3976 case Match_InvalidMemoryXExtend32:
3977 case Match_InvalidMemoryXExtend64:
3978 case Match_InvalidMemoryXExtend128:
3979 case Match_InvalidMemoryIndexed4SImm7:
3980 case Match_InvalidMemoryIndexed8SImm7:
3981 case Match_InvalidMemoryIndexed16SImm7:
3982 case Match_InvalidMemoryIndexedSImm9:
3983 case Match_InvalidImm0_7:
3984 case Match_InvalidImm0_15:
3985 case Match_InvalidImm0_31:
3986 case Match_InvalidImm0_63:
3987 case Match_InvalidImm0_127:
3988 case Match_InvalidImm0_65535:
3989 case Match_InvalidImm1_8:
3990 case Match_InvalidImm1_16:
3991 case Match_InvalidImm1_32:
3992 case Match_InvalidImm1_64:
3993 case Match_InvalidIndex1:
3994 case Match_InvalidIndexB:
3995 case Match_InvalidIndexH:
3996 case Match_InvalidIndexS:
3997 case Match_InvalidIndexD:
3998 case Match_InvalidLabel:
4001 if (ErrorInfo >= Operands.size())
4002 return Error(IDLoc, "too few operands for instruction");
4003 // Any time we get here, there's nothing fancy to do. Just get the
4004 // operand SMLoc and display the diagnostic.
4005 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4006 if (ErrorLoc == SMLoc())
4008 return showMatchError(ErrorLoc, MatchResult);
4012 llvm_unreachable("Implement any new match types added!");
4015 /// ParseDirective parses the arm specific directives
4016 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4017 const MCObjectFileInfo::Environment Format =
4018 getContext().getObjectFileInfo()->getObjectFileType();
4019 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4020 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4022 StringRef IDVal = DirectiveID.getIdentifier();
4023 SMLoc Loc = DirectiveID.getLoc();
4024 if (IDVal == ".hword")
4025 return parseDirectiveWord(2, Loc);
4026 if (IDVal == ".word")
4027 return parseDirectiveWord(4, Loc);
4028 if (IDVal == ".xword")
4029 return parseDirectiveWord(8, Loc);
4030 if (IDVal == ".tlsdesccall")
4031 return parseDirectiveTLSDescCall(Loc);
4032 if (IDVal == ".ltorg" || IDVal == ".pool")
4033 return parseDirectiveLtorg(Loc);
4034 if (IDVal == ".unreq")
4035 return parseDirectiveUnreq(Loc);
4037 if (!IsMachO && !IsCOFF) {
4038 if (IDVal == ".inst")
4039 return parseDirectiveInst(Loc);
4042 return parseDirectiveLOH(IDVal, Loc);
4045 /// parseDirectiveWord
4046 /// ::= .word [ expression (, expression)* ]
4047 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4048 MCAsmParser &Parser = getParser();
4049 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4051 const MCExpr *Value;
4052 if (getParser().parseExpression(Value))
4055 getParser().getStreamer().EmitValue(Value, Size);
4057 if (getLexer().is(AsmToken::EndOfStatement))
4060 // FIXME: Improve diagnostic.
4061 if (getLexer().isNot(AsmToken::Comma))
4062 return Error(L, "unexpected token in directive");
4071 /// parseDirectiveInst
4072 /// ::= .inst opcode [, ...]
4073 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4074 MCAsmParser &Parser = getParser();
4075 if (getLexer().is(AsmToken::EndOfStatement)) {
4076 Parser.eatToEndOfStatement();
4077 Error(Loc, "expected expression following directive");
4084 if (getParser().parseExpression(Expr)) {
4085 Error(Loc, "expected expression");
4089 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4091 Error(Loc, "expected constant expression");
4095 getTargetStreamer().emitInst(Value->getValue());
4097 if (getLexer().is(AsmToken::EndOfStatement))
4100 if (getLexer().isNot(AsmToken::Comma)) {
4101 Error(Loc, "unexpected token in directive");
4105 Parser.Lex(); // Eat comma.
4112 // parseDirectiveTLSDescCall:
4113 // ::= .tlsdesccall symbol
4114 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4116 if (getParser().parseIdentifier(Name))
4117 return Error(L, "expected symbol after directive");
4119 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4120 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4121 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4124 Inst.setOpcode(AArch64::TLSDESCCALL);
4125 Inst.addOperand(MCOperand::createExpr(Expr));
4127 getParser().getStreamer().EmitInstruction(Inst, STI);
4131 /// ::= .loh <lohName | lohId> label1, ..., labelN
4132 /// The number of arguments depends on the loh identifier.
4133 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4134 if (IDVal != MCLOHDirectiveName())
4137 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4138 if (getParser().getTok().isNot(AsmToken::Integer))
4139 return TokError("expected an identifier or a number in directive");
4140 // We successfully get a numeric value for the identifier.
4141 // Check if it is valid.
4142 int64_t Id = getParser().getTok().getIntVal();
4143 if (Id <= -1U && !isValidMCLOHType(Id))
4144 return TokError("invalid numeric identifier in directive");
4145 Kind = (MCLOHType)Id;
4147 StringRef Name = getTok().getIdentifier();
4148 // We successfully parse an identifier.
4149 // Check if it is a recognized one.
4150 int Id = MCLOHNameToId(Name);
4153 return TokError("invalid identifier in directive");
4154 Kind = (MCLOHType)Id;
4156 // Consume the identifier.
4158 // Get the number of arguments of this LOH.
4159 int NbArgs = MCLOHIdToNbArgs(Kind);
4161 assert(NbArgs != -1 && "Invalid number of arguments");
4163 SmallVector<MCSymbol *, 3> Args;
4164 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4166 if (getParser().parseIdentifier(Name))
4167 return TokError("expected identifier in directive");
4168 Args.push_back(getContext().getOrCreateSymbol(Name));
4170 if (Idx + 1 == NbArgs)
4172 if (getLexer().isNot(AsmToken::Comma))
4173 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4176 if (getLexer().isNot(AsmToken::EndOfStatement))
4177 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4179 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4183 /// parseDirectiveLtorg
4184 /// ::= .ltorg | .pool
4185 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4186 getTargetStreamer().emitCurrentConstantPool();
4190 /// parseDirectiveReq
4191 /// ::= name .req registername
4192 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4193 MCAsmParser &Parser = getParser();
4194 Parser.Lex(); // Eat the '.req' token.
4195 SMLoc SRegLoc = getLoc();
4196 unsigned RegNum = tryParseRegister();
4197 bool IsVector = false;
4199 if (RegNum == static_cast<unsigned>(-1)) {
4201 RegNum = tryMatchVectorRegister(Kind, false);
4202 if (!Kind.empty()) {
4203 Error(SRegLoc, "vector register without type specifier expected");
4209 if (RegNum == static_cast<unsigned>(-1)) {
4210 Parser.eatToEndOfStatement();
4211 Error(SRegLoc, "register name or alias expected");
4215 // Shouldn't be anything else.
4216 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4217 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4218 Parser.eatToEndOfStatement();
4222 Parser.Lex(); // Consume the EndOfStatement
4224 auto pair = std::make_pair(IsVector, RegNum);
4225 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4226 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4231 /// parseDirectiveUneq
4232 /// ::= .unreq registername
4233 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4234 MCAsmParser &Parser = getParser();
4235 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4236 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4237 Parser.eatToEndOfStatement();
4240 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4241 Parser.Lex(); // Eat the identifier.
4246 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4247 AArch64MCExpr::VariantKind &ELFRefKind,
4248 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4250 ELFRefKind = AArch64MCExpr::VK_INVALID;
4251 DarwinRefKind = MCSymbolRefExpr::VK_None;
4254 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4255 ELFRefKind = AE->getKind();
4256 Expr = AE->getSubExpr();
4259 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4261 // It's a simple symbol reference with no addend.
4262 DarwinRefKind = SE->getKind();
4266 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4270 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4273 DarwinRefKind = SE->getKind();
4275 if (BE->getOpcode() != MCBinaryExpr::Add &&
4276 BE->getOpcode() != MCBinaryExpr::Sub)
4279 // See if the addend is is a constant, otherwise there's more going
4280 // on here than we can deal with.
4281 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4285 Addend = AddendExpr->getValue();
4286 if (BE->getOpcode() == MCBinaryExpr::Sub)
4289 // It's some symbol reference + a constant addend, but really
4290 // shouldn't use both Darwin and ELF syntax.
4291 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4292 DarwinRefKind == MCSymbolRefExpr::VK_None;
4295 /// Force static initialization.
4296 extern "C" void LLVMInitializeAArch64AsmParser() {
4297 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4298 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4299 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4302 #define GET_REGISTER_MATCHER
4303 #define GET_SUBTARGET_FEATURE_NAME
4304 #define GET_MATCHER_IMPLEMENTATION
4305 #include "AArch64GenAsmMatcher.inc"
4307 // Define this matcher function after the auto-generated include so we
4308 // have the match class enum definitions.
4309 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4311 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4312 // If the kind is a token for a literal immediate, check if our asm
4313 // operand matches. This is for InstAliases which have a fixed-value
4314 // immediate in the syntax.
4315 int64_t ExpectedVal;
4318 return Match_InvalidOperand;
4360 return Match_InvalidOperand;
4361 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4363 return Match_InvalidOperand;
4364 if (CE->getValue() == ExpectedVal)
4365 return Match_Success;
4366 return Match_InvalidOperand;
4370 AArch64AsmParser::OperandMatchResultTy
4371 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4375 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4376 Error(S, "expected register");
4377 return MatchOperand_ParseFail;
4380 int FirstReg = tryParseRegister();
4381 if (FirstReg == -1) {
4382 return MatchOperand_ParseFail;
4384 const MCRegisterClass &WRegClass =
4385 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4386 const MCRegisterClass &XRegClass =
4387 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4389 bool isXReg = XRegClass.contains(FirstReg),
4390 isWReg = WRegClass.contains(FirstReg);
4391 if (!isXReg && !isWReg) {
4392 Error(S, "expected first even register of a "
4393 "consecutive same-size even/odd register pair");
4394 return MatchOperand_ParseFail;
4397 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4398 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4400 if (FirstEncoding & 0x1) {
4401 Error(S, "expected first even register of a "
4402 "consecutive same-size even/odd register pair");
4403 return MatchOperand_ParseFail;
4407 if (getParser().getTok().isNot(AsmToken::Comma)) {
4408 Error(M, "expected comma");
4409 return MatchOperand_ParseFail;
4415 int SecondReg = tryParseRegister();
4416 if (SecondReg ==-1) {
4417 return MatchOperand_ParseFail;
4420 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4421 (isXReg && !XRegClass.contains(SecondReg)) ||
4422 (isWReg && !WRegClass.contains(SecondReg))) {
4423 Error(E,"expected second odd register of a "
4424 "consecutive same-size even/odd register pair");
4425 return MatchOperand_ParseFail;
4430 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4431 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4433 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4434 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4437 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4440 return MatchOperand_Success;