1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
48 // Map of register aliases registers via the .req directive.
49 StringMap<std::pair<bool, unsigned> > RegisterReqs;
51 AArch64TargetStreamer &getTargetStreamer() {
52 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
53 return static_cast<AArch64TargetStreamer &>(TS);
// Location of the token currently under the lexer cursor; used as the
// anchor for diagnostics emitted while parsing operands.
56 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
58 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
59 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
60 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
61 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
62 int tryParseRegister();
63 int tryMatchVectorRegister(StringRef &Kind, bool expected);
64 bool parseRegister(OperandVector &Operands);
65 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
66 bool parseVectorList(OperandVector &Operands);
67 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Thin diagnostic helpers: forward to the underlying MCAsmParser so call
// sites in this file stay short. Error returns the parser's result
// (conventionally true, meaning "an error occurred" — TODO confirm against
// MCAsmParser::Error's contract).
70 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
71 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
72 bool showMatchError(SMLoc Loc, unsigned ErrCode);
74 bool parseDirectiveWord(unsigned Size, SMLoc L);
75 bool parseDirectiveInst(SMLoc L);
77 bool parseDirectiveTLSDescCall(SMLoc L);
79 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
80 bool parseDirectiveLtorg(SMLoc L);
82 bool parseDirectiveReq(StringRef Name, SMLoc L);
83 bool parseDirectiveUnreq(SMLoc L);
85 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
86 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
87 OperandVector &Operands, MCStreamer &Out,
89 bool MatchingInlineAsm) override;
90 /// @name Auto-generated Match Functions
93 #define GET_ASSEMBLER_HEADER
94 #include "AArch64GenAsmMatcher.inc"
98 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
99 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
100 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
102 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
103 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109 bool tryParseVectorRegister(OperandVector &Operands);
112 enum AArch64MatchResultTy {
113 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
114 #define GET_OPERAND_DIAGNOSTIC_TYPES
115 #include "AArch64GenAsmMatcher.inc"
117 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
118 const MCInstrInfo &MII, const MCTargetOptions &Options)
119 : MCTargetAsmParser(), STI(STI) {
120 MCAsmParserExtension::Initialize(Parser);
121 MCStreamer &S = getParser().getStreamer();
122 if (S.getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(S);
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 class AArch64Operand : public MCParsedAsmOperand {
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
217 uint32_t PStateField;
230 struct ShiftExtendOp {
231 AArch64_AM::ShiftExtendType Type;
233 bool HasExplicitAmount;
243 struct VectorListOp VectorList;
244 struct VectorIndexOp VectorIndex;
246 struct ShiftedImmOp ShiftedImm;
247 struct CondCodeOp CondCode;
248 struct FPImmOp FPImm;
249 struct BarrierOp Barrier;
250 struct SysRegOp SysReg;
251 struct SysCRImmOp SysCRImm;
252 struct PrefetchOp Prefetch;
253 struct ShiftExtendOp ShiftExtend;
256 // Keep the MCContext around as the MCExprs may need manipulated during
257 // the add<>Operands() calls.
261 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
263 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
265 StartLoc = o.StartLoc;
275 ShiftedImm = o.ShiftedImm;
278 CondCode = o.CondCode;
290 VectorList = o.VectorList;
293 VectorIndex = o.VectorIndex;
299 SysCRImm = o.SysCRImm;
302 Prefetch = o.Prefetch;
305 ShiftExtend = o.ShiftExtend;
// Source-range accessors required by the MCParsedAsmOperand interface;
// StartLoc/EndLoc are recorded when the operand is created during parsing.
310 /// getStartLoc - Get the location of the first token of this operand.
311 SMLoc getStartLoc() const override { return StartLoc; }
312 /// getEndLoc - Get the location of the last token of this operand.
313 SMLoc getEndLoc() const override { return EndLoc; }
315 StringRef getToken() const {
316 assert(Kind == k_Token && "Invalid access!");
317 return StringRef(Tok.Data, Tok.Length);
320 bool isTokenSuffix() const {
321 assert(Kind == k_Token && "Invalid access!");
325 const MCExpr *getImm() const {
326 assert(Kind == k_Immediate && "Invalid access!");
330 const MCExpr *getShiftedImmVal() const {
331 assert(Kind == k_ShiftedImm && "Invalid access!");
332 return ShiftedImm.Val;
335 unsigned getShiftedImmShift() const {
336 assert(Kind == k_ShiftedImm && "Invalid access!");
337 return ShiftedImm.ShiftAmount;
340 AArch64CC::CondCode getCondCode() const {
341 assert(Kind == k_CondCode && "Invalid access!");
342 return CondCode.Code;
345 unsigned getFPImm() const {
346 assert(Kind == k_FPImm && "Invalid access!");
350 unsigned getBarrier() const {
351 assert(Kind == k_Barrier && "Invalid access!");
355 StringRef getBarrierName() const {
356 assert(Kind == k_Barrier && "Invalid access!");
357 return StringRef(Barrier.Data, Barrier.Length);
360 unsigned getReg() const override {
361 assert(Kind == k_Register && "Invalid access!");
365 unsigned getVectorListStart() const {
366 assert(Kind == k_VectorList && "Invalid access!");
367 return VectorList.RegNum;
370 unsigned getVectorListCount() const {
371 assert(Kind == k_VectorList && "Invalid access!");
372 return VectorList.Count;
375 unsigned getVectorIndex() const {
376 assert(Kind == k_VectorIndex && "Invalid access!");
377 return VectorIndex.Val;
380 StringRef getSysReg() const {
381 assert(Kind == k_SysReg && "Invalid access!");
382 return StringRef(SysReg.Data, SysReg.Length);
385 unsigned getSysCR() const {
386 assert(Kind == k_SysCR && "Invalid access!");
390 unsigned getPrefetch() const {
391 assert(Kind == k_Prefetch && "Invalid access!");
395 StringRef getPrefetchName() const {
396 assert(Kind == k_Prefetch && "Invalid access!");
397 return StringRef(Prefetch.Data, Prefetch.Length);
400 AArch64_AM::ShiftExtendType getShiftExtendType() const {
401 assert(Kind == k_ShiftExtend && "Invalid access!");
402 return ShiftExtend.Type;
405 unsigned getShiftExtendAmount() const {
406 assert(Kind == k_ShiftExtend && "Invalid access!");
407 return ShiftExtend.Amount;
410 bool hasShiftExtendAmount() const {
411 assert(Kind == k_ShiftExtend && "Invalid access!");
412 return ShiftExtend.HasExplicitAmount;
// Kind predicates consulted by the auto-generated matcher. AArch64 never
// models operands as generic "memory" operands, so isMem is always false.
415 bool isImm() const override { return Kind == k_Immediate; }
416 bool isMem() const override { return false; }
417 bool isSImm9() const {
420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
423 int64_t Val = MCE->getValue();
424 return (Val >= -256 && Val < 256);
426 bool isSImm7s4() const {
429 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
432 int64_t Val = MCE->getValue();
433 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
435 bool isSImm7s8() const {
438 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
441 int64_t Val = MCE->getValue();
442 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
444 bool isSImm7s16() const {
447 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
450 int64_t Val = MCE->getValue();
451 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
454 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
455 AArch64MCExpr::VariantKind ELFRefKind;
456 MCSymbolRefExpr::VariantKind DarwinRefKind;
458 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
460 // If we don't understand the expression, assume the best and
461 // let the fixup and relocation code deal with it.
465 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
466 ELFRefKind == AArch64MCExpr::VK_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
474 // Note that we don't range-check the addend. It's adjusted modulo page
475 // size when converted, so there is no "out of range" condition when using
477 return Addend >= 0 && (Addend % Scale) == 0;
478 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
479 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
480 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
487 template <int Scale> bool isUImm12Offset() const {
491 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493 return isSymbolicUImm12Offset(getImm(), Scale);
495 int64_t Val = MCE->getValue();
496 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
499 bool isImm0_7() const {
502 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505 int64_t Val = MCE->getValue();
506 return (Val >= 0 && Val < 8);
508 bool isImm1_8() const {
511 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514 int64_t Val = MCE->getValue();
515 return (Val > 0 && Val < 9);
517 bool isImm0_15() const {
520 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523 int64_t Val = MCE->getValue();
524 return (Val >= 0 && Val < 16);
526 bool isImm1_16() const {
529 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532 int64_t Val = MCE->getValue();
533 return (Val > 0 && Val < 17);
535 bool isImm0_31() const {
538 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
541 int64_t Val = MCE->getValue();
542 return (Val >= 0 && Val < 32);
544 bool isImm1_31() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 int64_t Val = MCE->getValue();
551 return (Val >= 1 && Val < 32);
553 bool isImm1_32() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 int64_t Val = MCE->getValue();
560 return (Val >= 1 && Val < 33);
562 bool isImm0_63() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
569 return (Val >= 0 && Val < 64);
571 bool isImm1_63() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
578 return (Val >= 1 && Val < 64);
580 bool isImm1_64() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val >= 1 && Val < 65);
589 bool isImm0_127() const {
592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
595 int64_t Val = MCE->getValue();
596 return (Val >= 0 && Val < 128);
598 bool isImm0_255() const {
601 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604 int64_t Val = MCE->getValue();
605 return (Val >= 0 && Val < 256);
607 bool isImm0_65535() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 int64_t Val = MCE->getValue();
614 return (Val >= 0 && Val < 65536);
616 bool isImm32_63() const {
619 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
622 int64_t Val = MCE->getValue();
623 return (Val >= 32 && Val < 64);
625 bool isLogicalImm32() const {
628 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
631 int64_t Val = MCE->getValue();
632 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
635 return AArch64_AM::isLogicalImmediate(Val, 32);
637 bool isLogicalImm64() const {
640 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
643 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
645 bool isLogicalImm32Not() const {
648 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
651 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
652 return AArch64_AM::isLogicalImmediate(Val, 32);
654 bool isLogicalImm64Not() const {
657 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
660 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
// True for an immediate carrying an explicit shift (e.g. "#1, lsl #12").
662 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
663 bool isAddSubImm() const {
664 if (!isShiftedImm() && !isImm())
669 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
670 if (isShiftedImm()) {
671 unsigned Shift = ShiftedImm.ShiftAmount;
672 Expr = ShiftedImm.Val;
673 if (Shift != 0 && Shift != 12)
679 AArch64MCExpr::VariantKind ELFRefKind;
680 MCSymbolRefExpr::VariantKind DarwinRefKind;
682 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
683 DarwinRefKind, Addend)) {
684 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
685 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
686 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
687 || ELFRefKind == AArch64MCExpr::VK_LO12
688 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
690 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
691 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
693 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
694 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
697 // Otherwise it should be a real immediate in range:
698 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
699 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
// True when this operand is a parsed condition code (eq, ne, ...).
701 bool isCondCode() const { return Kind == k_CondCode; }
702 bool isSIMDImmType10() const {
705 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
708 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
710 bool isBranchTarget26() const {
713 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
716 int64_t Val = MCE->getValue();
719 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
721 bool isPCRelLabel19() const {
724 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
727 int64_t Val = MCE->getValue();
730 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
732 bool isBranchTarget14() const {
735 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
738 int64_t Val = MCE->getValue();
741 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
745 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
749 AArch64MCExpr::VariantKind ELFRefKind;
750 MCSymbolRefExpr::VariantKind DarwinRefKind;
752 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
753 DarwinRefKind, Addend)) {
756 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
759 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
760 if (ELFRefKind == AllowedModifiers[i])
767 bool isMovZSymbolG3() const {
768 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
771 bool isMovZSymbolG2() const {
772 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
773 AArch64MCExpr::VK_TPREL_G2,
774 AArch64MCExpr::VK_DTPREL_G2});
777 bool isMovZSymbolG1() const {
778 return isMovWSymbol({
779 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
780 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
781 AArch64MCExpr::VK_DTPREL_G1,
785 bool isMovZSymbolG0() const {
786 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
787 AArch64MCExpr::VK_TPREL_G0,
788 AArch64MCExpr::VK_DTPREL_G0});
791 bool isMovKSymbolG3() const {
792 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
795 bool isMovKSymbolG2() const {
796 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
799 bool isMovKSymbolG1() const {
800 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
801 AArch64MCExpr::VK_TPREL_G1_NC,
802 AArch64MCExpr::VK_DTPREL_G1_NC});
805 bool isMovKSymbolG0() const {
807 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
808 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
811 template<int RegWidth, int Shift>
812 bool isMOVZMovAlias() const {
813 if (!isImm()) return false;
815 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
816 if (!CE) return false;
817 uint64_t Value = CE->getValue();
820 Value &= 0xffffffffULL;
822 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
823 if (Value == 0 && Shift != 0)
826 return (Value & ~(0xffffULL << Shift)) == 0;
829 template<int RegWidth, int Shift>
830 bool isMOVNMovAlias() const {
831 if (!isImm()) return false;
833 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
834 if (!CE) return false;
835 uint64_t Value = CE->getValue();
837 // MOVZ takes precedence over MOVN.
838 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
839 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
844 Value &= 0xffffffffULL;
846 return (Value & ~(0xffffULL << Shift)) == 0;
// Simple kind predicates used by the auto-generated operand matcher.
849 bool isFPImm() const { return Kind == k_FPImm; }
850 bool isBarrier() const { return Kind == k_Barrier; }
851 bool isSysReg() const { return Kind == k_SysReg; }
852 bool isMRSSystemRegister() const {
853 if (!isSysReg()) return false;
855 return SysReg.MRSReg != -1U;
857 bool isMSRSystemRegister() const {
858 if (!isSysReg()) return false;
860 return SysReg.MSRReg != -1U;
862 bool isSystemPStateField() const {
863 if (!isSysReg()) return false;
865 return SysReg.PStateField != -1U;
// Scalar vs. vector register split: the single k_Register kind carries an
// isVector flag, so the override reports only scalar registers and
// isVectorReg reports only vector ones — the two are mutually exclusive.
867 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
868 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
869 bool isVectorRegLo() const {
870 return Kind == k_Register && Reg.isVector &&
871 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
874 bool isGPR32as64() const {
875 return Kind == k_Register && !Reg.isVector &&
876 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
879 bool isGPR64sp0() const {
880 return Kind == k_Register && !Reg.isVector &&
881 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
884 /// Is this a vector list with the type implicit (presumably attached to the
885 /// instruction itself)?
886 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
887 return Kind == k_VectorList && VectorList.Count == NumRegs &&
888 !VectorList.ElementKind;
891 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
892 bool isTypedVectorList() const {
893 if (Kind != k_VectorList)
895 if (VectorList.Count != NumRegs)
897 if (VectorList.ElementKind != ElementKind)
899 return VectorList.NumElements == NumElements;
902 bool isVectorIndex1() const {
903 return Kind == k_VectorIndex && VectorIndex.Val == 1;
905 bool isVectorIndexB() const {
906 return Kind == k_VectorIndex && VectorIndex.Val < 16;
908 bool isVectorIndexH() const {
909 return Kind == k_VectorIndex && VectorIndex.Val < 8;
911 bool isVectorIndexS() const {
912 return Kind == k_VectorIndex && VectorIndex.Val < 4;
914 bool isVectorIndexD() const {
915 return Kind == k_VectorIndex && VectorIndex.Val < 2;
// True when this operand is a bare token (mnemonic suffix, punctuation, ...).
917 bool isToken() const override { return Kind == k_Token; }
918 bool isTokenEqual(StringRef Str) const {
919 return Kind == k_Token && getToken() == Str;
// Remaining kind predicates for system-instruction CR immediates, prefetch
// operands, and shift/extend modifiers.
921 bool isSysCR() const { return Kind == k_SysCR; }
922 bool isPrefetch() const { return Kind == k_Prefetch; }
923 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
924 bool isShifter() const {
925 if (!isShiftExtend())
928 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
929 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
930 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
931 ST == AArch64_AM::MSL);
933 bool isExtend() const {
934 if (!isShiftExtend())
937 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
938 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
939 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
940 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
941 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
942 ET == AArch64_AM::LSL) &&
943 getShiftExtendAmount() <= 4;
946 bool isExtend64() const {
949 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
950 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
951 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
953 bool isExtendLSL64() const {
956 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
957 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
958 ET == AArch64_AM::LSL) &&
959 getShiftExtendAmount() <= 4;
962 template<int Width> bool isMemXExtend() const {
965 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
966 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
967 (getShiftExtendAmount() == Log2_32(Width / 8) ||
968 getShiftExtendAmount() == 0);
971 template<int Width> bool isMemWExtend() const {
974 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
975 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
976 (getShiftExtendAmount() == Log2_32(Width / 8) ||
977 getShiftExtendAmount() == 0);
980 template <unsigned width>
981 bool isArithmeticShifter() const {
985 // An arithmetic shifter is LSL, LSR, or ASR.
986 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
987 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
988 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
991 template <unsigned width>
992 bool isLogicalShifter() const {
996 // A logical shifter is LSL, LSR, ASR or ROR.
997 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
998 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
999 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1000 getShiftExtendAmount() < width;
1003 bool isMovImm32Shifter() const {
1007 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1008 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1009 if (ST != AArch64_AM::LSL)
1011 uint64_t Val = getShiftExtendAmount();
1012 return (Val == 0 || Val == 16);
1015 bool isMovImm64Shifter() const {
1019 // A MOVi shifter is LSL of 0 or 16.
1020 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1021 if (ST != AArch64_AM::LSL)
1023 uint64_t Val = getShiftExtendAmount();
1024 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1027 bool isLogicalVecShifter() const {
1031 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1032 unsigned Shift = getShiftExtendAmount();
1033 return getShiftExtendType() == AArch64_AM::LSL &&
1034 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1037 bool isLogicalVecHalfWordShifter() const {
1038 if (!isLogicalVecShifter())
1041 // A logical vector shifter is a left shift by 0 or 8.
1042 unsigned Shift = getShiftExtendAmount();
1043 return getShiftExtendType() == AArch64_AM::LSL &&
1044 (Shift == 0 || Shift == 8);
1047 bool isMoveVecShifter() const {
1048 if (!isShiftExtend())
1051 // A logical vector shifter is a left shift by 8 or 16.
1052 unsigned Shift = getShiftExtendAmount();
1053 return getShiftExtendType() == AArch64_AM::MSL &&
1054 (Shift == 8 || Shift == 16);
1057 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1058 // to LDUR/STUR when the offset is not legal for the former but is for
1059 // the latter. As such, in addition to checking for being a legal unscaled
1060 // address, also check that it is not a legal scaled address. This avoids
1061 // ambiguity in the matcher.
1063 bool isSImm9OffsetFB() const {
1064 return isSImm9() && !isUImm12Offset<Width / 8>();
1067 bool isAdrpLabel() const {
1068 // Validation was handled during parsing, so we just sanity check that
1069 // something didn't go haywire.
1073 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1074 int64_t Val = CE->getValue();
1075 int64_t Min = - (4096 * (1LL << (21 - 1)));
1076 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1077 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1083 bool isAdrLabel() const {
1084 // Validation was handled during parsing, so we just sanity check that
1085 // something didn't go haywire.
1089 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1090 int64_t Val = CE->getValue();
1091 int64_t Min = - (1LL << (21 - 1));
1092 int64_t Max = ((1LL << (21 - 1)) - 1);
1093 return Val >= Min && Val <= Max;
1099 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1100 // Add as immediates when possible. Null MCExpr = 0.
1102 Inst.addOperand(MCOperand::createImm(0));
1103 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1104 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1106 Inst.addOperand(MCOperand::createExpr(Expr));
1109 void addRegOperands(MCInst &Inst, unsigned N) const {
1110 assert(N == 1 && "Invalid number of operands!");
1111 Inst.addOperand(MCOperand::createReg(getReg()));
1114 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1115 assert(N == 1 && "Invalid number of operands!");
1117 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1119 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1120 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1121 RI->getEncodingValue(getReg()));
1123 Inst.addOperand(MCOperand::createReg(Reg));
1126 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1127 assert(N == 1 && "Invalid number of operands!");
1129 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1130 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1133 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1134 assert(N == 1 && "Invalid number of operands!");
1136 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1137 Inst.addOperand(MCOperand::createReg(getReg()));
1140 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1141 assert(N == 1 && "Invalid number of operands!");
1142 Inst.addOperand(MCOperand::createReg(getReg()));
1145 template <unsigned NumRegs>
1146 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1147 assert(N == 1 && "Invalid number of operands!");
1148 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1149 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1150 unsigned FirstReg = FirstRegs[NumRegs - 1];
1153 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1156 template <unsigned NumRegs>
1157 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1158 assert(N == 1 && "Invalid number of operands!");
1159 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1160 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1161 unsigned FirstReg = FirstRegs[NumRegs - 1];
1164 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1167 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1169 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1172 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1173 assert(N == 1 && "Invalid number of operands!");
1174 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1177 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1178 assert(N == 1 && "Invalid number of operands!");
1179 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1182 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1183 assert(N == 1 && "Invalid number of operands!");
1184 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1187 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1188 assert(N == 1 && "Invalid number of operands!");
1189 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1192 void addImmOperands(MCInst &Inst, unsigned N) const {
1193 assert(N == 1 && "Invalid number of operands!");
1194 // If this is a pageoff symrefexpr with an addend, adjust the addend
1195 // to be only the page-offset portion. Otherwise, just add the expr
1197 addExpr(Inst, getImm());
1200 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1201 assert(N == 2 && "Invalid number of operands!");
1202 if (isShiftedImm()) {
1203 addExpr(Inst, getShiftedImmVal());
1204 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1206 addExpr(Inst, getImm());
1207 Inst.addOperand(MCOperand::createImm(0));
1211 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1212 assert(N == 1 && "Invalid number of operands!");
1213 Inst.addOperand(MCOperand::createImm(getCondCode()));
1216 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1217 assert(N == 1 && "Invalid number of operands!");
1218 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1220 addExpr(Inst, getImm());
1222 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1225 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1226 addImmOperands(Inst, N);
1230 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1231 assert(N == 1 && "Invalid number of operands!");
1232 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1235 Inst.addOperand(MCOperand::createExpr(getImm()));
1238 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1241 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1242 assert(N == 1 && "Invalid number of operands!");
1243 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1244 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1247 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1248 assert(N == 1 && "Invalid number of operands!");
1249 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1250 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1253 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1254 assert(N == 1 && "Invalid number of operands!");
1255 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1256 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1259 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1260 assert(N == 1 && "Invalid number of operands!");
1261 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1262 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1265 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1266 assert(N == 1 && "Invalid number of operands!");
1267 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1268 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1271 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1272 assert(N == 1 && "Invalid number of operands!");
1273 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1274 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1277 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1280 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1283 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1284 assert(N == 1 && "Invalid number of operands!");
1285 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1286 assert(MCE && "Invalid constant immediate operand!");
1287 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1290 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1291 assert(N == 1 && "Invalid number of operands!");
1292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1293 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1296 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1299 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1302 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1303 assert(N == 1 && "Invalid number of operands!");
1304 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1305 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1308 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1309 assert(N == 1 && "Invalid number of operands!");
1310 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1311 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1314 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1315 assert(N == 1 && "Invalid number of operands!");
1316 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1317 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1320 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1321 assert(N == 1 && "Invalid number of operands!");
1322 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1323 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1326 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1327 assert(N == 1 && "Invalid number of operands!");
1328 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1329 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1332 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1333 assert(N == 1 && "Invalid number of operands!");
1334 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1335 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1338 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1340 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1341 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1344 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1345 assert(N == 1 && "Invalid number of operands!");
1346 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1347 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1350 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1351 assert(N == 1 && "Invalid number of operands!");
1352 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1354 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1355 Inst.addOperand(MCOperand::createImm(encoding));
1358 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1359 assert(N == 1 && "Invalid number of operands!");
1360 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1361 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1362 Inst.addOperand(MCOperand::createImm(encoding));
1365 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1366 assert(N == 1 && "Invalid number of operands!");
1367 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1368 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1369 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1370 Inst.addOperand(MCOperand::createImm(encoding));
1373 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1374 assert(N == 1 && "Invalid number of operands!");
1375 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1377 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1378 Inst.addOperand(MCOperand::createImm(encoding));
1381 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1382 assert(N == 1 && "Invalid number of operands!");
1383 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1384 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1385 Inst.addOperand(MCOperand::createImm(encoding));
1388 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1389 // Branch operands don't encode the low bits, so shift them off
1390 // here. If it's a label, however, just put it on directly as there's
1391 // not enough information now to do anything.
1392 assert(N == 1 && "Invalid number of operands!");
1393 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1395 addExpr(Inst, getImm());
1398 assert(MCE && "Invalid constant immediate operand!");
1399 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1402 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1403 // Branch operands don't encode the low bits, so shift them off
1404 // here. If it's a label, however, just put it on directly as there's
1405 // not enough information now to do anything.
1406 assert(N == 1 && "Invalid number of operands!");
1407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1409 addExpr(Inst, getImm());
1412 assert(MCE && "Invalid constant immediate operand!");
1413 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1416 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1417 // Branch operands don't encode the low bits, so shift them off
1418 // here. If it's a label, however, just put it on directly as there's
1419 // not enough information now to do anything.
1420 assert(N == 1 && "Invalid number of operands!");
1421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1423 addExpr(Inst, getImm());
1426 assert(MCE && "Invalid constant immediate operand!");
1427 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1430 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1431 assert(N == 1 && "Invalid number of operands!");
1432 Inst.addOperand(MCOperand::createImm(getFPImm()));
1435 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1436 assert(N == 1 && "Invalid number of operands!");
1437 Inst.addOperand(MCOperand::createImm(getBarrier()));
1440 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1441 assert(N == 1 && "Invalid number of operands!");
1443 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1446 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1447 assert(N == 1 && "Invalid number of operands!");
1449 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1452 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1453 assert(N == 1 && "Invalid number of operands!");
1455 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1458 void addSysCROperands(MCInst &Inst, unsigned N) const {
1459 assert(N == 1 && "Invalid number of operands!");
1460 Inst.addOperand(MCOperand::createImm(getSysCR()));
1463 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1464 assert(N == 1 && "Invalid number of operands!");
1465 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1468 void addShifterOperands(MCInst &Inst, unsigned N) const {
1469 assert(N == 1 && "Invalid number of operands!");
1471 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1472 Inst.addOperand(MCOperand::createImm(Imm));
1475 void addExtendOperands(MCInst &Inst, unsigned N) const {
1476 assert(N == 1 && "Invalid number of operands!");
1477 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1478 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1479 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1480 Inst.addOperand(MCOperand::createImm(Imm));
1483 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1484 assert(N == 1 && "Invalid number of operands!");
1485 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1486 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1487 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1488 Inst.addOperand(MCOperand::createImm(Imm));
1491 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1492 assert(N == 2 && "Invalid number of operands!");
1493 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1494 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1495 Inst.addOperand(MCOperand::createImm(IsSigned));
1496 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1499 // For 8-bit load/store instructions with a register offset, both the
1500 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1501 // they're disambiguated by whether the shift was explicit or implicit rather
1503 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1504 assert(N == 2 && "Invalid number of operands!");
1505 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1506 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1507 Inst.addOperand(MCOperand::createImm(IsSigned));
1508 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1512 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1513 assert(N == 1 && "Invalid number of operands!");
1515 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1516 uint64_t Value = CE->getValue();
1517 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1521 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1522 assert(N == 1 && "Invalid number of operands!");
1524 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1525 uint64_t Value = CE->getValue();
1526 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1529 void print(raw_ostream &OS) const override;
1531 static std::unique_ptr<AArch64Operand>
1532 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1533 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1534 Op->Tok.Data = Str.data();
1535 Op->Tok.Length = Str.size();
1536 Op->Tok.IsSuffix = IsSuffix;
1542 static std::unique_ptr<AArch64Operand>
1543 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1544 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1545 Op->Reg.RegNum = RegNum;
1546 Op->Reg.isVector = isVector;
1552 static std::unique_ptr<AArch64Operand>
1553 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1554 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1555 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1556 Op->VectorList.RegNum = RegNum;
1557 Op->VectorList.Count = Count;
1558 Op->VectorList.NumElements = NumElements;
1559 Op->VectorList.ElementKind = ElementKind;
1565 static std::unique_ptr<AArch64Operand>
1566 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1567 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1568 Op->VectorIndex.Val = Idx;
1574 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1575 SMLoc E, MCContext &Ctx) {
1576 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1583 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1584 unsigned ShiftAmount,
1587 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1588 Op->ShiftedImm .Val = Val;
1589 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1595 static std::unique_ptr<AArch64Operand>
1596 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1597 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1598 Op->CondCode.Code = Code;
1604 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1606 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1607 Op->FPImm.Val = Val;
1613 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1617 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1618 Op->Barrier.Val = Val;
1619 Op->Barrier.Data = Str.data();
1620 Op->Barrier.Length = Str.size();
1626 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1629 uint32_t PStateField,
1631 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1632 Op->SysReg.Data = Str.data();
1633 Op->SysReg.Length = Str.size();
1634 Op->SysReg.MRSReg = MRSReg;
1635 Op->SysReg.MSRReg = MSRReg;
1636 Op->SysReg.PStateField = PStateField;
1642 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1643 SMLoc E, MCContext &Ctx) {
1644 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1645 Op->SysCRImm.Val = Val;
1651 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1655 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1656 Op->Prefetch.Val = Val;
1657 Op->Barrier.Data = Str.data();
1658 Op->Barrier.Length = Str.size();
1664 static std::unique_ptr<AArch64Operand>
1665 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1666 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1667 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1668 Op->ShiftExtend.Type = ShOp;
1669 Op->ShiftExtend.Amount = Val;
1670 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1677 } // end anonymous namespace.
1679 void AArch64Operand::print(raw_ostream &OS) const {
1682 OS << "<fpimm " << getFPImm() << "("
1683 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1686 StringRef Name = getBarrierName();
1688 OS << "<barrier " << Name << ">";
1690 OS << "<barrier invalid #" << getBarrier() << ">";
1696 case k_ShiftedImm: {
1697 unsigned Shift = getShiftedImmShift();
1698 OS << "<shiftedimm ";
1699 OS << *getShiftedImmVal();
1700 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1704 OS << "<condcode " << getCondCode() << ">";
1707 OS << "<register " << getReg() << ">";
1709 case k_VectorList: {
1710 OS << "<vectorlist ";
1711 unsigned Reg = getVectorListStart();
1712 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1713 OS << Reg + i << " ";
1718 OS << "<vectorindex " << getVectorIndex() << ">";
1721 OS << "<sysreg: " << getSysReg() << '>';
1724 OS << "'" << getToken() << "'";
1727 OS << "c" << getSysCR();
1730 StringRef Name = getPrefetchName();
1732 OS << "<prfop " << Name << ">";
1734 OS << "<prfop invalid #" << getPrefetch() << ">";
1737 case k_ShiftExtend: {
1738 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1739 << getShiftExtendAmount();
1740 if (!hasShiftExtendAmount())
1748 /// @name Auto-generated Match Functions
1751 static unsigned MatchRegisterName(StringRef Name);
1755 static unsigned matchVectorRegName(StringRef Name) {
1756 return StringSwitch<unsigned>(Name)
1757 .Case("v0", AArch64::Q0)
1758 .Case("v1", AArch64::Q1)
1759 .Case("v2", AArch64::Q2)
1760 .Case("v3", AArch64::Q3)
1761 .Case("v4", AArch64::Q4)
1762 .Case("v5", AArch64::Q5)
1763 .Case("v6", AArch64::Q6)
1764 .Case("v7", AArch64::Q7)
1765 .Case("v8", AArch64::Q8)
1766 .Case("v9", AArch64::Q9)
1767 .Case("v10", AArch64::Q10)
1768 .Case("v11", AArch64::Q11)
1769 .Case("v12", AArch64::Q12)
1770 .Case("v13", AArch64::Q13)
1771 .Case("v14", AArch64::Q14)
1772 .Case("v15", AArch64::Q15)
1773 .Case("v16", AArch64::Q16)
1774 .Case("v17", AArch64::Q17)
1775 .Case("v18", AArch64::Q18)
1776 .Case("v19", AArch64::Q19)
1777 .Case("v20", AArch64::Q20)
1778 .Case("v21", AArch64::Q21)
1779 .Case("v22", AArch64::Q22)
1780 .Case("v23", AArch64::Q23)
1781 .Case("v24", AArch64::Q24)
1782 .Case("v25", AArch64::Q25)
1783 .Case("v26", AArch64::Q26)
1784 .Case("v27", AArch64::Q27)
1785 .Case("v28", AArch64::Q28)
1786 .Case("v29", AArch64::Q29)
1787 .Case("v30", AArch64::Q30)
1788 .Case("v31", AArch64::Q31)
1792 static bool isValidVectorKind(StringRef Name) {
1793 return StringSwitch<bool>(Name.lower())
1803 // Accept the width neutral ones, too, for verbose syntax. If those
1804 // aren't used in the right places, the token operand won't match so
1805 // all will work out.
1813 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1814 char &ElementKind) {
1815 assert(isValidVectorKind(Name));
1817 ElementKind = Name.lower()[Name.size() - 1];
1820 if (Name.size() == 2)
1823 // Parse the lane count
1824 Name = Name.drop_front();
1825 while (isdigit(Name.front())) {
1826 NumElements = 10 * NumElements + (Name.front() - '0');
1827 Name = Name.drop_front();
1831 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1833 StartLoc = getLoc();
1834 RegNo = tryParseRegister();
1835 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1836 return (RegNo == (unsigned)-1);
1839 // Matches a register name or register alias previously defined by '.req'
1840 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1842 unsigned RegNum = isVector ? matchVectorRegName(Name)
1843 : MatchRegisterName(Name);
1846 // Check for aliases registered via .req. Canonicalize to lower case.
1847 // That's more consistent since register names are case insensitive, and
1848 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1849 auto Entry = RegisterReqs.find(Name.lower());
1850 if (Entry == RegisterReqs.end())
1852 // set RegNum if the match is the right kind of register
1853 if (isVector == Entry->getValue().first)
1854 RegNum = Entry->getValue().second;
1859 /// tryParseRegister - Try to parse a register name. The token must be an
1860 /// Identifier when called, and if it is a register name the token is eaten and
1861 /// the register is added to the operand list.
1862 int AArch64AsmParser::tryParseRegister() {
1863 MCAsmParser &Parser = getParser();
1864 const AsmToken &Tok = Parser.getTok();
1865 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1867 std::string lowerCase = Tok.getString().lower();
1868 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1869 // Also handle a few aliases of registers.
1871 RegNum = StringSwitch<unsigned>(lowerCase)
1872 .Case("fp", AArch64::FP)
1873 .Case("lr", AArch64::LR)
1874 .Case("x31", AArch64::XZR)
1875 .Case("w31", AArch64::WZR)
1881 Parser.Lex(); // Eat identifier token.
1885 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1886 /// kind specifier. If it is a register specifier, eat the token and return it.
1887 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1888 MCAsmParser &Parser = getParser();
1889 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1890 TokError("vector register expected");
1894 StringRef Name = Parser.getTok().getString();
1895 // If there is a kind specifier, it's separated from the register name by
1897 size_t Start = 0, Next = Name.find('.');
1898 StringRef Head = Name.slice(Start, Next);
1899 unsigned RegNum = matchRegisterNameAlias(Head, true);
1902 if (Next != StringRef::npos) {
1903 Kind = Name.slice(Next, StringRef::npos);
1904 if (!isValidVectorKind(Kind)) {
1905 TokError("invalid vector kind qualifier");
1909 Parser.Lex(); // Eat the register token.
1914 TokError("vector register expected");
1918 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1919 AArch64AsmParser::OperandMatchResultTy
1920 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1921 MCAsmParser &Parser = getParser();
1924 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1925 Error(S, "Expected cN operand where 0 <= N <= 15");
1926 return MatchOperand_ParseFail;
1929 StringRef Tok = Parser.getTok().getIdentifier();
1930 if (Tok[0] != 'c' && Tok[0] != 'C') {
1931 Error(S, "Expected cN operand where 0 <= N <= 15");
1932 return MatchOperand_ParseFail;
1936 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1937 if (BadNum || CRNum > 15) {
1938 Error(S, "Expected cN operand where 0 <= N <= 15");
1939 return MatchOperand_ParseFail;
1942 Parser.Lex(); // Eat identifier token.
1944 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1945 return MatchOperand_Success;
1948 /// tryParsePrefetch - Try to parse a prefetch operand.
1949 AArch64AsmParser::OperandMatchResultTy
1950 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1951 MCAsmParser &Parser = getParser();
1953 const AsmToken &Tok = Parser.getTok();
1954 // Either an identifier for named values or a 5-bit immediate.
1955 bool Hash = Tok.is(AsmToken::Hash);
1956 if (Hash || Tok.is(AsmToken::Integer)) {
1958 Parser.Lex(); // Eat hash token.
1959 const MCExpr *ImmVal;
1960 if (getParser().parseExpression(ImmVal))
1961 return MatchOperand_ParseFail;
1963 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1965 TokError("immediate value expected for prefetch operand");
1966 return MatchOperand_ParseFail;
1968 unsigned prfop = MCE->getValue();
1970 TokError("prefetch operand out of range, [0,31] expected");
1971 return MatchOperand_ParseFail;
1975 auto Mapper = AArch64PRFM::PRFMMapper();
1977 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
1978 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
1980 return MatchOperand_Success;
1983 if (Tok.isNot(AsmToken::Identifier)) {
1984 TokError("pre-fetch hint expected");
1985 return MatchOperand_ParseFail;
1989 auto Mapper = AArch64PRFM::PRFMMapper();
1991 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
1993 TokError("pre-fetch hint expected");
1994 return MatchOperand_ParseFail;
1997 Parser.Lex(); // Eat identifier token.
1998 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2000 return MatchOperand_Success;
2003 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2005 AArch64AsmParser::OperandMatchResultTy
2006 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2007 MCAsmParser &Parser = getParser();
2011 if (Parser.getTok().is(AsmToken::Hash)) {
2012 Parser.Lex(); // Eat hash token.
2015 if (parseSymbolicImmVal(Expr))
2016 return MatchOperand_ParseFail;
2018 AArch64MCExpr::VariantKind ELFRefKind;
2019 MCSymbolRefExpr::VariantKind DarwinRefKind;
2021 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2022 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2023 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2024 // No modifier was specified at all; this is the syntax for an ELF basic
2025 // ADRP relocation (unfortunately).
2027 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2028 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2029 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2031 Error(S, "gotpage label reference not allowed an addend");
2032 return MatchOperand_ParseFail;
2033 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2034 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2035 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2036 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2037 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2038 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2039 // The operand must be an @page or @gotpage qualified symbolref.
2040 Error(S, "page or gotpage label reference expected");
2041 return MatchOperand_ParseFail;
2045 // We have either a label reference possibly with addend or an immediate. The
2046 // addend is a raw value here. The linker will adjust it to only reference the
2048 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2049 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2051 return MatchOperand_Success;
2054 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2056 AArch64AsmParser::OperandMatchResultTy
2057 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2058 MCAsmParser &Parser = getParser();
2062 if (Parser.getTok().is(AsmToken::Hash)) {
2063 Parser.Lex(); // Eat hash token.
2066 if (getParser().parseExpression(Expr))
2067 return MatchOperand_ParseFail;
2069 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2070 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2072 return MatchOperand_Success;
2075 /// tryParseFPImm - A floating point immediate expression operand.
2076 AArch64AsmParser::OperandMatchResultTy
2077 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2078 MCAsmParser &Parser = getParser();
2082 if (Parser.getTok().is(AsmToken::Hash)) {
2083 Parser.Lex(); // Eat '#'
2087 // Handle negation, as that still comes through as a separate token.
2088 bool isNegative = false;
2089 if (Parser.getTok().is(AsmToken::Minus)) {
2093 const AsmToken &Tok = Parser.getTok();
2094 if (Tok.is(AsmToken::Real)) {
2095 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2097 RealVal.changeSign();
2099 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2100 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2101 Parser.Lex(); // Eat the token.
2102 // Check for out of range values. As an exception, we let Zero through,
2103 // as we handle that special case in post-processing before matching in
2104 // order to use the zero register for it.
2105 if (Val == -1 && !RealVal.isPosZero()) {
2106 TokError("expected compatible register or floating-point constant");
2107 return MatchOperand_ParseFail;
2109 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2110 return MatchOperand_Success;
2112 if (Tok.is(AsmToken::Integer)) {
2114 if (!isNegative && Tok.getString().startswith("0x")) {
2115 Val = Tok.getIntVal();
2116 if (Val > 255 || Val < 0) {
2117 TokError("encoded floating point value out of range");
2118 return MatchOperand_ParseFail;
2121 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2122 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2123 // If we had a '-' in front, toggle the sign bit.
2124 IntVal ^= (uint64_t)isNegative << 63;
2125 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2127 Parser.Lex(); // Eat the token.
2128 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2129 return MatchOperand_Success;
2133 return MatchOperand_NoMatch;
2135 TokError("invalid floating point immediate");
2136 return MatchOperand_ParseFail;
2139 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2140 AArch64AsmParser::OperandMatchResultTy
2141 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2142 MCAsmParser &Parser = getParser();
2145 if (Parser.getTok().is(AsmToken::Hash))
2146 Parser.Lex(); // Eat '#'
2147 else if (Parser.getTok().isNot(AsmToken::Integer))
2148 // Operand should start from # or should be integer, emit error otherwise.
2149 return MatchOperand_NoMatch;
2152 if (parseSymbolicImmVal(Imm))
2153 return MatchOperand_ParseFail;
2154 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2155 uint64_t ShiftAmount = 0;
2156 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2158 int64_t Val = MCE->getValue();
2159 if (Val > 0xfff && (Val & 0xfff) == 0) {
2160 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2164 SMLoc E = Parser.getTok().getLoc();
2165 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2167 return MatchOperand_Success;
2173 // The optional operand must be "lsl #N" where N is non-negative.
2174 if (!Parser.getTok().is(AsmToken::Identifier) ||
2175 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2176 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2177 return MatchOperand_ParseFail;
2183 if (Parser.getTok().is(AsmToken::Hash)) {
2187 if (Parser.getTok().isNot(AsmToken::Integer)) {
2188 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2189 return MatchOperand_ParseFail;
2192 int64_t ShiftAmount = Parser.getTok().getIntVal();
2194 if (ShiftAmount < 0) {
2195 Error(Parser.getTok().getLoc(), "positive shift amount required");
2196 return MatchOperand_ParseFail;
2198 Parser.Lex(); // Eat the number
2200 SMLoc E = Parser.getTok().getLoc();
2201 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2202 S, E, getContext()));
2203 return MatchOperand_Success;
2206 /// parseCondCodeString - Parse a Condition Code string.
2207 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2208 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2209 .Case("eq", AArch64CC::EQ)
2210 .Case("ne", AArch64CC::NE)
2211 .Case("cs", AArch64CC::HS)
2212 .Case("hs", AArch64CC::HS)
2213 .Case("cc", AArch64CC::LO)
2214 .Case("lo", AArch64CC::LO)
2215 .Case("mi", AArch64CC::MI)
2216 .Case("pl", AArch64CC::PL)
2217 .Case("vs", AArch64CC::VS)
2218 .Case("vc", AArch64CC::VC)
2219 .Case("hi", AArch64CC::HI)
2220 .Case("ls", AArch64CC::LS)
2221 .Case("ge", AArch64CC::GE)
2222 .Case("lt", AArch64CC::LT)
2223 .Case("gt", AArch64CC::GT)
2224 .Case("le", AArch64CC::LE)
2225 .Case("al", AArch64CC::AL)
2226 .Case("nv", AArch64CC::NV)
2227 .Default(AArch64CC::Invalid);
2231 /// parseCondCode - Parse a Condition Code operand.
2232 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2233 bool invertCondCode) {
2234 MCAsmParser &Parser = getParser();
2236 const AsmToken &Tok = Parser.getTok();
2237 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2239 StringRef Cond = Tok.getString();
2240 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2241 if (CC == AArch64CC::Invalid)
2242 return TokError("invalid condition code");
2243 Parser.Lex(); // Eat identifier token.
2245 if (invertCondCode) {
2246 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2247 return TokError("condition codes AL and NV are invalid for this instruction");
2248 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2252 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2256 /// tryParseOptionalShiftExtend - Some operands take an optional shift argument. Parse
2257 /// them if present.
///
/// Recognizes both shift specifiers (lsl/lsr/asr/ror/msl), which require an
/// immediate amount, and extend specifiers (uxtb..sxtx), for which the
/// immediate is optional and defaults to an implicit #0.  Returns
/// MatchOperand_NoMatch if the current token is not a shift/extend name at
/// all, so the caller can try other operand forms without any tokens eaten.
2258 AArch64AsmParser::OperandMatchResultTy
2259 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2260 MCAsmParser &Parser = getParser();
2261 const AsmToken &Tok = Parser.getTok();
// Compare case-insensitively by lowering the token once up front.
2262 std::string LowerID = Tok.getString().lower();
2263 AArch64_AM::ShiftExtendType ShOp =
2264 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2265 .Case("lsl", AArch64_AM::LSL)
2266 .Case("lsr", AArch64_AM::LSR)
2267 .Case("asr", AArch64_AM::ASR)
2268 .Case("ror", AArch64_AM::ROR)
2269 .Case("msl", AArch64_AM::MSL)
2270 .Case("uxtb", AArch64_AM::UXTB)
2271 .Case("uxth", AArch64_AM::UXTH)
2272 .Case("uxtw", AArch64_AM::UXTW)
2273 .Case("uxtx", AArch64_AM::UXTX)
2274 .Case("sxtb", AArch64_AM::SXTB)
2275 .Case("sxth", AArch64_AM::SXTH)
2276 .Case("sxtw", AArch64_AM::SXTW)
2277 .Case("sxtx", AArch64_AM::SXTX)
2278 .Default(AArch64_AM::InvalidShiftExtend)
2280 if (ShOp == AArch64_AM::InvalidShiftExtend)
2281 return MatchOperand_NoMatch;
2283 SMLoc S = Tok.getLoc();
// The immediate may be written with or without a leading '#'.
2286 bool Hash = getLexer().is(AsmToken::Hash);
2287 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2288 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2289 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2290 ShOp == AArch64_AM::MSL) {
2291 // We expect a number here.
2292 TokError("expected #imm after shift specifier");
2293 return MatchOperand_ParseFail;
2296 // "extend" type operations don't need an immediate, #0 is implicit.
2297 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2299 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2300 return MatchOperand_Success;
2304 Parser.Lex(); // Eat the '#'.
2306 // Make sure we do actually have a number or a parenthesized expression.
2307 SMLoc E = Parser.getTok().getLoc();
2308 if (!Parser.getTok().is(AsmToken::Integer) &&
2309 !Parser.getTok().is(AsmToken::LParen)) {
2310 Error(E, "expected integer shift amount");
2311 return MatchOperand_ParseFail;
2314 const MCExpr *ImmVal;
2315 if (getParser().parseExpression(ImmVal))
2316 return MatchOperand_ParseFail;
// The shift amount must fold to a compile-time constant.
2318 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2320 Error(E, "expected constant '#imm' after shift specifier");
2321 return MatchOperand_ParseFail;
2324 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2325 Operands.push_back(AArch64Operand::CreateShiftExtend(
2326 ShOp, MCE->getValue(), true, S, E, getContext()));
2327 return MatchOperand_Success;
2330 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2331 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Rewrites "ic/dc/at/tlbi <op>[, Xt]" into the equivalent
/// "sys #op1, Cn, Cm, #op2[, Xt]" operand list.  Operand names are matched
/// case-insensitively.  Returns true on error (after emitting a diagnostic).
2332 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2333 OperandVector &Operands) {
2334 if (Name.find('.') != StringRef::npos)
2335 return TokError("invalid operand")
2339 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2341 MCAsmParser &Parser = getParser();
2342 const AsmToken &Tok = Parser.getTok();
2343 StringRef Op = Tok.getString();
2344 SMLoc S = Tok.getLoc();
2346 const MCExpr *Expr = nullptr;
// Helper macro: append the four SYS encoding operands (#op1, Cn, Cm, #op2)
// for one recognized alias.  The encodings below follow the ARM ARM
// system-instruction tables.
2348 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2350 Expr = MCConstantExpr::Create(op1, getContext()); \
2351 Operands.push_back( \
2352 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2353 Operands.push_back( \
2354 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2355 Operands.push_back( \
2356 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2357 Expr = MCConstantExpr::Create(op2, getContext()); \
2358 Operands.push_back( \
2359 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2362 if (Mnemonic == "ic") {
2363 if (!Op.compare_lower("ialluis")) {
2364 // SYS #0, C7, C1, #0
2365 SYS_ALIAS(0, 7, 1, 0);
2366 } else if (!Op.compare_lower("iallu")) {
2367 // SYS #0, C7, C5, #0
2368 SYS_ALIAS(0, 7, 5, 0);
2369 } else if (!Op.compare_lower("ivau")) {
2370 // SYS #3, C7, C5, #1
2371 SYS_ALIAS(3, 7, 5, 1);
2373 return TokError("invalid operand for IC instruction");
2375 } else if (Mnemonic == "dc") {
2376 if (!Op.compare_lower("zva")) {
2377 // SYS #3, C7, C4, #1
2378 SYS_ALIAS(3, 7, 4, 1);
2379 } else if (!Op.compare_lower("ivac")) {
2380 // SYS #0, C7, C6, #1
2381 SYS_ALIAS(0, 7, 6, 1);
2382 } else if (!Op.compare_lower("isw")) {
2383 // SYS #0, C7, C6, #2
2384 SYS_ALIAS(0, 7, 6, 2);
2385 } else if (!Op.compare_lower("cvac")) {
2386 // SYS #3, C7, C10, #1
2387 SYS_ALIAS(3, 7, 10, 1);
2388 } else if (!Op.compare_lower("csw")) {
2389 // SYS #0, C7, C10, #2
2390 SYS_ALIAS(0, 7, 10, 2);
2391 } else if (!Op.compare_lower("cvau")) {
2392 // SYS #3, C7, C11, #1
2393 SYS_ALIAS(3, 7, 11, 1);
2394 } else if (!Op.compare_lower("civac")) {
2395 // SYS #3, C7, C14, #1
2396 SYS_ALIAS(3, 7, 14, 1);
2397 } else if (!Op.compare_lower("cisw")) {
2398 // SYS #0, C7, C14, #2
2399 SYS_ALIAS(0, 7, 14, 2);
2401 return TokError("invalid operand for DC instruction");
2403 } else if (Mnemonic == "at") {
2404 if (!Op.compare_lower("s1e1r")) {
2405 // SYS #0, C7, C8, #0
2406 SYS_ALIAS(0, 7, 8, 0);
2407 } else if (!Op.compare_lower("s1e2r")) {
2408 // SYS #4, C7, C8, #0
2409 SYS_ALIAS(4, 7, 8, 0);
2410 } else if (!Op.compare_lower("s1e3r")) {
2411 // SYS #6, C7, C8, #0
2412 SYS_ALIAS(6, 7, 8, 0);
2413 } else if (!Op.compare_lower("s1e1w")) {
2414 // SYS #0, C7, C8, #1
2415 SYS_ALIAS(0, 7, 8, 1);
2416 } else if (!Op.compare_lower("s1e2w")) {
2417 // SYS #4, C7, C8, #1
2418 SYS_ALIAS(4, 7, 8, 1);
2419 } else if (!Op.compare_lower("s1e3w")) {
2420 // SYS #6, C7, C8, #1
2421 SYS_ALIAS(6, 7, 8, 1);
2422 } else if (!Op.compare_lower("s1e0r")) {
2423 // SYS #0, C7, C8, #2
2424 SYS_ALIAS(0, 7, 8, 2);
2425 } else if (!Op.compare_lower("s1e0w")) {
2426 // SYS #0, C7, C8, #3
2427 SYS_ALIAS(0, 7, 8, 3);
2428 } else if (!Op.compare_lower("s12e1r")) {
2429 // SYS #4, C7, C8, #4
2430 SYS_ALIAS(4, 7, 8, 4);
2431 } else if (!Op.compare_lower("s12e1w")) {
2432 // SYS #4, C7, C8, #5
2433 SYS_ALIAS(4, 7, 8, 5);
2434 } else if (!Op.compare_lower("s12e0r")) {
2435 // SYS #4, C7, C8, #6
2436 SYS_ALIAS(4, 7, 8, 6);
2437 } else if (!Op.compare_lower("s12e0w")) {
2438 // SYS #4, C7, C8, #7
2439 SYS_ALIAS(4, 7, 8, 7);
2441 return TokError("invalid operand for AT instruction");
2443 } else if (Mnemonic == "tlbi") {
2444 if (!Op.compare_lower("vmalle1is")) {
2445 // SYS #0, C8, C3, #0
2446 SYS_ALIAS(0, 8, 3, 0);
2447 } else if (!Op.compare_lower("alle2is")) {
2448 // SYS #4, C8, C3, #0
2449 SYS_ALIAS(4, 8, 3, 0);
2450 } else if (!Op.compare_lower("alle3is")) {
2451 // SYS #6, C8, C3, #0
2452 SYS_ALIAS(6, 8, 3, 0);
2453 } else if (!Op.compare_lower("vae1is")) {
2454 // SYS #0, C8, C3, #1
2455 SYS_ALIAS(0, 8, 3, 1);
2456 } else if (!Op.compare_lower("vae2is")) {
2457 // SYS #4, C8, C3, #1
2458 SYS_ALIAS(4, 8, 3, 1);
2459 } else if (!Op.compare_lower("vae3is")) {
2460 // SYS #6, C8, C3, #1
2461 SYS_ALIAS(6, 8, 3, 1);
2462 } else if (!Op.compare_lower("aside1is")) {
2463 // SYS #0, C8, C3, #2
2464 SYS_ALIAS(0, 8, 3, 2);
2465 } else if (!Op.compare_lower("vaae1is")) {
2466 // SYS #0, C8, C3, #3
2467 SYS_ALIAS(0, 8, 3, 3);
2468 } else if (!Op.compare_lower("alle1is")) {
2469 // SYS #4, C8, C3, #4
2470 SYS_ALIAS(4, 8, 3, 4);
2471 } else if (!Op.compare_lower("vale1is")) {
2472 // SYS #0, C8, C3, #5
2473 SYS_ALIAS(0, 8, 3, 5);
2474 } else if (!Op.compare_lower("vaale1is")) {
2475 // SYS #0, C8, C3, #7
2476 SYS_ALIAS(0, 8, 3, 7);
2477 } else if (!Op.compare_lower("vmalle1")) {
2478 // SYS #0, C8, C7, #0
2479 SYS_ALIAS(0, 8, 7, 0);
2480 } else if (!Op.compare_lower("alle2")) {
2481 // SYS #4, C8, C7, #0
2482 SYS_ALIAS(4, 8, 7, 0);
2483 } else if (!Op.compare_lower("vale2is")) {
2484 // SYS #4, C8, C3, #5
2485 SYS_ALIAS(4, 8, 3, 5);
2486 } else if (!Op.compare_lower("vale3is")) {
2487 // SYS #6, C8, C3, #5
2488 SYS_ALIAS(6, 8, 3, 5);
2489 } else if (!Op.compare_lower("alle3")) {
2490 // SYS #6, C8, C7, #0
2491 SYS_ALIAS(6, 8, 7, 0);
2492 } else if (!Op.compare_lower("vae1")) {
2493 // SYS #0, C8, C7, #1
2494 SYS_ALIAS(0, 8, 7, 1);
2495 } else if (!Op.compare_lower("vae2")) {
2496 // SYS #4, C8, C7, #1
2497 SYS_ALIAS(4, 8, 7, 1);
2498 } else if (!Op.compare_lower("vae3")) {
2499 // SYS #6, C8, C7, #1
2500 SYS_ALIAS(6, 8, 7, 1);
2501 } else if (!Op.compare_lower("aside1")) {
2502 // SYS #0, C8, C7, #2
2503 SYS_ALIAS(0, 8, 7, 2);
2504 } else if (!Op.compare_lower("vaae1")) {
2505 // SYS #0, C8, C7, #3
2506 SYS_ALIAS(0, 8, 7, 3);
2507 } else if (!Op.compare_lower("alle1")) {
2508 // SYS #4, C8, C7, #4
2509 SYS_ALIAS(4, 8, 7, 4);
2510 } else if (!Op.compare_lower("vale1")) {
2511 // SYS #0, C8, C7, #5
2512 SYS_ALIAS(0, 8, 7, 5);
2513 } else if (!Op.compare_lower("vale2")) {
2514 // SYS #4, C8, C7, #5
2515 SYS_ALIAS(4, 8, 7, 5);
2516 } else if (!Op.compare_lower("vale3")) {
2517 // SYS #6, C8, C7, #5
2518 SYS_ALIAS(6, 8, 7, 5);
2519 } else if (!Op.compare_lower("vaale1")) {
2520 // SYS #0, C8, C7, #7
2521 SYS_ALIAS(0, 8, 7, 7);
2522 } else if (!Op.compare_lower("ipas2e1")) {
2523 // SYS #4, C8, C4, #1
2524 SYS_ALIAS(4, 8, 4, 1);
2525 } else if (!Op.compare_lower("ipas2le1")) {
2526 // SYS #4, C8, C4, #5
2527 SYS_ALIAS(4, 8, 4, 5);
2528 } else if (!Op.compare_lower("ipas2e1is")) {
2529 // SYS #4, C8, C0, #1
2530 SYS_ALIAS(4, 8, 0, 1);
2531 } else if (!Op.compare_lower("ipas2le1is")) {
2532 // SYS #4, C8, C0, #5
2533 SYS_ALIAS(4, 8, 0, 5);
2534 } else if (!Op.compare_lower("vmalls12e1")) {
2535 // SYS #4, C8, C7, #6
2536 SYS_ALIAS(4, 8, 7, 6);
2537 } else if (!Op.compare_lower("vmalls12e1is")) {
2538 // SYS #4, C8, C3, #6
2539 SYS_ALIAS(4, 8, 3, 6);
2541 return TokError("invalid operand for TLBI instruction");
2547 Parser.Lex(); // Eat operand.
// "all"-style ops (e.g. TLBI ALLE1) operate on everything and take no
// register; all other ops require an Xt register operand.
2549 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2550 bool HasRegister = false;
2552 // Check for the optional register operand.
2553 if (getLexer().is(AsmToken::Comma)) {
2554 Parser.Lex(); // Eat comma.
2556 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2557 return TokError("expected register operand");
2562 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2563 Parser.eatToEndOfStatement();
2564 return TokError("unexpected token in argument list");
// Diagnose a mismatch between the op's register requirement and what was
// actually written.
2567 if (ExpectRegister && !HasRegister) {
2568 return TokError("specified " + Mnemonic + " op requires a register");
2570 else if (!ExpectRegister && HasRegister) {
2571 return TokError("specified " + Mnemonic + " op does not use a register");
2574 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB instruction.
/// Accepts either an immediate in the range [0, 15] (with or without a
/// leading '#') or a named barrier option (e.g. "sy", "ish").  For ISB the
/// only valid named option is "sy".
2578 AArch64AsmParser::OperandMatchResultTy
2579 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2580 MCAsmParser &Parser = getParser();
2581 const AsmToken &Tok = Parser.getTok();
2583 // Can be either a #imm style literal or an option name
2584 bool Hash = Tok.is(AsmToken::Hash);
2585 if (Hash || Tok.is(AsmToken::Integer)) {
2586 // Immediate operand.
2588 Parser.Lex(); // Eat the '#'
2589 const MCExpr *ImmVal;
2590 SMLoc ExprLoc = getLoc();
2591 if (getParser().parseExpression(ImmVal))
2592 return MatchOperand_ParseFail;
// The barrier operand must fold to a compile-time constant.
2593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2595 Error(ExprLoc, "immediate value expected for barrier operand");
2596 return MatchOperand_ParseFail;
// CRm is a 4-bit field, so only 0..15 are encodable.
2598 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2599 Error(ExprLoc, "barrier operand out of range");
2600 return MatchOperand_ParseFail;
// Recover the canonical option name for the immediate, if it has one, so
// the operand prints symbolically.
2603 auto Mapper = AArch64DB::DBarrierMapper();
2605 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2606 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2607 ExprLoc, getContext()));
2608 return MatchOperand_Success;
2611 if (Tok.isNot(AsmToken::Identifier)) {
2612 TokError("invalid operand for instruction");
2613 return MatchOperand_ParseFail;
// Named option: translate the identifier to its barrier encoding.
2617 auto Mapper = AArch64DB::DBarrierMapper();
2619 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2621 TokError("invalid barrier option name");
2622 return MatchOperand_ParseFail;
2625 // The only valid named option for ISB is 'sy'
2626 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2627 TokError("'sy' or #imm operand expected");
2628 return MatchOperand_ParseFail;
2631 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2632 getLoc(), getContext()));
2633 Parser.Lex(); // Consume the option
2635 return MatchOperand_Success;
/// tryParseSysReg - Parse a system-register name operand (for MRS/MSR and
/// MSR-to-pstate forms).  The same identifier is looked up in the MRS, MSR,
/// and PState tables; each lookup yields -1U when the name is not valid in
/// that context, and the operand records all three results so the matcher
/// can later accept whichever form the instruction requires.
2638 AArch64AsmParser::OperandMatchResultTy
2639 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2640 MCAsmParser &Parser = getParser();
2641 const AsmToken &Tok = Parser.getTok();
2643 if (Tok.isNot(AsmToken::Identifier))
2644 return MatchOperand_NoMatch;
2647 auto MRSMapper = AArch64SysReg::MRSMapper();
2648 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2650 assert(IsKnown == (MRSReg != -1U) &&
2651 "register should be -1 if and only if it's unknown");
2653 auto MSRMapper = AArch64SysReg::MSRMapper();
2654 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2656 assert(IsKnown == (MSRReg != -1U) &&
2657 "register should be -1 if and only if it's unknown");
2659 auto PStateMapper = AArch64PState::PStateMapper();
2660 uint32_t PStateField =
2661 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2662 assert(IsKnown == (PStateField != -1U) &&
2663 "register should be -1 if and only if it's unknown");
2665 Operands.push_back(AArch64Operand::CreateSysReg(
2666 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2667 Parser.Lex(); // Eat identifier
2669 return MatchOperand_Success;
2672 /// tryParseVectorRegister - Parse a vector register operand.
///
/// Matches "Vn.<kind>" style registers, emitting the register operand and,
/// when an explicit arrangement qualifier is present, a separate literal
/// token for it.  An optional "[imm]" vector-index suffix is also consumed
/// and pushed as a VectorIndex operand.  Returns true when no vector
/// register could be parsed (no-match / error).
2673 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2674 MCAsmParser &Parser = getParser();
2675 if (Parser.getTok().isNot(AsmToken::Identifier))
2679 // Check for a vector register specifier first.
2681 int64_t Reg = tryMatchVectorRegister(Kind, false);
2685 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2686 // If there was an explicit qualifier, that goes on as a literal text
2690 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2692 // If there is an index specifier following the register, parse that too.
2693 if (Parser.getTok().is(AsmToken::LBrac)) {
2694 SMLoc SIdx = getLoc();
2695 Parser.Lex(); // Eat left bracket token.
2697 const MCExpr *ImmVal;
2698 if (getParser().parseExpression(ImmVal))
// The index must fold to a compile-time constant.
2700 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2702 TokError("immediate value expected for vector index");
2707 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2708 Error(E, "']' expected");
2712 Parser.Lex(); // Eat right bracket token.
2714 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2721 /// parseRegister - Parse a non-vector register operand.
///
/// First defers to tryParseVectorRegister, then falls back to scalar
/// register parsing.  Returns true if no register of either kind was found.
2722 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2723 MCAsmParser &Parser = getParser();
2725 // Try for a vector register.
2726 if (!tryParseVectorRegister(Operands))
2729 // Try for a scalar register.
2730 int64_t Reg = tryParseRegister();
2734 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2736 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2737 // as a string token in the instruction itself.
2738 if (getLexer().getKind() == AsmToken::LBrac) {
2739 SMLoc LBracS = getLoc();
2741 const AsmToken &Tok = Parser.getTok();
2742 if (Tok.is(AsmToken::Integer)) {
2743 SMLoc IntS = getLoc();
2744 int64_t Val = Tok.getIntVal();
2747 if (getLexer().getKind() == AsmToken::RBrac) {
2748 SMLoc RBracS = getLoc();
// Emit "[", "1", "]" as three literal tokens so they match the
// instruction's asm string verbatim.
2751 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2753 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2755 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression which may carry a
/// leading ELF relocation specifier of the form ":spec:expr"
/// (e.g. ":lo12:sym").  When a specifier is present the parsed expression is
/// wrapped in an AArch64MCExpr with the corresponding VariantKind.  Returns
/// true on error.
2765 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2766 MCAsmParser &Parser = getParser();
2767 bool HasELFModifier = false;
2768 AArch64MCExpr::VariantKind RefKind;
2770 if (Parser.getTok().is(AsmToken::Colon)) {
2771 Parser.Lex(); // Eat ':"
2772 HasELFModifier = true;
2774 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2775 Error(Parser.getTok().getLoc(),
2776 "expect relocation specifier in operand after ':'");
// Relocation specifiers are matched case-insensitively.
2780 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2781 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2782 .Case("lo12", AArch64MCExpr::VK_LO12)
2783 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2784 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2785 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2786 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2787 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2788 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2789 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2790 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2791 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2792 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2793 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2794 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2795 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2796 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2797 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2798 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2799 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2800 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2801 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2802 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2803 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2804 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2805 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2806 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2807 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2808 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2809 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2810 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2811 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2812 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2813 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2814 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2815 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2816 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2817 .Default(AArch64MCExpr::VK_INVALID);
2819 if (RefKind == AArch64MCExpr::VK_INVALID) {
2820 Error(Parser.getTok().getLoc(),
2821 "expect relocation specifier in operand after ':'");
2825 Parser.Lex(); // Eat identifier
// The specifier must be terminated by a second ':' before the expression.
2827 if (Parser.getTok().isNot(AsmToken::Colon)) {
2828 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2831 Parser.Lex(); // Eat ':'
2834 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the relocation kind survives to emission.
2838 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2843 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts both range form "{ v0.8b - v3.8b }" and comma form
/// "{ v0.8b, v1.8b, ... }" (registers must be sequential mod 32), followed
/// by an optional "[imm]" index.  All registers in the list must share the
/// same arrangement suffix.  Returns true on error.
2844 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2845 MCAsmParser &Parser = getParser();
2846 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2848 Parser.Lex(); // Eat left bracket token.
2850 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2853 int64_t PrevReg = FirstReg;
// Range syntax: "{ vA.K - vB.K }".
2856 if (Parser.getTok().is(AsmToken::Minus)) {
2857 Parser.Lex(); // Eat the minus.
2859 SMLoc Loc = getLoc();
2861 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2864 // Any Kind suffixes must match on all regs in the list.
2865 if (Kind != NextKind)
2866 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap at 32, so compute the distance modulo 32.
2868 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2870 if (Space == 0 || Space > 3) {
2871 return Error(Loc, "invalid number of vectors");
// Comma syntax: "{ vA.K, vB.K, ... }".
2877 while (Parser.getTok().is(AsmToken::Comma)) {
2878 Parser.Lex(); // Eat the comma token.
2880 SMLoc Loc = getLoc();
2882 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2885 // Any Kind suffixes must match on all regs in the list.
2886 if (Kind != NextKind)
2887 return Error(Loc, "mismatched register size suffix");
2889 // Registers must be incremental (with wraparound at 31)
2890 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2891 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2892 return Error(Loc, "registers must be sequential");
2899 if (Parser.getTok().isNot(AsmToken::RCurly))
2900 return Error(getLoc(), "'}' expected");
2901 Parser.Lex(); // Eat the '}' token.
2904 return Error(S, "invalid number of vectors");
2906 unsigned NumElements = 0;
2907 char ElementKind = 0;
2909 parseValidVectorKind(Kind, NumElements, ElementKind);
2911 Operands.push_back(AArch64Operand::CreateVectorList(
2912 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2914 // If there is an index specifier following the list, parse that too.
2915 if (Parser.getTok().is(AsmToken::LBrac)) {
2916 SMLoc SIdx = getLoc();
2917 Parser.Lex(); // Eat left bracket token.
2919 const MCExpr *ImmVal;
2920 if (getParser().parseExpression(ImmVal))
2922 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2924 TokError("immediate value expected for vector index");
2929 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2930 Error(E, "']' expected");
2934 Parser.Lex(); // Eat right bracket token.
2936 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register which may be followed
/// by ", #0" (e.g. the "sp, #0" form some load/store instructions accept).
/// Any index other than an absent one or a literal 0 is rejected.
2942 AArch64AsmParser::OperandMatchResultTy
2943 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2944 MCAsmParser &Parser = getParser();
2945 const AsmToken &Tok = Parser.getTok();
2946 if (!Tok.is(AsmToken::Identifier))
2947 return MatchOperand_NoMatch;
2949 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2951 MCContext &Ctx = getContext();
2952 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
// Only registers in the GPR64sp class (X0-X30 plus SP) qualify here.
2953 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2954 return MatchOperand_NoMatch;
2957 Parser.Lex(); // Eat register
2959 if (Parser.getTok().isNot(AsmToken::Comma)) {
2961 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2962 return MatchOperand_Success;
2964 Parser.Lex(); // Eat comma.
2966 if (Parser.getTok().is(AsmToken::Hash))
2967 Parser.Lex(); // Eat hash
2969 if (Parser.getTok().isNot(AsmToken::Integer)) {
2970 Error(getLoc(), "index must be absent or #0");
2971 return MatchOperand_ParseFail;
// The trailing immediate, if written, must be the constant 0.
2974 const MCExpr *ImmVal;
2975 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2976 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2977 Error(getLoc(), "index must be absent or #0");
2978 return MatchOperand_ParseFail;
2982 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2983 return MatchOperand_Success;
2986 /// parseOperand - Parse a arm instruction operand.  For now this parses the
2987 /// operand regardless of the mnemonic.
///
/// Dispatches on the first token of the operand: custom (tablegen'd) operand
/// parsers first, then brackets, vector lists, identifiers (condition codes,
/// registers, shift/extend, symbols), immediates (including the special
/// "#0.0" fcmp form), and the "ldr Rt, =value" pseudo.  Returns true on
/// error.
2988 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2989 bool invertCondCode) {
2990 MCAsmParser &Parser = getParser();
2991 // Check if the current operand has a custom associated parser, if so, try to
2992 // custom parse the operand, or fallback to the general approach.
2993 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2994 if (ResTy == MatchOperand_Success)
2996 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2997 // there was a match, but an error occurred, in which case, just return that
2998 // the operand parsing failed.
2999 if (ResTy == MatchOperand_ParseFail)
3002 // Nothing custom, so do general case parsing.
3004 switch (getLexer().getKind()) {
3008 if (parseSymbolicImmVal(Expr))
3009 return Error(S, "invalid operand");
3011 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3012 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3015 case AsmToken::LBrac: {
3016 SMLoc Loc = Parser.getTok().getLoc();
3017 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3019 Parser.Lex(); // Eat '['
3021 // There's no comma after a '[', so we can parse the next operand
3023 return parseOperand(Operands, false, false);
3025 case AsmToken::LCurly:
3026 return parseVectorList(Operands);
3027 case AsmToken::Identifier: {
3028 // If we're expecting a Condition Code operand, then just parse that.
3030 return parseCondCode(Operands, invertCondCode);
3032 // If it's a register name, parse it.
3033 if (!parseRegister(Operands))
3036 // This could be an optional "shift" or "extend" operand.
3037 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3038 // We can only continue if no tokens were eaten.
3039 if (GotShift != MatchOperand_NoMatch)
3042 // This was not a register so parse other operands that start with an
3043 // identifier (like labels) as expressions and create them as immediates.
3044 const MCExpr *IdVal;
3046 if (getParser().parseExpression(IdVal))
3049 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3050 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3053 case AsmToken::Integer:
3054 case AsmToken::Real:
3055 case AsmToken::Hash: {
3056 // #42 -> immediate.
3058 if (getLexer().is(AsmToken::Hash))
3061 // Parse a negative sign
3062 bool isNegative = false;
3063 if (Parser.getTok().is(AsmToken::Minus)) {
3065 // We need to consume this token only when we have a Real, otherwise
3066 // we let parseSymbolicImmVal take care of it
3067 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3071 // The only Real that should come through here is a literal #0.0 for
3072 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3073 // so convert the value.
3074 const AsmToken &Tok = Parser.getTok();
3075 if (Tok.is(AsmToken::Real)) {
3076 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3077 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3078 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3079 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3080 Mnemonic != "fcmlt")
3081 return TokError("unexpected floating point literal");
3082 else if (IntVal != 0 || isNegative)
3083 return TokError("expected floating-point constant #0.0");
3084 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as literal tokens to match the asm string.
3087 AArch64Operand::CreateToken("#0", false, S, getContext()));
3089 AArch64Operand::CreateToken(".0", false, S, getContext()));
3093 const MCExpr *ImmVal;
3094 if (parseSymbolicImmVal(ImmVal))
3097 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3098 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3101 case AsmToken::Equal: {
3102 SMLoc Loc = Parser.getTok().getLoc();
3103 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3104 return Error(Loc, "unexpected token in operand");
3105 Parser.Lex(); // Eat '='
3106 const MCExpr *SubExprVal;
3107 if (getParser().parseExpression(SubExprVal))
3110 if (Operands.size() < 2 ||
3111 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3115 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3116 Operands[1]->getReg());
3118 MCContext& Ctx = getContext();
3119 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3120 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3121 if (isa<MCConstantExpr>(SubExprVal)) {
3122 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Shift the constant right by 16 while the low half is zero, so the
// residue fits a movz with an LSL #ShiftAmt shift.
3123 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3124 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3128 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3129 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3130 Operands.push_back(AArch64Operand::CreateImm(
3131 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3133 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3134 ShiftAmt, true, S, E, Ctx));
3137 APInt Simm = APInt(64, Imm << ShiftAmt);
3138 // check if the immediate is an unsigned or signed 32-bit int for W regs
3139 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3140 return Error(Loc, "Immediate too large for register");
3142 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3143 const MCExpr *CPLoc =
3144 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3145 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3151 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.  Handles the .req directive, the SYS aliases (ic/dc/at/tlbi),
/// condensed branch mnemonics ("beq" -> "b.eq"), '.'-separated mnemonic
/// suffixes, and the condition-code operand positions of the conditional
/// compare/select families.  Returns true on error.
3153 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3154 StringRef Name, SMLoc NameLoc,
3155 OperandVector &Operands) {
3156 MCAsmParser &Parser = getParser();
// Canonicalize legacy condensed branch mnemonics into the "b.cc" form.
3157 Name = StringSwitch<StringRef>(Name.lower())
3158 .Case("beq", "b.eq")
3159 .Case("bne", "b.ne")
3160 .Case("bhs", "b.hs")
3161 .Case("bcs", "b.cs")
3162 .Case("blo", "b.lo")
3163 .Case("bcc", "b.cc")
3164 .Case("bmi", "b.mi")
3165 .Case("bpl", "b.pl")
3166 .Case("bvs", "b.vs")
3167 .Case("bvc", "b.vc")
3168 .Case("bhi", "b.hi")
3169 .Case("bls", "b.ls")
3170 .Case("bge", "b.ge")
3171 .Case("blt", "b.lt")
3172 .Case("bgt", "b.gt")
3173 .Case("ble", "b.le")
3174 .Case("bal", "b.al")
3175 .Case("bnv", "b.nv")
3178 // First check for the AArch64-specific .req directive.
3179 if (Parser.getTok().is(AsmToken::Identifier) &&
3180 Parser.getTok().getIdentifier() == ".req") {
3181 parseDirectiveReq(Name, NameLoc);
3182 // We always return 'error' for this, as we're done with this
3183 // statement and don't need to match the 'instruction'.
3187 // Create the leading tokens for the mnemonic, split by '.' characters.
3188 size_t Start = 0, Next = Name.find('.');
3189 StringRef Head = Name.slice(Start, Next);
3191 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3192 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3193 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3194 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3195 Parser.eatToEndOfStatement();
3200 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3203 // Handle condition codes for a branch mnemonic
3204 if (Head == "b" && Next != StringRef::npos) {
3206 Next = Name.find('.', Start + 1);
3207 Head = Name.slice(Start + 1, Next);
3209 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3210 (Head.data() - Name.data()));
3211 AArch64CC::CondCode CC = parseCondCodeString(Head);
3212 if (CC == AArch64CC::Invalid)
3213 return Error(SuffixLoc, "invalid condition code");
3215 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3217 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3220 // Add the remaining tokens in the mnemonic.
3221 while (Next != StringRef::npos) {
3223 Next = Name.find('.', Start + 1);
3224 Head = Name.slice(Start, Next);
3225 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3226 (Head.data() - Name.data()) + 1);
3228 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3231 // Conditional compare instructions have a Condition Code operand, which needs
3232 // to be parsed and an immediate operand created.
3233 bool condCodeFourthOperand =
3234 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3235 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3236 Head == "csinc" || Head == "csinv" || Head == "csneg");
3238 // These instructions are aliases to some of the conditional select
3239 // instructions. However, the condition code is inverted in the aliased
3242 // FIXME: Is this the correct way to handle these? Or should the parser
3243 // generate the aliased instructions directly?
3244 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3245 bool condCodeThirdOperand =
3246 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3248 // Read the remaining operands.
3249 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3250 // Read the first operand.
3251 if (parseOperand(Operands, false, false)) {
3252 Parser.eatToEndOfStatement();
3257 while (getLexer().is(AsmToken::Comma)) {
3258 Parser.Lex(); // Eat the comma.
3260 // Parse and remember the operand.
// N is the 1-based operand position; the cond-code operand sits at a
// different position for each alias family (see flags above).
3261 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3262 (N == 3 && condCodeThirdOperand) ||
3263 (N == 2 && condCodeSecondOperand),
3264 condCodeSecondOperand || condCodeThirdOperand)) {
3265 Parser.eatToEndOfStatement();
3269 // After successfully parsing some operands there are two special cases to
3270 // consider (i.e. notional operands not separated by commas). Both are due
3271 // to memory specifiers:
3272 // + An RBrac will end an address for load/store/prefetch
3273 // + An '!' will indicate a pre-indexed operation.
3275 // It's someone else's responsibility to make sure these tokens are sane
3276 // in the given context!
3277 if (Parser.getTok().is(AsmToken::RBrac)) {
3278 SMLoc Loc = Parser.getTok().getLoc();
3279 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3284 if (Parser.getTok().is(AsmToken::Exclaim)) {
3285 SMLoc Loc = Parser.getTok().getLoc();
3286 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3295 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3296 SMLoc Loc = Parser.getTok().getLoc();
3297 Parser.eatToEndOfStatement();
3298 return Error(Loc, "unexpected token in argument list");
3301 Parser.Lex(); // Consume the EndOfStatement
// validateInstruction performs target-specific semantic checks that the
// TableGen'd matcher cannot express: architecturally unpredictable register
// combinations for writeback/paired loads and stores, and which symbolic
// modifiers are legal on add/sub immediates. Returns true (after emitting a
// diagnostic) on error.
//   Inst - the already-matched instruction to validate.
//   Loc  - per-operand source locations (parallel to the parsed operands),
//          used to point the diagnostic at the offending operand.
// NOTE(review): several guard/terminator lines (e.g. 'if (Rt == Rt2)',
// 'break;', closing braces) appear to be missing from this copy of the
// function — confirm against upstream before relying on control flow here.
3305 // FIXME: This entire function is a giant hack to provide us with decent
3306 // operand range validation/diagnostics until TableGen/MC can be extended
3307 // to support autogeneration of this kind of validation.
3308 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3309 SmallVectorImpl<SMLoc> &Loc) {
3310 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3311 // Check for indexed addressing modes w/ the base register being the
3312 // same as a destination/source register or pair load where
3313 // the Rt == Rt2. All of those are undefined behaviour.
3314 switch (Inst.getOpcode()) {
// Writeback LDP: operands are read starting at index 1 (index 0 is
// presumably the writeback def — TODO confirm); base must not overlap
// either destination.
3315 case AArch64::LDPSWpre:
3316 case AArch64::LDPWpost:
3317 case AArch64::LDPWpre:
3318 case AArch64::LDPXpost:
3319 case AArch64::LDPXpre: {
3320 unsigned Rt = Inst.getOperand(1).getReg();
3321 unsigned Rt2 = Inst.getOperand(2).getReg();
3322 unsigned Rn = Inst.getOperand(3).getReg();
// isSubRegisterEq catches both exact matches and W/X sub-register overlap.
3323 if (RI->isSubRegisterEq(Rn, Rt))
3324 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3325 "is also a destination");
3326 if (RI->isSubRegisterEq(Rn, Rt2))
3327 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3328 "is also a destination");
// Non-writeback LDP: only the Rt == Rt2 hazard applies.
3331 case AArch64::LDPDi:
3332 case AArch64::LDPQi:
3333 case AArch64::LDPSi:
3334 case AArch64::LDPSWi:
3335 case AArch64::LDPWi:
3336 case AArch64::LDPXi: {
3337 unsigned Rt = Inst.getOperand(0).getReg();
3338 unsigned Rt2 = Inst.getOperand(1).getReg();
// NOTE(review): the 'if (Rt == Rt2)' guard expected before this return is
// not visible in this copy — confirm against upstream.
3340 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
3343 case AArch64::LDPDpost:
3344 case AArch64::LDPDpre:
3345 case AArch64::LDPQpost:
3346 case AArch64::LDPQpre:
3347 case AArch64::LDPSpost:
3348 case AArch64::LDPSpre:
3349 case AArch64::LDPSWpost: {
3350 unsigned Rt = Inst.getOperand(1).getReg();
3351 unsigned Rt2 = Inst.getOperand(2).getReg();
// NOTE(review): same apparently-missing 'if (Rt == Rt2)' guard as above.
3353 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt")
// Writeback STP: base must not overlap either source register.
3356 case AArch64::STPDpost:
3357 case AArch64::STPDpre:
3358 case AArch64::STPQpost:
3359 case AArch64::STPQpre:
3360 case AArch64::STPSpost:
3361 case AArch64::STPSpre:
3362 case AArch64::STPWpost:
3363 case AArch64::STPWpre:
3364 case AArch64::STPXpost:
3365 case AArch64::STPXpre: {
3366 unsigned Rt = Inst.getOperand(1).getReg();
3367 unsigned Rt2 = Inst.getOperand(2).getReg();
3368 unsigned Rn = Inst.getOperand(3).getReg();
3369 if (RI->isSubRegisterEq(Rn, Rt))
3370 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3371 "is also a source");
3372 if (RI->isSubRegisterEq(Rn, Rt2))
3373 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3374 "is also a source");
// Writeback single-register loads: base must not overlap the destination.
3377 case AArch64::LDRBBpre:
3378 case AArch64::LDRBpre:
3379 case AArch64::LDRHHpre:
3380 case AArch64::LDRHpre:
3381 case AArch64::LDRSBWpre:
3382 case AArch64::LDRSBXpre:
3383 case AArch64::LDRSHWpre:
3384 case AArch64::LDRSHXpre:
3385 case AArch64::LDRSWpre:
3386 case AArch64::LDRWpre:
3387 case AArch64::LDRXpre:
3388 case AArch64::LDRBBpost:
3389 case AArch64::LDRBpost:
3390 case AArch64::LDRHHpost:
3391 case AArch64::LDRHpost:
3392 case AArch64::LDRSBWpost:
3393 case AArch64::LDRSBXpost:
3394 case AArch64::LDRSHWpost:
3395 case AArch64::LDRSHXpost:
3396 case AArch64::LDRSWpost:
3397 case AArch64::LDRWpost:
3398 case AArch64::LDRXpost: {
3399 unsigned Rt = Inst.getOperand(1).getReg();
3400 unsigned Rn = Inst.getOperand(2).getReg();
3401 if (RI->isSubRegisterEq(Rn, Rt))
3402 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3403 "is also a source");
// Writeback single-register stores: same overlap restriction.
3406 case AArch64::STRBBpost:
3407 case AArch64::STRBpost:
3408 case AArch64::STRHHpost:
3409 case AArch64::STRHpost:
3410 case AArch64::STRWpost:
3411 case AArch64::STRXpost:
3412 case AArch64::STRBBpre:
3413 case AArch64::STRBpre:
3414 case AArch64::STRHHpre:
3415 case AArch64::STRHpre:
3416 case AArch64::STRWpre:
3417 case AArch64::STRXpre: {
3418 unsigned Rt = Inst.getOperand(1).getReg();
3419 unsigned Rn = Inst.getOperand(2).getReg();
3420 if (RI->isSubRegisterEq(Rn, Rt))
3421 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3422 "is also a source");
3427 // Now check immediate ranges. Separate from the above as there is overlap
3428 // in the instructions being checked and this keeps the nested conditionals
3430 switch (Inst.getOpcode()) {
3431 case AArch64::ADDSWri:
3432 case AArch64::ADDSXri:
3433 case AArch64::ADDWri:
3434 case AArch64::ADDXri:
3435 case AArch64::SUBSWri:
3436 case AArch64::SUBSXri:
3437 case AArch64::SUBWri:
3438 case AArch64::SUBXri: {
3439 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3440 // some slight duplication here.
// Operand 2 is the immediate; if it is still a symbolic expression, only a
// restricted set of relocation modifiers is acceptable here.
3441 if (Inst.getOperand(2).isExpr()) {
3442 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3443 AArch64MCExpr::VariantKind ELFRefKind;
3444 MCSymbolRefExpr::VariantKind DarwinRefKind;
3446 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3447 return Error(Loc[2], "invalid immediate expression");
3450 // Only allow these with ADDXri.
// Darwin @pageoff / @tlvppageoff modifiers are 64-bit only.
3451 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3452 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3453 Inst.getOpcode() == AArch64::ADDXri)
3456 // Only allow these with ADDXri/ADDWri
// ELF :lo12:/TLS low-12-bit modifiers are valid on both register widths.
3457 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3458 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3459 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3460 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3461 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3462 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3463 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3464 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3465 (Inst.getOpcode() == AArch64::ADDXri ||
3466 Inst.getOpcode() == AArch64::ADDWri))
3469 // Don't allow expressions in the immediate field otherwise
3470 return Error(Loc[2], "invalid immediate expression");
// showMatchError maps a matcher/validation error code to a human-readable
// diagnostic anchored at Loc. Always returns true (the result of Error()),
// so callers can simply 'return showMatchError(...)'.
//   Loc     - location to attach the diagnostic to.
//   ErrCode - one of the Match_* enumerators from the generated matcher.
// NOTE(review): several 'return Error(Loc,' continuation lines and the
// opening 'switch (ErrCode) {' appear to be missing from this copy —
// the case labels and message strings below are the authoritative content.
3479 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3481 case Match_MissingFeature:
3483 "instruction requires a CPU feature not currently enabled");
3484 case Match_InvalidOperand:
3485 return Error(Loc, "invalid operand for instruction");
3486 case Match_InvalidSuffix:
3487 return Error(Loc, "invalid type suffix for instruction");
3488 case Match_InvalidCondCode:
3489 return Error(Loc, "expected AArch64 condition code");
3490 case Match_AddSubRegExtendSmall:
3492 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3493 case Match_AddSubRegExtendLarge:
3495 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3496 case Match_AddSubSecondSource:
3498 "expected compatible register, symbol or integer in range [0, 4095]");
3499 case Match_LogicalSecondSource:
3500 return Error(Loc, "expected compatible register or logical immediate");
3501 case Match_InvalidMovImm32Shift:
3502 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3503 case Match_InvalidMovImm64Shift:
3504 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3505 case Match_AddSubRegShift32:
3507 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3508 case Match_AddSubRegShift64:
3510 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3511 case Match_InvalidFPImm:
3513 "expected compatible register or floating-point constant");
// Memory-operand diagnostics: signed-immediate and scaled-offset forms.
3514 case Match_InvalidMemoryIndexedSImm9:
3515 return Error(Loc, "index must be an integer in range [-256, 255].");
3516 case Match_InvalidMemoryIndexed4SImm7:
3517 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3518 case Match_InvalidMemoryIndexed8SImm7:
3519 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3520 case Match_InvalidMemoryIndexed16SImm7:
3521 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3522 case Match_InvalidMemoryWExtend8:
3524 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3525 case Match_InvalidMemoryWExtend16:
3527 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3528 case Match_InvalidMemoryWExtend32:
3530 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3531 case Match_InvalidMemoryWExtend64:
3533 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3534 case Match_InvalidMemoryWExtend128:
3536 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3537 case Match_InvalidMemoryXExtend8:
3539 "expected 'lsl' or 'sxtx' with optional shift of #0");
3540 case Match_InvalidMemoryXExtend16:
3542 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3543 case Match_InvalidMemoryXExtend32:
3545 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3546 case Match_InvalidMemoryXExtend64:
3548 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3549 case Match_InvalidMemoryXExtend128:
3551 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3552 case Match_InvalidMemoryIndexed1:
3553 return Error(Loc, "index must be an integer in range [0, 4095].");
3554 case Match_InvalidMemoryIndexed2:
3555 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3556 case Match_InvalidMemoryIndexed4:
3557 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3558 case Match_InvalidMemoryIndexed8:
3559 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3560 case Match_InvalidMemoryIndexed16:
3561 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate range diagnostics.
3562 case Match_InvalidImm0_7:
3563 return Error(Loc, "immediate must be an integer in range [0, 7].");
3564 case Match_InvalidImm0_15:
3565 return Error(Loc, "immediate must be an integer in range [0, 15].");
3566 case Match_InvalidImm0_31:
3567 return Error(Loc, "immediate must be an integer in range [0, 31].");
3568 case Match_InvalidImm0_63:
3569 return Error(Loc, "immediate must be an integer in range [0, 63].");
3570 case Match_InvalidImm0_127:
3571 return Error(Loc, "immediate must be an integer in range [0, 127].");
3572 case Match_InvalidImm0_65535:
3573 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3574 case Match_InvalidImm1_8:
3575 return Error(Loc, "immediate must be an integer in range [1, 8].");
3576 case Match_InvalidImm1_16:
3577 return Error(Loc, "immediate must be an integer in range [1, 16].");
3578 case Match_InvalidImm1_32:
3579 return Error(Loc, "immediate must be an integer in range [1, 32].");
3580 case Match_InvalidImm1_64:
3581 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics.
3582 case Match_InvalidIndex1:
3583 return Error(Loc, "expected lane specifier '[1]'");
3584 case Match_InvalidIndexB:
3585 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3586 case Match_InvalidIndexH:
3587 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3588 case Match_InvalidIndexS:
3589 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3590 case Match_InvalidIndexD:
3591 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3592 case Match_InvalidLabel:
3593 return Error(Loc, "expected label or encodable integer pc offset");
3595 return Error(Loc, "expected readable system register");
3597 return Error(Loc, "expected writable system register or pstate");
3598 case Match_MnemonicFail:
3599 return Error(Loc, "unrecognized instruction mnemonic");
// Any error code not listed above is a programming error in the parser.
3601 llvm_unreachable("unexpected error code!");
3605 static const char *getSubtargetFeatureName(uint64_t Val);
// MatchAndEmitInstruction - Rewrite mnemonic aliases that the TableGen'd
// matcher cannot express (lsl->ubfm, bfc->bfm, bfi/sbfiz/ubfiz->*bfm,
// bfxil/sbfx/ubfx->*bfm, [su]xt[bhw] register-width fixups, fmov #0.0),
// then run the matcher (short-form NEON table first, long-form second),
// validate the result, and emit it. Returns true on error; a diagnostic
// has already been issued in that case.
//   IDLoc     - location of the mnemonic, for top-level diagnostics.
//   Operands  - parsed operand list; index 0 is always the mnemonic token.
//   ErrorInfo - out-param: operand index or missing-feature mask.
// NOTE(review): this copy of the function is missing scattered lines
// (guards, 'break's, closing braces); comments describe only visible code.
3607 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3608 OperandVector &Operands,
3610 uint64_t &ErrorInfo,
3611 bool MatchingInlineAsm) {
3612 assert(!Operands.empty() && "Unexpect empty operand list!");
3613 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3614 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3616 StringRef Tok = Op.getToken();
3617 unsigned NumOperands = Operands.size();
// Alias: 'lsl Rd, Rn, #imm' is encoded as UBFM with rotated immediates;
// compute the two UBFM immediates from the shift amount.
3619 if (NumOperands == 4 && Tok == "lsl") {
3620 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3621 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3622 if (Op2.isReg() && Op3.isImm()) {
3623 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3625 uint64_t Op3Val = Op3CE->getValue();
3626 uint64_t NewOp3Val = 0;
3627 uint64_t NewOp4Val = 0;
// Rotation width depends on whether the destination is a W or X register.
3628 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3630 NewOp3Val = (32 - Op3Val) & 0x1f;
3631 NewOp4Val = 31 - Op3Val;
3633 NewOp3Val = (64 - Op3Val) & 0x3f;
3634 NewOp4Val = 63 - Op3Val;
3637 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3638 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
// Rewrite in place: mnemonic becomes 'ubfm' and a fourth immediate is added.
3640 Operands[0] = AArch64Operand::CreateToken(
3641 "ubfm", false, Op.getStartLoc(), getContext());
3642 Operands.push_back(AArch64Operand::CreateImm(
3643 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3644 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3645 Op3.getEndLoc(), getContext());
3648 } else if (NumOperands == 4 && Tok == "bfc") {
3649 // FIXME: Horrible hack to handle BFC->BFM alias.
3650 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
// LSBOp/WidthOp are deliberately copied by value: Operands[2]/[3] are
// overwritten below while their start/end locations are still needed.
3651 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3652 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3654 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3655 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3656 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3658 if (LSBCE && WidthCE) {
3659 uint64_t LSB = LSBCE->getValue();
3660 uint64_t Width = WidthCE->getValue();
3662 uint64_t RegWidth = 0;
3663 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb and width against the destination register width.
3669 if (LSB >= RegWidth)
3670 return Error(LSBOp.getStartLoc(),
3671 "expected integer in range [0, 31]");
3672 if (Width < 1 || Width > RegWidth)
3673 return Error(WidthOp.getStartLoc(),
3674 "expected integer in range [1, 32]");
3678 ImmR = (32 - LSB) & 0x1f;
3680 ImmR = (64 - LSB) & 0x3f;
3682 uint64_t ImmS = Width - 1;
3684 if (ImmR != 0 && ImmS >= ImmR)
3685 return Error(WidthOp.getStartLoc(),
3686 "requested insert overflows register");
3688 const MCExpr *ImmRExpr = MCConstantExpr::Create(ImmR, getContext());
3689 const MCExpr *ImmSExpr = MCConstantExpr::Create(ImmS, getContext());
// BFC Rd, #lsb, #width  ==>  BFM Rd, ZR, #ImmR, #ImmS (zero source reg).
3690 Operands[0] = AArch64Operand::CreateToken(
3691 "bfm", false, Op.getStartLoc(), getContext());
3692 Operands[2] = AArch64Operand::CreateReg(
3693 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3694 SMLoc(), getContext());
3695 Operands[3] = AArch64Operand::CreateImm(
3696 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3697 Operands.emplace_back(
3698 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3699 WidthOp.getEndLoc(), getContext()));
3702 } else if (NumOperands == 5) {
3703 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3704 // UBFIZ -> UBFM aliases.
3705 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3706 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3707 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3708 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3710 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3711 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3712 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3714 if (Op3CE && Op4CE) {
3715 uint64_t Op3Val = Op3CE->getValue();
3716 uint64_t Op4Val = Op4CE->getValue();
3718 uint64_t RegWidth = 0;
3719 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// lsb must fit in the register; width must be in [1, RegWidth].
3725 if (Op3Val >= RegWidth)
3726 return Error(Op3.getStartLoc(),
3727 "expected integer in range [0, 31]");
3728 if (Op4Val < 1 || Op4Val > RegWidth)
3729 return Error(Op4.getStartLoc(),
3730 "expected integer in range [1, 32]");
3732 uint64_t NewOp3Val = 0;
3734 NewOp3Val = (32 - Op3Val) & 0x1f;
3736 NewOp3Val = (64 - Op3Val) & 0x3f;
3738 uint64_t NewOp4Val = Op4Val - 1;
3740 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3741 return Error(Op4.getStartLoc(),
3742 "requested insert overflows register");
3744 const MCExpr *NewOp3 =
3745 MCConstantExpr::Create(NewOp3Val, getContext());
3746 const MCExpr *NewOp4 =
3747 MCConstantExpr::Create(NewOp4Val, getContext());
3748 Operands[3] = AArch64Operand::CreateImm(
3749 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3750 Operands[4] = AArch64Operand::CreateImm(
3751 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// Swap the mnemonic for the corresponding *BFM form.
3753 Operands[0] = AArch64Operand::CreateToken(
3754 "bfm", false, Op.getStartLoc(), getContext());
3755 else if (Tok == "sbfiz")
3756 Operands[0] = AArch64Operand::CreateToken(
3757 "sbfm", false, Op.getStartLoc(), getContext());
3758 else if (Tok == "ubfiz")
3759 Operands[0] = AArch64Operand::CreateToken(
3760 "ubfm", false, Op.getStartLoc(), getContext());
3762 llvm_unreachable("No valid mnemonic for alias?");
3766 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3767 // UBFX -> UBFM aliases.
3768 } else if (NumOperands == 5 &&
3769 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3770 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3771 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3772 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3774 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3775 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3776 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3778 if (Op3CE && Op4CE) {
3779 uint64_t Op3Val = Op3CE->getValue();
3780 uint64_t Op4Val = Op4CE->getValue();
3782 uint64_t RegWidth = 0;
3783 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3789 if (Op3Val >= RegWidth)
3790 return Error(Op3.getStartLoc(),
3791 "expected integer in range [0, 31]");
3792 if (Op4Val < 1 || Op4Val > RegWidth)
3793 return Error(Op4.getStartLoc(),
3794 "expected integer in range [1, 32]");
// For the extract forms, the second BFM immediate is lsb + width - 1.
3796 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3798 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3799 return Error(Op4.getStartLoc(),
3800 "requested extract overflows register");
3802 const MCExpr *NewOp4 =
3803 MCConstantExpr::Create(NewOp4Val, getContext());
3804 Operands[4] = AArch64Operand::CreateImm(
3805 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3807 Operands[0] = AArch64Operand::CreateToken(
3808 "bfm", false, Op.getStartLoc(), getContext());
3809 else if (Tok == "sbfx")
3810 Operands[0] = AArch64Operand::CreateToken(
3811 "sbfm", false, Op.getStartLoc(), getContext());
3812 else if (Tok == "ubfx")
3813 Operands[0] = AArch64Operand::CreateToken(
3814 "ubfm", false, Op.getStartLoc(), getContext());
3816 llvm_unreachable("No valid mnemonic for alias?");
3821 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3822 // InstAlias can't quite handle this since the reg classes aren't
3824 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3825 // The source register can be Wn here, but the matcher expects a
3826 // GPR64. Twiddle it here if necessary.
3827 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3829 unsigned Reg = getXRegFromWReg(Op.getReg());
3830 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3831 Op.getEndLoc(), getContext());
3834 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3835 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3836 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3838 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3840 // The source register can be Wn here, but the matcher expects a
3841 // GPR64. Twiddle it here if necessary.
3842 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3844 unsigned Reg = getXRegFromWReg(Op.getReg());
3845 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3846 Op.getEndLoc(), getContext());
3850 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3851 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3852 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3854 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3856 // The source register can be Wn here, but the matcher expects a
3857 // GPR32. Twiddle it here if necessary.
// Note: unlike sxt[bh] above, this rewrites the *destination* (operand 1)
// down to its W form rather than widening the source.
3858 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3860 unsigned Reg = getWRegFromXReg(Op.getReg());
3861 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3862 Op.getEndLoc(), getContext());
3867 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3868 if (NumOperands == 3 && Tok == "fmov") {
3869 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3870 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 marks the #0.0 sentinel here — TODO confirm
// against AArch64Operand's FP-immediate encoding.
3871 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3873 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3877 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3878 Op.getEndLoc(), getContext());
3883 // First try to match against the secondary set of tables containing the
3884 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3885 unsigned MatchResult =
3886 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3888 // If that fails, try against the alternate table containing long-form NEON:
3889 // "fadd v0.2s, v1.2s, v2.2s"
3890 if (MatchResult != Match_Success)
3892 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3894 switch (MatchResult) {
3895 case Match_Success: {
3896 // Perform range checking and other semantic validations
3897 SmallVector<SMLoc, 8> OperandLocs;
3898 NumOperands = Operands.size();
// Collect each operand's start location so validateInstruction can point
// its diagnostics at the right operand (mnemonic at index 0 is skipped).
3899 for (unsigned i = 1; i < NumOperands; ++i)
3900 OperandLocs.push_back(Operands[i]->getStartLoc());
3901 if (validateInstruction(Inst, OperandLocs))
3905 Out.EmitInstruction(Inst, STI);
3908 case Match_MissingFeature: {
3909 assert(ErrorInfo && "Unknown missing feature!");
3910 // Special case the error message for the very common case where only
3911 // a single subtarget feature is missing (neon, e.g.).
3912 std::string Msg = "instruction requires:";
// Walk each feature bit set in ErrorInfo and append its name.
// NOTE(review): the loop bound is sizeof(ErrorInfo)*8-1, i.e. bit 63 is
// never visited — presumably intentional; confirm.
3914 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3915 if (ErrorInfo & Mask) {
3917 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3921 return Error(IDLoc, Msg);
3923 case Match_MnemonicFail:
3924 return showMatchError(IDLoc, MatchResult);
3925 case Match_InvalidOperand: {
3926 SMLoc ErrorLoc = IDLoc;
// ~0ULL means "no specific operand"; otherwise ErrorInfo indexes Operands.
3927 if (ErrorInfo != ~0ULL) {
3928 if (ErrorInfo >= Operands.size())
3929 return Error(IDLoc, "too few operands for instruction");
3931 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3932 if (ErrorLoc == SMLoc())
3935 // If the match failed on a suffix token operand, tweak the diagnostic
3937 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3938 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3939 MatchResult = Match_InvalidSuffix;
3941 return showMatchError(ErrorLoc, MatchResult);
// All remaining diagnostics share one handler: resolve the operand
// location and delegate to showMatchError.
3943 case Match_InvalidMemoryIndexed1:
3944 case Match_InvalidMemoryIndexed2:
3945 case Match_InvalidMemoryIndexed4:
3946 case Match_InvalidMemoryIndexed8:
3947 case Match_InvalidMemoryIndexed16:
3948 case Match_InvalidCondCode:
3949 case Match_AddSubRegExtendSmall:
3950 case Match_AddSubRegExtendLarge:
3951 case Match_AddSubSecondSource:
3952 case Match_LogicalSecondSource:
3953 case Match_AddSubRegShift32:
3954 case Match_AddSubRegShift64:
3955 case Match_InvalidMovImm32Shift:
3956 case Match_InvalidMovImm64Shift:
3957 case Match_InvalidFPImm:
3958 case Match_InvalidMemoryWExtend8:
3959 case Match_InvalidMemoryWExtend16:
3960 case Match_InvalidMemoryWExtend32:
3961 case Match_InvalidMemoryWExtend64:
3962 case Match_InvalidMemoryWExtend128:
3963 case Match_InvalidMemoryXExtend8:
3964 case Match_InvalidMemoryXExtend16:
3965 case Match_InvalidMemoryXExtend32:
3966 case Match_InvalidMemoryXExtend64:
3967 case Match_InvalidMemoryXExtend128:
3968 case Match_InvalidMemoryIndexed4SImm7:
3969 case Match_InvalidMemoryIndexed8SImm7:
3970 case Match_InvalidMemoryIndexed16SImm7:
3971 case Match_InvalidMemoryIndexedSImm9:
3972 case Match_InvalidImm0_7:
3973 case Match_InvalidImm0_15:
3974 case Match_InvalidImm0_31:
3975 case Match_InvalidImm0_63:
3976 case Match_InvalidImm0_127:
3977 case Match_InvalidImm0_65535:
3978 case Match_InvalidImm1_8:
3979 case Match_InvalidImm1_16:
3980 case Match_InvalidImm1_32:
3981 case Match_InvalidImm1_64:
3982 case Match_InvalidIndex1:
3983 case Match_InvalidIndexB:
3984 case Match_InvalidIndexH:
3985 case Match_InvalidIndexS:
3986 case Match_InvalidIndexD:
3987 case Match_InvalidLabel:
3990 if (ErrorInfo >= Operands.size())
3991 return Error(IDLoc, "too few operands for instruction");
3992 // Any time we get here, there's nothing fancy to do. Just get the
3993 // operand SMLoc and display the diagnostic.
3994 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3995 if (ErrorLoc == SMLoc())
3997 return showMatchError(ErrorLoc, MatchResult);
4001 llvm_unreachable("Implement any new match types added!");
// ParseDirective dispatches AArch64-specific assembler directives to their
// dedicated handlers. Returns the handler's result (true on error).
// ELF-only directives (.inst) are gated on the object file format; anything
// not matched falls through to the .loh handler.
4004 /// ParseDirective parses the arm specific directives
4005 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4006 const MCObjectFileInfo::Environment Format =
4007 getContext().getObjectFileInfo()->getObjectFileType();
4008 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4009 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4011 StringRef IDVal = DirectiveID.getIdentifier();
4012 SMLoc Loc = DirectiveID.getLoc();
// .hword/.word/.xword emit 2/4/8-byte data values respectively.
4013 if (IDVal == ".hword")
4014 return parseDirectiveWord(2, Loc);
4015 if (IDVal == ".word")
4016 return parseDirectiveWord(4, Loc);
4017 if (IDVal == ".xword")
4018 return parseDirectiveWord(8, Loc);
4019 if (IDVal == ".tlsdesccall")
4020 return parseDirectiveTLSDescCall(Loc);
4021 if (IDVal == ".ltorg" || IDVal == ".pool")
4022 return parseDirectiveLtorg(Loc);
4023 if (IDVal == ".unreq")
4024 return parseDirectiveUnreq(Loc);
// .inst is only supported for ELF output (not MachO or COFF).
4026 if (!IsMachO && !IsCOFF) {
4027 if (IDVal == ".inst")
4028 return parseDirectiveInst(Loc);
4031 return parseDirectiveLOH(IDVal, Loc);
// Emits one Size-byte value per comma-separated expression until end of
// statement. Returns true on error.
//   Size - number of bytes per value (2 for .hword, 4 for .word, 8 for .xword).
//   L    - directive location, used for diagnostics.
4034 /// parseDirectiveWord
4035 /// ::= .word [ expression (, expression)* ]
4036 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4037 MCAsmParser &Parser = getParser();
// An empty operand list (immediate end of statement) is accepted silently.
4038 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4040 const MCExpr *Value;
4041 if (getParser().parseExpression(Value))
4044 getParser().getStreamer().EmitValue(Value, Size);
4046 if (getLexer().is(AsmToken::EndOfStatement))
4049 // FIXME: Improve diagnostic.
4050 if (getLexer().isNot(AsmToken::Comma))
4051 return Error(L, "unexpected token in directive");
// Emits one raw 32-bit instruction word per comma-separated constant
// expression via the target streamer. Errors are reported with Error() and
// the statement is consumed; at least one operand is required.
4060 /// parseDirectiveInst
4061 /// ::= .inst opcode [, ...]
4062 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4063 MCAsmParser &Parser = getParser();
// A bare '.inst' with no operands is an error.
4064 if (getLexer().is(AsmToken::EndOfStatement)) {
4065 Parser.eatToEndOfStatement();
4066 Error(Loc, "expected expression following directive");
4073 if (getParser().parseExpression(Expr)) {
4074 Error(Loc, "expected expression");
// Each operand must fold to a compile-time constant — it is emitted as a
// literal instruction word, so no relocatable expressions are allowed.
4078 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4080 Error(Loc, "expected constant expression");
4084 getTargetStreamer().emitInst(Value->getValue());
4086 if (getLexer().is(AsmToken::EndOfStatement))
4089 if (getLexer().isNot(AsmToken::Comma)) {
4090 Error(Loc, "unexpected token in directive");
4094 Parser.Lex(); // Eat comma.
// Parses '.tlsdesccall symbol' and emits a TLSDESCCALL pseudo-instruction
// carrying the symbol wrapped in a VK_TLSDESC modifier, marking the call
// site for TLS descriptor relaxation. Returns true on parse error.
4101 // parseDirectiveTLSDescCall:
4102 // ::= .tlsdesccall symbol
4103 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4105 if (getParser().parseIdentifier(Name))
4106 return Error(L, "expected symbol after directive");
4108 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4109 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4110 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4113 Inst.setOpcode(AArch64::TLSDESCCALL);
4114 Inst.addOperand(MCOperand::createExpr(Expr));
4116 getParser().getStreamer().EmitInstruction(Inst, STI);
// parseDirectiveLOH parses a MachO Linker Optimization Hint directive.
// The LOH kind may be given by name or by numeric id; the expected number
// of label arguments is derived from the kind. Returns true on error.
//   IDVal - the directive identifier as parsed (checked against the LOH name).
//   Loc   - directive location (currently unused in the visible code).
4120 /// ::= .loh <lohName | lohId> label1, ..., labelN
4121 /// The number of arguments depends on the loh identifier.
4122 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4123 if (IDVal != MCLOHDirectiveName())
4126 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4127 if (getParser().getTok().isNot(AsmToken::Integer))
4128 return TokError("expected an identifier or a number in directive")
4129 // We successfully get a numeric value for the identifier.
4130 // Check if it is valid.
4131 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): 'Id <= -1U' mixes int64_t with unsigned; -1U promotes to
// 4294967295 here, so this accepts any Id up to UINT32_MAX rather than
// rejecting negatives — presumably intended as a range check; confirm.
4132 if (Id <= -1U && !isValidMCLOHType(Id))
4133 return TokError("invalid numeric identifier in directive");
4134 Kind = (MCLOHType)Id;
4136 StringRef Name = getTok().getIdentifier();
4137 // We successfully parse an identifier.
4138 // Check if it is a recognized one.
4139 int Id = MCLOHNameToId(Name);
4142 return TokError("invalid identifier in directive");
4143 Kind = (MCLOHType)Id;
4145 // Consume the identifier.
4147 // Get the number of arguments of this LOH.
4148 int NbArgs = MCLOHIdToNbArgs(Kind);
4150 assert(NbArgs != -1 && "Invalid number of arguments");
4152 SmallVector<MCSymbol *, 3> Args;
// Collect exactly NbArgs comma-separated label identifiers.
4153 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4155 if (getParser().parseIdentifier(Name))
4156 return TokError("expected identifier in directive");
4157 Args.push_back(getContext().getOrCreateSymbol(Name));
// No trailing comma after the last argument.
4159 if (Idx + 1 == NbArgs)
4161 if (getLexer().isNot(AsmToken::Comma))
4162 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4165 if (getLexer().isNot(AsmToken::EndOfStatement))
4166 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4168 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
// Flushes the current constant pool at this point in the output; handles
// both '.ltorg' and its '.pool' alias.
4172 /// parseDirectiveLtorg
4173 /// ::= .ltorg | .pool
4174 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4175 getTargetStreamer().emitCurrentConstantPool();
// parseDirectiveReq records 'Name .req register' as an alias in
// RegisterReqs. Both scalar and vector registers are accepted; a vector
// register given *with* a type specifier is rejected. Redefining an alias
// to a different register only warns (the original mapping is kept).
//   Name - the alias being defined (parsed by the caller).
//   L    - location of the alias name, for the redefinition warning.
4179 /// parseDirectiveReq
4180 /// ::= name .req registername
4181 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4182 MCAsmParser &Parser = getParser();
4183 Parser.Lex(); // Eat the '.req' token.
4184 SMLoc SRegLoc = getLoc();
4185 unsigned RegNum = tryParseRegister();
4186 bool IsVector = false;
// Not a scalar register: retry as a vector register.
// NOTE(review): the line(s) setting IsVector = true on a successful vector
// match are not visible in this copy — confirm against upstream.
4188 if (RegNum == static_cast<unsigned>(-1)) {
4190 RegNum = tryMatchVectorRegister(Kind, false);
// A type suffix (e.g. 'v0.8b') is not allowed in a .req definition.
4191 if (!Kind.empty()) {
4192 Error(SRegLoc, "vector register without type specifier expected");
4198 if (RegNum == static_cast<unsigned>(-1)) {
4199 Parser.eatToEndOfStatement();
4200 Error(SRegLoc, "register name or alias expected");
4204 // Shouldn't be anything else.
4205 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4206 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4207 Parser.eatToEndOfStatement();
4211 Parser.Lex(); // Consume the EndOfStatement
// insert() keeps an existing mapping; warn if it differs from the new one.
4213 auto pair = std::make_pair(IsVector, RegNum);
4214 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4215 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
// Removes a register alias previously defined with .req. Unknown aliases
// are silently ignored (StringMap::erase of a missing key is a no-op).
4220 /// parseDirectiveUneq
4221 /// ::= .unreq registername
4222 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4223 MCAsmParser &Parser = getParser();
4224 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4225 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4226 Parser.eatToEndOfStatement();
// Aliases are stored lower-cased, so the lookup is case-insensitive.
4229 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4230 Parser.Lex(); // Eat the identifier.
// classifySymbolRef decomposes an immediate expression into its relocation
// modifier, underlying symbol reference, and constant addend.
//   Expr          - expression to classify (possibly wrapped in AArch64MCExpr
//                   and/or a binary +/- constant).
//   ELFRefKind    - out: ELF modifier (:lo12: etc.), VK_INVALID if none.
//   DarwinRefKind - out: Darwin modifier (@pageoff etc.), VK_None if none.
//   Addend        - out (declaration elided in this copy): the constant
//                   addend, negated for subtraction.
// Returns true when the expression is a recognizable symbol (+/- constant)
// that does not mix ELF and Darwin modifier syntax.
4235 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4236 AArch64MCExpr::VariantKind &ELFRefKind,
4237 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4239 ELFRefKind = AArch64MCExpr::VK_INVALID;
4240 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Unwrap an AArch64-specific modifier (e.g. :lo12:sym) to the inner expr.
4243 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4244 ELFRefKind = AE->getKind();
4245 Expr = AE->getSubExpr();
4248 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4250 // It's a simple symbol reference with no addend.
4251 DarwinRefKind = SE->getKind();
// Otherwise expect 'symbol op constant' as a binary expression.
4255 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4259 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4262 DarwinRefKind = SE->getKind();
4264 if (BE->getOpcode() != MCBinaryExpr::Add &&
4265 BE->getOpcode() != MCBinaryExpr::Sub)
4268 // See if the addend is is a constant, otherwise there's more going
4269 // on here than we can deal with.
4270 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4274 Addend = AddendExpr->getValue();
// Fold 'sym - C' into a negative addend.
4275 if (BE->getOpcode() == MCBinaryExpr::Sub)
4278 // It's some symbol reference + a constant addend, but really
4279 // shouldn't use both Darwin and ELF syntax.
4280 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4281 DarwinRefKind == MCSymbolRefExpr::VK_None;
// Registers this asm parser with the little-endian, big-endian, and legacy
// "arm64" target registry entries so it is picked up by target lookup.
4284 /// Force static initialization.
4285 extern "C" void LLVMInitializeAArch64AsmParser() {
4286 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4287 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4288 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4291 #define GET_REGISTER_MATCHER
4292 #define GET_SUBTARGET_FEATURE_NAME
4293 #define GET_MATCHER_IMPLEMENTATION
4294 #include "AArch64GenAsmMatcher.inc"
// validateTargetOperandClass gives InstAliases with fixed-value immediates
// a chance to match: if the expected match class denotes a literal
// immediate, the operand matches only when it is a constant expression
// equal to that literal. Returns Match_Success or Match_InvalidOperand.
// NOTE(review): the switch mapping the match-class Kind to ExpectedVal
// (original lines ~4305-4348) is not visible in this copy — confirm the
// per-class values against upstream.
4296 // Define this matcher function after the auto-generated include so we
4297 // have the match class enum definitions.
4298 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4300 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4301 // If the kind is a token for a literal immediate, check if our asm
4302 // operand matches. This is for InstAliases which have a fixed-value
4303 // immediate in the syntax.
4304 int64_t ExpectedVal;
4307 return Match_InvalidOperand;
4349 return Match_InvalidOperand;
// Only compile-time constants can match a literal-immediate class.
4350 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4352 return Match_InvalidOperand;
4353 if (CE->getValue() == ExpectedVal)
4354 return Match_Success;
4355 return Match_InvalidOperand;