1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
42 class AArch64AsmParser : public MCTargetAsmParser {
44 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
117 const MCInstrInfo &MII,
118 const MCTargetOptions &Options)
119 : MCTargetAsmParser(), STI(_STI) {
120 MCAsmParserExtension::Initialize(_Parser);
121 MCStreamer &S = getParser().getStreamer();
122 if (S.getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(S);
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 class AArch64Operand : public MCParsedAsmOperand {
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
215 uint32_t PStateField;
226 struct ShiftExtendOp {
227 AArch64_AM::ShiftExtendType Type;
229 bool HasExplicitAmount;
239 struct VectorListOp VectorList;
240 struct VectorIndexOp VectorIndex;
242 struct ShiftedImmOp ShiftedImm;
243 struct CondCodeOp CondCode;
244 struct FPImmOp FPImm;
245 struct BarrierOp Barrier;
246 struct SysRegOp SysReg;
247 struct SysCRImmOp SysCRImm;
248 struct PrefetchOp Prefetch;
249 struct ShiftExtendOp ShiftExtend;
252 // Keep the MCContext around as the MCExprs may need manipulated during
253 // the add<>Operands() calls.
257 AArch64Operand(KindTy K, MCContext &_Ctx)
258 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
260 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
262 StartLoc = o.StartLoc;
272 ShiftedImm = o.ShiftedImm;
275 CondCode = o.CondCode;
287 VectorList = o.VectorList;
290 VectorIndex = o.VectorIndex;
296 SysCRImm = o.SysCRImm;
299 Prefetch = o.Prefetch;
302 ShiftExtend = o.ShiftExtend;
307 /// getStartLoc - Get the location of the first token of this operand.
308 SMLoc getStartLoc() const override { return StartLoc; }
309 /// getEndLoc - Get the location of the last token of this operand.
310 SMLoc getEndLoc() const override { return EndLoc; }
312 StringRef getToken() const {
313 assert(Kind == k_Token && "Invalid access!");
314 return StringRef(Tok.Data, Tok.Length);
317 bool isTokenSuffix() const {
318 assert(Kind == k_Token && "Invalid access!");
322 const MCExpr *getImm() const {
323 assert(Kind == k_Immediate && "Invalid access!");
327 const MCExpr *getShiftedImmVal() const {
328 assert(Kind == k_ShiftedImm && "Invalid access!");
329 return ShiftedImm.Val;
332 unsigned getShiftedImmShift() const {
333 assert(Kind == k_ShiftedImm && "Invalid access!");
334 return ShiftedImm.ShiftAmount;
337 AArch64CC::CondCode getCondCode() const {
338 assert(Kind == k_CondCode && "Invalid access!");
339 return CondCode.Code;
342 unsigned getFPImm() const {
343 assert(Kind == k_FPImm && "Invalid access!");
347 unsigned getBarrier() const {
348 assert(Kind == k_Barrier && "Invalid access!");
352 unsigned getReg() const override {
353 assert(Kind == k_Register && "Invalid access!");
357 unsigned getVectorListStart() const {
358 assert(Kind == k_VectorList && "Invalid access!");
359 return VectorList.RegNum;
362 unsigned getVectorListCount() const {
363 assert(Kind == k_VectorList && "Invalid access!");
364 return VectorList.Count;
367 unsigned getVectorIndex() const {
368 assert(Kind == k_VectorIndex && "Invalid access!");
369 return VectorIndex.Val;
372 StringRef getSysReg() const {
373 assert(Kind == k_SysReg && "Invalid access!");
374 return StringRef(SysReg.Data, SysReg.Length);
377 unsigned getSysCR() const {
378 assert(Kind == k_SysCR && "Invalid access!");
382 unsigned getPrefetch() const {
383 assert(Kind == k_Prefetch && "Invalid access!");
387 AArch64_AM::ShiftExtendType getShiftExtendType() const {
388 assert(Kind == k_ShiftExtend && "Invalid access!");
389 return ShiftExtend.Type;
392 unsigned getShiftExtendAmount() const {
393 assert(Kind == k_ShiftExtend && "Invalid access!");
394 return ShiftExtend.Amount;
397 bool hasShiftExtendAmount() const {
398 assert(Kind == k_ShiftExtend && "Invalid access!");
399 return ShiftExtend.HasExplicitAmount;
402 bool isImm() const override { return Kind == k_Immediate; }
403 bool isMem() const override { return false; }
404 bool isSImm9() const {
407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
410 int64_t Val = MCE->getValue();
411 return (Val >= -256 && Val < 256);
413 bool isSImm7s4() const {
416 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
419 int64_t Val = MCE->getValue();
420 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
422 bool isSImm7s8() const {
425 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
428 int64_t Val = MCE->getValue();
429 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
431 bool isSImm7s16() const {
434 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
437 int64_t Val = MCE->getValue();
438 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
441 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
442 AArch64MCExpr::VariantKind ELFRefKind;
443 MCSymbolRefExpr::VariantKind DarwinRefKind;
445 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
447 // If we don't understand the expression, assume the best and
448 // let the fixup and relocation code deal with it.
452 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
453 ELFRefKind == AArch64MCExpr::VK_LO12 ||
454 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
455 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
456 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
457 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
458 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
459 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
460 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
461 // Note that we don't range-check the addend. It's adjusted modulo page
462 // size when converted, so there is no "out of range" condition when using
464 return Addend >= 0 && (Addend % Scale) == 0;
465 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
466 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
467 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
474 template <int Scale> bool isUImm12Offset() const {
478 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
480 return isSymbolicUImm12Offset(getImm(), Scale);
482 int64_t Val = MCE->getValue();
483 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
486 bool isImm0_7() const {
489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 int64_t Val = MCE->getValue();
493 return (Val >= 0 && Val < 8);
495 bool isImm1_8() const {
498 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
501 int64_t Val = MCE->getValue();
502 return (Val > 0 && Val < 9);
504 bool isImm0_15() const {
507 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
510 int64_t Val = MCE->getValue();
511 return (Val >= 0 && Val < 16);
513 bool isImm1_16() const {
516 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
519 int64_t Val = MCE->getValue();
520 return (Val > 0 && Val < 17);
522 bool isImm0_31() const {
525 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
528 int64_t Val = MCE->getValue();
529 return (Val >= 0 && Val < 32);
531 bool isImm1_31() const {
534 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
537 int64_t Val = MCE->getValue();
538 return (Val >= 1 && Val < 32);
540 bool isImm1_32() const {
543 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
546 int64_t Val = MCE->getValue();
547 return (Val >= 1 && Val < 33);
549 bool isImm0_63() const {
552 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
555 int64_t Val = MCE->getValue();
556 return (Val >= 0 && Val < 64);
558 bool isImm1_63() const {
561 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
564 int64_t Val = MCE->getValue();
565 return (Val >= 1 && Val < 64);
567 bool isImm1_64() const {
570 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
573 int64_t Val = MCE->getValue();
574 return (Val >= 1 && Val < 65);
576 bool isImm0_127() const {
579 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
582 int64_t Val = MCE->getValue();
583 return (Val >= 0 && Val < 128);
585 bool isImm0_255() const {
588 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
591 int64_t Val = MCE->getValue();
592 return (Val >= 0 && Val < 256);
594 bool isImm0_65535() const {
597 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
600 int64_t Val = MCE->getValue();
601 return (Val >= 0 && Val < 65536);
603 bool isImm32_63() const {
606 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
609 int64_t Val = MCE->getValue();
610 return (Val >= 32 && Val < 64);
612 bool isLogicalImm32() const {
615 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
618 int64_t Val = MCE->getValue();
619 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
622 return AArch64_AM::isLogicalImmediate(Val, 32);
624 bool isLogicalImm64() const {
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
632 bool isLogicalImm32Not() const {
635 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
638 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
639 return AArch64_AM::isLogicalImmediate(Val, 32);
641 bool isLogicalImm64Not() const {
644 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
647 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
649 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
650 bool isAddSubImm() const {
651 if (!isShiftedImm() && !isImm())
656 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
657 if (isShiftedImm()) {
658 unsigned Shift = ShiftedImm.ShiftAmount;
659 Expr = ShiftedImm.Val;
660 if (Shift != 0 && Shift != 12)
666 AArch64MCExpr::VariantKind ELFRefKind;
667 MCSymbolRefExpr::VariantKind DarwinRefKind;
669 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
670 DarwinRefKind, Addend)) {
671 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
672 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
673 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
674 || ELFRefKind == AArch64MCExpr::VK_LO12
675 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
676 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
677 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
678 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
679 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
680 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
681 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
684 // Otherwise it should be a real immediate in range:
685 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
686 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
688 bool isCondCode() const { return Kind == k_CondCode; }
689 bool isSIMDImmType10() const {
692 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
695 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
697 bool isBranchTarget26() const {
700 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
703 int64_t Val = MCE->getValue();
706 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
708 bool isPCRelLabel19() const {
711 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
714 int64_t Val = MCE->getValue();
717 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
719 bool isBranchTarget14() const {
722 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
725 int64_t Val = MCE->getValue();
728 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
732 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
736 AArch64MCExpr::VariantKind ELFRefKind;
737 MCSymbolRefExpr::VariantKind DarwinRefKind;
739 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
740 DarwinRefKind, Addend)) {
743 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
746 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
747 if (ELFRefKind == AllowedModifiers[i])
754 bool isMovZSymbolG3() const {
755 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
756 return isMovWSymbol(Variants);
759 bool isMovZSymbolG2() const {
760 static AArch64MCExpr::VariantKind Variants[] = {
761 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
762 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
763 return isMovWSymbol(Variants);
766 bool isMovZSymbolG1() const {
767 static AArch64MCExpr::VariantKind Variants[] = {
768 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
769 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
770 AArch64MCExpr::VK_DTPREL_G1,
772 return isMovWSymbol(Variants);
775 bool isMovZSymbolG0() const {
776 static AArch64MCExpr::VariantKind Variants[] = {
777 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
778 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
779 return isMovWSymbol(Variants);
782 bool isMovKSymbolG3() const {
783 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
784 return isMovWSymbol(Variants);
787 bool isMovKSymbolG2() const {
788 static AArch64MCExpr::VariantKind Variants[] = {
789 AArch64MCExpr::VK_ABS_G2_NC};
790 return isMovWSymbol(Variants);
793 bool isMovKSymbolG1() const {
794 static AArch64MCExpr::VariantKind Variants[] = {
795 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
796 AArch64MCExpr::VK_DTPREL_G1_NC
798 return isMovWSymbol(Variants);
801 bool isMovKSymbolG0() const {
802 static AArch64MCExpr::VariantKind Variants[] = {
803 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
804 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
806 return isMovWSymbol(Variants);
809 template<int RegWidth, int Shift>
810 bool isMOVZMovAlias() const {
811 if (!isImm()) return false;
813 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814 if (!CE) return false;
815 uint64_t Value = CE->getValue();
818 Value &= 0xffffffffULL;
820 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
821 if (Value == 0 && Shift != 0)
824 return (Value & ~(0xffffULL << Shift)) == 0;
827 template<int RegWidth, int Shift>
828 bool isMOVNMovAlias() const {
829 if (!isImm()) return false;
831 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
832 if (!CE) return false;
833 uint64_t Value = CE->getValue();
835 // MOVZ takes precedence over MOVN.
836 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
837 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
842 Value &= 0xffffffffULL;
844 return (Value & ~(0xffffULL << Shift)) == 0;
847 bool isFPImm() const { return Kind == k_FPImm; }
848 bool isBarrier() const { return Kind == k_Barrier; }
849 bool isSysReg() const { return Kind == k_SysReg; }
850 bool isMRSSystemRegister() const {
851 if (!isSysReg()) return false;
853 return SysReg.MRSReg != -1U;
855 bool isMSRSystemRegister() const {
856 if (!isSysReg()) return false;
858 return SysReg.MSRReg != -1U;
860 bool isSystemPStateField() const {
861 if (!isSysReg()) return false;
863 return SysReg.PStateField != -1U;
865 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
866 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
867 bool isVectorRegLo() const {
868 return Kind == k_Register && Reg.isVector &&
869 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
872 bool isGPR32as64() const {
873 return Kind == k_Register && !Reg.isVector &&
874 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
877 bool isGPR64sp0() const {
878 return Kind == k_Register && !Reg.isVector &&
879 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
882 /// Is this a vector list with the type implicit (presumably attached to the
883 /// instruction itself)?
884 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
885 return Kind == k_VectorList && VectorList.Count == NumRegs &&
886 !VectorList.ElementKind;
889 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
890 bool isTypedVectorList() const {
891 if (Kind != k_VectorList)
893 if (VectorList.Count != NumRegs)
895 if (VectorList.ElementKind != ElementKind)
897 return VectorList.NumElements == NumElements;
900 bool isVectorIndex1() const {
901 return Kind == k_VectorIndex && VectorIndex.Val == 1;
903 bool isVectorIndexB() const {
904 return Kind == k_VectorIndex && VectorIndex.Val < 16;
906 bool isVectorIndexH() const {
907 return Kind == k_VectorIndex && VectorIndex.Val < 8;
909 bool isVectorIndexS() const {
910 return Kind == k_VectorIndex && VectorIndex.Val < 4;
912 bool isVectorIndexD() const {
913 return Kind == k_VectorIndex && VectorIndex.Val < 2;
915 bool isToken() const override { return Kind == k_Token; }
916 bool isTokenEqual(StringRef Str) const {
917 return Kind == k_Token && getToken() == Str;
919 bool isSysCR() const { return Kind == k_SysCR; }
920 bool isPrefetch() const { return Kind == k_Prefetch; }
921 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
922 bool isShifter() const {
923 if (!isShiftExtend())
926 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
927 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
928 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
929 ST == AArch64_AM::MSL);
931 bool isExtend() const {
932 if (!isShiftExtend())
935 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
936 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
937 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
938 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
939 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
940 ET == AArch64_AM::LSL) &&
941 getShiftExtendAmount() <= 4;
944 bool isExtend64() const {
947 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
948 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
949 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
951 bool isExtendLSL64() const {
954 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
955 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
956 ET == AArch64_AM::LSL) &&
957 getShiftExtendAmount() <= 4;
960 template<int Width> bool isMemXExtend() const {
963 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
964 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
965 (getShiftExtendAmount() == Log2_32(Width / 8) ||
966 getShiftExtendAmount() == 0);
969 template<int Width> bool isMemWExtend() const {
972 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
973 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
974 (getShiftExtendAmount() == Log2_32(Width / 8) ||
975 getShiftExtendAmount() == 0);
978 template <unsigned width>
979 bool isArithmeticShifter() const {
983 // An arithmetic shifter is LSL, LSR, or ASR.
984 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
985 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
986 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
989 template <unsigned width>
990 bool isLogicalShifter() const {
994 // A logical shifter is LSL, LSR, ASR or ROR.
995 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
996 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
997 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
998 getShiftExtendAmount() < width;
1001 bool isMovImm32Shifter() const {
1005 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1006 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1007 if (ST != AArch64_AM::LSL)
1009 uint64_t Val = getShiftExtendAmount();
1010 return (Val == 0 || Val == 16);
1013 bool isMovImm64Shifter() const {
1017 // A MOVi shifter is LSL of 0 or 16.
1018 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1019 if (ST != AArch64_AM::LSL)
1021 uint64_t Val = getShiftExtendAmount();
1022 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1025 bool isLogicalVecShifter() const {
1029 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1030 unsigned Shift = getShiftExtendAmount();
1031 return getShiftExtendType() == AArch64_AM::LSL &&
1032 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1035 bool isLogicalVecHalfWordShifter() const {
1036 if (!isLogicalVecShifter())
1039 // A logical vector shifter is a left shift by 0 or 8.
1040 unsigned Shift = getShiftExtendAmount();
1041 return getShiftExtendType() == AArch64_AM::LSL &&
1042 (Shift == 0 || Shift == 8);
1045 bool isMoveVecShifter() const {
1046 if (!isShiftExtend())
1049 // A logical vector shifter is a left shift by 8 or 16.
1050 unsigned Shift = getShiftExtendAmount();
1051 return getShiftExtendType() == AArch64_AM::MSL &&
1052 (Shift == 8 || Shift == 16);
1055 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1056 // to LDUR/STUR when the offset is not legal for the former but is for
1057 // the latter. As such, in addition to checking for being a legal unscaled
1058 // address, also check that it is not a legal scaled address. This avoids
1059 // ambiguity in the matcher.
1061 bool isSImm9OffsetFB() const {
1062 return isSImm9() && !isUImm12Offset<Width / 8>();
1065 bool isAdrpLabel() const {
1066 // Validation was handled during parsing, so we just sanity check that
1067 // something didn't go haywire.
1071 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1072 int64_t Val = CE->getValue();
1073 int64_t Min = - (4096 * (1LL << (21 - 1)));
1074 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1075 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1081 bool isAdrLabel() const {
1082 // Validation was handled during parsing, so we just sanity check that
1083 // something didn't go haywire.
1087 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1088 int64_t Val = CE->getValue();
1089 int64_t Min = - (1LL << (21 - 1));
1090 int64_t Max = ((1LL << (21 - 1)) - 1);
1091 return Val >= Min && Val <= Max;
1097 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1098 // Add as immediates when possible. Null MCExpr = 0.
1100 Inst.addOperand(MCOperand::CreateImm(0));
1101 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1102 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1104 Inst.addOperand(MCOperand::CreateExpr(Expr));
1107 void addRegOperands(MCInst &Inst, unsigned N) const {
1108 assert(N == 1 && "Invalid number of operands!");
1109 Inst.addOperand(MCOperand::CreateReg(getReg()));
1112 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1113 assert(N == 1 && "Invalid number of operands!");
1115 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1117 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1118 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1119 RI->getEncodingValue(getReg()));
1121 Inst.addOperand(MCOperand::CreateReg(Reg));
1124 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1125 assert(N == 1 && "Invalid number of operands!");
1127 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1128 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1131 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1132 assert(N == 1 && "Invalid number of operands!");
1134 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1135 Inst.addOperand(MCOperand::CreateReg(getReg()));
1138 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1139 assert(N == 1 && "Invalid number of operands!");
1140 Inst.addOperand(MCOperand::CreateReg(getReg()));
1143 template <unsigned NumRegs>
1144 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1147 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1148 unsigned FirstReg = FirstRegs[NumRegs - 1];
1151 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1154 template <unsigned NumRegs>
1155 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1156 assert(N == 1 && "Invalid number of operands!");
1157 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1158 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1159 unsigned FirstReg = FirstRegs[NumRegs - 1];
1162 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1165 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1166 assert(N == 1 && "Invalid number of operands!");
1167 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1170 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1171 assert(N == 1 && "Invalid number of operands!");
1172 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1175 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1176 assert(N == 1 && "Invalid number of operands!");
1177 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1180 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1185 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 1 && "Invalid number of operands!");
1187 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1190 void addImmOperands(MCInst &Inst, unsigned N) const {
1191 assert(N == 1 && "Invalid number of operands!");
1192 // If this is a pageoff symrefexpr with an addend, adjust the addend
1193 // to be only the page-offset portion. Otherwise, just add the expr
1195 addExpr(Inst, getImm());
1198 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1199 assert(N == 2 && "Invalid number of operands!");
1200 if (isShiftedImm()) {
1201 addExpr(Inst, getShiftedImmVal());
1202 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1204 addExpr(Inst, getImm());
1205 Inst.addOperand(MCOperand::CreateImm(0));
1209 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1210 assert(N == 1 && "Invalid number of operands!");
1211 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1214 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1215 assert(N == 1 && "Invalid number of operands!");
1216 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1218 addExpr(Inst, getImm());
1220 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1223 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1224 addImmOperands(Inst, N);
1228 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1229 assert(N == 1 && "Invalid number of operands!");
1230 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1233 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1236 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
// Signed-immediate adders. addSImm9 emits the raw value; the SImm7sN variants
// divide by the instruction's scale (4/8/16) before emitting. Range checking
// is presumably done by the matching is*() predicates — confirm.
1239 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1240 assert(N == 1 && "Invalid number of operands!");
1241 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1242 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1245 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1246 assert(N == 1 && "Invalid number of operands!");
1247 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1248 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1251 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1252 assert(N == 1 && "Invalid number of operands!");
1253 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1254 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1257 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1258 assert(N == 1 && "Invalid number of operands!");
1259 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1260 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// Ranged-immediate adders (addImm<lo>_<hi>). All identical: cast the operand
// to a constant and emit its value unchanged. The range named in each method
// is presumably enforced by the corresponding is*() predicate before these
// run — confirm; only addImm1_16 asserts the cast result explicitly.
1263 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1264 assert(N == 1 && "Invalid number of operands!");
1265 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1266 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1269 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1270 assert(N == 1 && "Invalid number of operands!");
1271 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1272 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1275 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1276 assert(N == 1 && "Invalid number of operands!");
1277 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1278 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1281 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1282 assert(N == 1 && "Invalid number of operands!");
1283 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1284 assert(MCE && "Invalid constant immediate operand!");
1285 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1288 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1289 assert(N == 1 && "Invalid number of operands!");
1290 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1291 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1294 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1295 assert(N == 1 && "Invalid number of operands!");
1296 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1297 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1300 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1301 assert(N == 1 && "Invalid number of operands!");
1302 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1303 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1306 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1307 assert(N == 1 && "Invalid number of operands!");
1308 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1309 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1312 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1313 assert(N == 1 && "Invalid number of operands!");
1314 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1315 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1318 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1319 assert(N == 1 && "Invalid number of operands!");
1320 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1321 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1324 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1325 assert(N == 1 && "Invalid number of operands!");
1326 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1327 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1330 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1331 assert(N == 1 && "Invalid number of operands!");
1332 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1333 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1336 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1337 assert(N == 1 && "Invalid number of operands!");
1338 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1339 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1342 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1343 assert(N == 1 && "Invalid number of operands!");
1344 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1345 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Encoded-immediate adders: the raw constant is re-encoded into the
// instruction's immediate field format before being emitted.
// - LogicalImm32/64: bitmask-immediate (N:immr:imms) encoding; the 32-bit
//   form masks to the low 32 bits first.
// - LogicalImm*Not: same, but on the bitwise complement (for BIC-style
//   aliases of AND-style instructions).
// - SIMDImmType10: AdvSIMD modified-immediate type 10 encoding.
1348 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1349 assert(N == 1 && "Invalid number of operands!");
1350 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1352 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1353 Inst.addOperand(MCOperand::CreateImm(encoding));
1356 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1359 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1360 Inst.addOperand(MCOperand::CreateImm(encoding));
1363 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1364 assert(N == 1 && "Invalid number of operands!");
1365 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1366 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1367 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1368 Inst.addOperand(MCOperand::CreateImm(encoding));
1371 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1372 assert(N == 1 && "Invalid number of operands!");
1373 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1375 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1376 Inst.addOperand(MCOperand::CreateImm(encoding));
1379 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1380 assert(N == 1 && "Invalid number of operands!");
1381 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1382 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1383 Inst.addOperand(MCOperand::CreateImm(encoding));
// PC-relative branch-target adders. All three follow the same scheme: a
// constant immediate is shifted right by 2 (A64 branch targets are word
// aligned and don't encode the low bits); a label/expression is emitted
// unmodified for the fixup machinery to resolve later.
1386 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1387 // Branch operands don't encode the low bits, so shift them off
1388 // here. If it's a label, however, just put it on directly as there's
1389 // not enough information now to do anything.
1390 assert(N == 1 && "Invalid number of operands!");
1391 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// NOTE(review): the `if (!MCE)` guard and `return` around the next line are
// elided in this capture (same in the two methods below).
1393 addExpr(Inst, getImm());
1396 assert(MCE && "Invalid constant immediate operand!");
1397 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1400 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1401 // Branch operands don't encode the low bits, so shift them off
1402 // here. If it's a label, however, just put it on directly as there's
1403 // not enough information now to do anything.
1404 assert(N == 1 && "Invalid number of operands!");
1405 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1407 addExpr(Inst, getImm());
1410 assert(MCE && "Invalid constant immediate operand!");
1411 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1414 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1415 // Branch operands don't encode the low bits, so shift them off
1416 // here. If it's a label, however, just put it on directly as there's
1417 // not enough information now to do anything.
1418 assert(N == 1 && "Invalid number of operands!");
1419 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1421 addExpr(Inst, getImm());
1424 assert(MCE && "Invalid constant immediate operand!");
1425 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Trivial single-immediate adders: each forwards one already-validated value
// stored on the operand (FP8 encoding, barrier option, system-register
// encodings, CRn/CRm field, prefetch op) as an immediate.
1428 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1429 assert(N == 1 && "Invalid number of operands!");
1430 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1433 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1434 assert(N == 1 && "Invalid number of operands!");
1435 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1438 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1439 assert(N == 1 && "Invalid number of operands!");
1441 Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
1444 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1445 assert(N == 1 && "Invalid number of operands!");
1447 Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
1450 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1451 assert(N == 1 && "Invalid number of operands!");
1453 Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
1456 void addSysCROperands(MCInst &Inst, unsigned N) const {
1457 assert(N == 1 && "Invalid number of operands!");
1458 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1461 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1462 assert(N == 1 && "Invalid number of operands!");
1463 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Shift/extend adders: pack the (type, amount) pair into the single encoded
// immediate the instruction expects. For the arithmetic extend forms, a bare
// "lsl" is canonicalized to the width-appropriate UXT (UXTW for 32-bit,
// UXTX for 64-bit) before encoding.
1466 void addShifterOperands(MCInst &Inst, unsigned N) const {
1467 assert(N == 1 && "Invalid number of operands!");
1469 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1470 Inst.addOperand(MCOperand::CreateImm(Imm));
1473 void addExtendOperands(MCInst &Inst, unsigned N) const {
1474 assert(N == 1 && "Invalid number of operands!");
1475 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1476 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1477 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1478 Inst.addOperand(MCOperand::CreateImm(Imm));
1481 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1482 assert(N == 1 && "Invalid number of operands!");
1483 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1484 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1485 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1486 Inst.addOperand(MCOperand::CreateImm(Imm));
// Memory-extend adders emit two booleans: "signed extend?" and "shifted?".
1489 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1490 assert(N == 2 && "Invalid number of operands!");
1491 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1492 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1493 Inst.addOperand(MCOperand::CreateImm(IsSigned));
// Non-zero amount means the address is shifted.
1494 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1497 // For 8-bit load/store instructions with a register offset, both the
1498 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1499 // they're disambiguated by whether the shift was explicit or implicit rather
// than by its (always-zero) value, hence hasShiftExtendAmount() below.
1501 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1502 assert(N == 2 && "Invalid number of operands!");
1503 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1504 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1505 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1506 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
// MOV-alias adders: extract the 16-bit chunk of the constant selected by
// Shift (declared on an elided line in this capture) for a MOVZ, or of the
// complemented constant for a MOVN.
1510 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1511 assert(N == 1 && "Invalid number of operands!");
1513 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1514 uint64_t Value = CE->getValue();
1515 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1519 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1520 assert(N == 1 && "Invalid number of operands!");
1522 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1523 uint64_t Value = CE->getValue();
// Complement first: MOVN materializes ~imm16 << shift.
1524 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1527 void print(raw_ostream &OS) const override;
// Static factory methods. Each allocates an AArch64Operand of the matching
// kind, fills in the kind-specific union members shown here, and (on lines
// elided in this capture) sets the start/end SMLocs before returning the
// unique_ptr.
1529 static std::unique_ptr<AArch64Operand>
1530 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1531 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1532 Op->Tok.Data = Str.data();
1533 Op->Tok.Length = Str.size();
1534 Op->Tok.IsSuffix = IsSuffix;
1540 static std::unique_ptr<AArch64Operand>
1541 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1542 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1543 Op->Reg.RegNum = RegNum;
1544 Op->Reg.isVector = isVector;
1550 static std::unique_ptr<AArch64Operand>
1551 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1552 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1553 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1554 Op->VectorList.RegNum = RegNum;
1555 Op->VectorList.Count = Count;
1556 Op->VectorList.NumElements = NumElements;
1557 Op->VectorList.ElementKind = ElementKind;
1563 static std::unique_ptr<AArch64Operand>
1564 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1565 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1566 Op->VectorIndex.Val = Idx;
1572 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1573 SMLoc E, MCContext &Ctx) {
1574 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1581 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1582 unsigned ShiftAmount,
1585 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
// NOTE(review): stray space before `.Val` on the next line (harmless to the
// compiler, but worth normalizing upstream).
1586 Op->ShiftedImm .Val = Val;
1587 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1593 static std::unique_ptr<AArch64Operand>
1594 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1595 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1596 Op->CondCode.Code = Code;
1602 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1604 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1605 Op->FPImm.Val = Val;
1611 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1613 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1614 Op->Barrier.Val = Val;
1620 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1623 uint32_t PStateField,
1625 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1626 Op->SysReg.Data = Str.data();
1627 Op->SysReg.Length = Str.size();
1628 Op->SysReg.MRSReg = MRSReg;
1629 Op->SysReg.MSRReg = MSRReg;
1630 Op->SysReg.PStateField = PStateField;
1636 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1637 SMLoc E, MCContext &Ctx) {
1638 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1639 Op->SysCRImm.Val = Val;
1645 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1647 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1648 Op->Prefetch.Val = Val;
1654 static std::unique_ptr<AArch64Operand>
1655 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1656 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1657 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1658 Op->ShiftExtend.Type = ShOp;
1659 Op->ShiftExtend.Amount = Val;
1660 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1667 } // end anonymous namespace.
// Debug dump of an operand. One case per operand kind; the switch header and
// several `case`/`break` lines are elided in this capture.
1669 void AArch64Operand::print(raw_ostream &OS) const {
1672 OS << "<fpimm " << getFPImm() << "("
1673 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
// Barrier: print the symbolic name when the mapper recognizes the value.
1677 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1679 OS << "<barrier " << Name << ">";
1681 OS << "<barrier invalid #" << getBarrier() << ">";
1685 getImm()->print(OS);
1687 case k_ShiftedImm: {
1688 unsigned Shift = getShiftedImmShift();
1689 OS << "<shiftedimm ";
1690 getShiftedImmVal()->print(OS);
1691 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1695 OS << "<condcode " << getCondCode() << ">";
1698 OS << "<register " << getReg() << ">";
1700 case k_VectorList: {
1701 OS << "<vectorlist ";
1702 unsigned Reg = getVectorListStart();
1703 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1704 OS << Reg + i << " ";
1709 OS << "<vectorindex " << getVectorIndex() << ">";
1712 OS << "<sysreg: " << getSysReg() << '>';
1715 OS << "'" << getToken() << "'";
1718 OS << "c" << getSysCR();
// Prefetch: symbolic name when known, raw number otherwise.
1722 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1724 OS << "<prfop " << Name << ">";
1726 OS << "<prfop invalid #" << getPrefetch() << ">";
1729 case k_ShiftExtend: {
1730 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1731 << getShiftExtendAmount();
1732 if (!hasShiftExtendAmount())
1740 /// @name Auto-generated Match Functions
1743 static unsigned MatchRegisterName(StringRef Name);
// Map a "vN" vector register name to its Q-register enum value. The
// .Default(0)/closing lines are elided in this capture; unmatched names
// presumably yield 0 — confirm against callers that test the result.
1747 static unsigned matchVectorRegName(StringRef Name) {
1748 return StringSwitch<unsigned>(Name)
1749 .Case("v0", AArch64::Q0)
1750 .Case("v1", AArch64::Q1)
1751 .Case("v2", AArch64::Q2)
1752 .Case("v3", AArch64::Q3)
1753 .Case("v4", AArch64::Q4)
1754 .Case("v5", AArch64::Q5)
1755 .Case("v6", AArch64::Q6)
1756 .Case("v7", AArch64::Q7)
1757 .Case("v8", AArch64::Q8)
1758 .Case("v9", AArch64::Q9)
1759 .Case("v10", AArch64::Q10)
1760 .Case("v11", AArch64::Q11)
1761 .Case("v12", AArch64::Q12)
1762 .Case("v13", AArch64::Q13)
1763 .Case("v14", AArch64::Q14)
1764 .Case("v15", AArch64::Q15)
1765 .Case("v16", AArch64::Q16)
1766 .Case("v17", AArch64::Q17)
1767 .Case("v18", AArch64::Q18)
1768 .Case("v19", AArch64::Q19)
1769 .Case("v20", AArch64::Q20)
1770 .Case("v21", AArch64::Q21)
1771 .Case("v22", AArch64::Q22)
1772 .Case("v23", AArch64::Q23)
1773 .Case("v24", AArch64::Q24)
1774 .Case("v25", AArch64::Q25)
1775 .Case("v26", AArch64::Q26)
1776 .Case("v27", AArch64::Q27)
1777 .Case("v28", AArch64::Q28)
1778 .Case("v29", AArch64::Q29)
1779 .Case("v30", AArch64::Q30)
1780 .Case("v31", AArch64::Q31)
// Whether a ".<kind>" vector suffix (case-insensitive) names a supported
// arrangement. The .Case list is elided in this capture.
1784 static bool isValidVectorKind(StringRef Name) {
1785 return StringSwitch<bool>(Name.lower())
1795 // Accept the width neutral ones, too, for verbose syntax. If those
1796 // aren't used in the right places, the token operand won't match so
1797 // all will work out.
// Decode an already-validated vector kind string (e.g. ".4s") into its lane
// count and element-kind character. A 2-char kind like ".b" has no count
// digits, so NumElements stays at its (elided) initialization.
1805 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1806 char &ElementKind) {
1807 assert(isValidVectorKind(Name));
1809 ElementKind = Name.lower()[Name.size() - 1];
1812 if (Name.size() == 2)
1815 // Parse the lane count
1816 Name = Name.drop_front();
1817 while (isdigit(Name.front())) {
1818 NumElements = 10 * NumElements + (Name.front() - '0');
1819 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true (failure) when tryParseRegister yields the -1 sentinel.
1823 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1825 StartLoc = getLoc();
1826 RegNo = tryParseRegister();
1827 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1828 return (RegNo == (unsigned)-1);
1831 // Matches a register name or register alias previously defined by '.req'
// Vector names go through matchVectorRegName; everything else through the
// generated MatchRegisterName. A .req alias only applies when its recorded
// vector-ness matches the requested kind.
1832 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1834 unsigned RegNum = isVector ? matchVectorRegName(Name)
1835 : MatchRegisterName(Name);
1838 // Check for aliases registered via .req. Canonicalize to lower case.
1839 // That's more consistent since register names are case insensitive, and
1840 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1841 auto Entry = RegisterReqs.find(Name.lower());
1842 if (Entry == RegisterReqs.end())
1844 // set RegNum if the match is the right kind of register
1845 if (isVector == Entry->getValue().first)
1846 RegNum = Entry->getValue().second;
1851 /// tryParseRegister - Try to parse a register name. The token must be an
1852 /// Identifier when called, and if it is a register name the token is eaten and
1853 /// the register is added to the operand list.
/// Returns the register number, or -1 (via elided lines) on no match.
1854 int AArch64AsmParser::tryParseRegister() {
1855 MCAsmParser &Parser = getParser();
1856 const AsmToken &Tok = Parser.getTok();
1857 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1859 std::string lowerCase = Tok.getString().lower();
1860 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1861 // Also handle a few aliases of registers.
// fp/lr and the zero-register spellings x31/w31 aren't in the generated
// table, so map them here when the first lookup found nothing.
1863 RegNum = StringSwitch<unsigned>(lowerCase)
1864 .Case("fp", AArch64::FP)
1865 .Case("lr", AArch64::LR)
1866 .Case("x31", AArch64::XZR)
1867 .Case("w31", AArch64::WZR)
1873 Parser.Lex(); // Eat identifier token.
1877 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1878 /// kind specifier. If it is a register specifier, eat the token and return it.
/// On success Kind receives the ".<kind>" suffix (including the dot), if any.
1879 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1880 MCAsmParser &Parser = getParser();
1881 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1882 TokError("vector register expected");
1886 StringRef Name = Parser.getTok().getString();
1887 // If there is a kind specifier, it's separated from the register name by
1889 size_t Start = 0, Next = Name.find('.');
1890 StringRef Head = Name.slice(Start, Next);
1891 unsigned RegNum = matchRegisterNameAlias(Head, true);
// Validate the optional ".<kind>" suffix before consuming the token.
1894 if (Next != StringRef::npos) {
1895 Kind = Name.slice(Next, StringRef::npos);
1896 if (!isValidVectorKind(Kind)) {
1897 TokError("invalid vector kind qualifier");
1901 Parser.Lex(); // Eat the register token.
// NOTE(review): the failure path below presumably only fires when
// `expected` is set — the guarding lines are elided in this capture.
1906 TokError("vector register expected");
1910 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
/// Accepts "cN"/"CN" with 0 <= N <= 15 and pushes a SysCR operand.
1911 AArch64AsmParser::OperandMatchResultTy
1912 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1913 MCAsmParser &Parser = getParser();
1916 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1917 Error(S, "Expected cN operand where 0 <= N <= 15");
1918 return MatchOperand_ParseFail;
1921 StringRef Tok = Parser.getTok().getIdentifier();
1922 if (Tok[0] != 'c' && Tok[0] != 'C') {
1923 Error(S, "Expected cN operand where 0 <= N <= 15");
1924 return MatchOperand_ParseFail;
// Parse the digits after the 'c' and range-check them.
1928 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1929 if (BadNum || CRNum > 15) {
1930 Error(S, "Expected cN operand where 0 <= N <= 15");
1931 return MatchOperand_ParseFail;
1934 Parser.Lex(); // Eat identifier token.
1936 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1937 return MatchOperand_Success;
1940 /// tryParsePrefetch - Try to parse a prefetch operand.
/// Accepts either a 5-bit immediate (optionally '#'-prefixed) or a named
/// PRFM hint looked up through PRFMMapper.
1941 AArch64AsmParser::OperandMatchResultTy
1942 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1943 MCAsmParser &Parser = getParser();
1945 const AsmToken &Tok = Parser.getTok();
1946 // Either an identifier for named values or a 5-bit immediate.
1947 bool Hash = Tok.is(AsmToken::Hash);
1948 if (Hash || Tok.is(AsmToken::Integer)) {
1950 Parser.Lex(); // Eat hash token.
1951 const MCExpr *ImmVal;
1952 if (getParser().parseExpression(ImmVal))
1953 return MatchOperand_ParseFail;
1955 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1957 TokError("immediate value expected for prefetch operand");
1958 return MatchOperand_ParseFail;
1960 unsigned prfop = MCE->getValue();
// Range check elided in this capture; the message says [0,31].
1962 TokError("prefetch operand out of range, [0,31] expected");
1963 return MatchOperand_ParseFail;
1966 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1967 return MatchOperand_Success;
1970 if (Tok.isNot(AsmToken::Identifier)) {
1971 TokError("pre-fetch hint expected");
1972 return MatchOperand_ParseFail;
1976 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1978 TokError("pre-fetch hint expected");
1979 return MatchOperand_ParseFail;
1982 Parser.Lex(); // Eat identifier token.
1983 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1984 return MatchOperand_Success;
1987 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction, classifying any @page/@gotpage/@tlvppage modifier and
/// rejecting modifiers ADRP cannot encode.
1989 AArch64AsmParser::OperandMatchResultTy
1990 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1991 MCAsmParser &Parser = getParser();
1995 if (Parser.getTok().is(AsmToken::Hash)) {
1996 Parser.Lex(); // Eat hash token.
1999 if (parseSymbolicImmVal(Expr))
2000 return MatchOperand_ParseFail;
2002 AArch64MCExpr::VariantKind ELFRefKind;
2003 MCSymbolRefExpr::VariantKind DarwinRefKind;
2005 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2006 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2007 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2008 // No modifier was specified at all; this is the syntax for an ELF basic
2009 // ADRP relocation (unfortunately).
2011 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2012 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2013 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// (addend-nonzero check elided in this capture)
2015 Error(S, "gotpage label reference not allowed an addend");
2016 return MatchOperand_ParseFail;
2017 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2018 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2019 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2020 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2021 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2022 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE {
2023 // The operand must be an @page or @gotpage qualified symbolref.
2024 Error(S, "page or gotpage label reference expected");
2025 return MatchOperand_ParseFail;
2029 // We have either a label reference possibly with addend or an immediate. The
2030 // addend is a raw value here. The linker will adjust it to only reference the
2032 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2033 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2035 return MatchOperand_Success;
2038 /// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction: an optional '#' then any expression, pushed as an immediate.
2040 AArch64AsmParser::OperandMatchResultTy
2041 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2042 MCAsmParser &Parser = getParser();
2046 if (Parser.getTok().is(AsmToken::Hash)) {
2047 Parser.Lex(); // Eat hash token.
2050 if (getParser().parseExpression(Expr))
2051 return MatchOperand_ParseFail;
2053 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2054 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2056 return MatchOperand_Success;
2059 /// tryParseFPImm - A floating point immediate expression operand.
/// Accepts a real literal (encoded via getFP64Imm; only exact FP8-encodable
/// values pass, zero excepted), an integer written as a float, or a raw
/// pre-encoded 0x00-0xff byte.
2060 AArch64AsmParser::OperandMatchResultTy
2061 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2062 MCAsmParser &Parser = getParser();
2066 if (Parser.getTok().is(AsmToken::Hash)) {
2067 Parser.Lex(); // Eat '#'
2071 // Handle negation, as that still comes through as a separate token.
2072 bool isNegative = false;
2073 if (Parser.getTok().is(AsmToken::Minus)) {
2077 const AsmToken &Tok = Parser.getTok();
2078 if (Tok.is(AsmToken::Real)) {
2079 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2080 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2081 // If we had a '-' in front, toggle the sign bit.
2082 IntVal ^= (uint64_t)isNegative << 63;
2083 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2084 Parser.Lex(); // Eat the token.
2085 // Check for out of range values. As an exception, we let Zero through,
2086 // as we handle that special case in post-processing before matching in
2087 // order to use the zero register for it.
2088 if (Val == -1 && !RealVal.isZero()) {
2089 TokError("expected compatible register or floating-point constant");
2090 return MatchOperand_ParseFail;
2092 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2093 return MatchOperand_Success;
2095 if (Tok.is(AsmToken::Integer)) {
// A hex integer is treated as an already-encoded FP8 byte.
2097 if (!isNegative && Tok.getString().startswith("0x")) {
2098 Val = Tok.getIntVal();
2099 if (Val > 255 || Val < 0) {
2100 TokError("encoded floating point value out of range");
2101 return MatchOperand_ParseFail;
// (else branch: re-parse the decimal integer as a double and encode it)
2104 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2105 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2106 // If we had a '-' in front, toggle the sign bit.
2107 IntVal ^= (uint64_t)isNegative << 63;
2108 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2110 Parser.Lex(); // Eat the token.
2111 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2112 return MatchOperand_Success;
2116 return MatchOperand_NoMatch;
2118 TokError("invalid floating point immediate");
2119 return MatchOperand_ParseFail;
2122 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
/// Forms: "#imm" (a constant > 0xfff with clean low 12 bits is auto-split
/// into imm>>12 with an LSL #12 shift) or "#imm, lsl #N".
2123 AArch64AsmParser::OperandMatchResultTy
2124 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2125 MCAsmParser &Parser = getParser();
2128 if (Parser.getTok().is(AsmToken::Hash))
2129 Parser.Lex(); // Eat '#'
2130 else if (Parser.getTok().isNot(AsmToken::Integer))
2131 // Operand should start from # or should be integer, emit error otherwise.
2132 return MatchOperand_NoMatch;
2135 if (parseSymbolicImmVal(Imm))
2136 return MatchOperand_ParseFail;
2137 else if (Parser.getTok().isNot(AsmToken::Comma)) {
// No shift follows: emit as-is, auto-deriving an LSL #12 when possible.
2138 uint64_t ShiftAmount = 0;
2139 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2141 int64_t Val = MCE->getValue();
2142 if (Val > 0xfff && (Val & 0xfff) == 0) {
2143 Imm = MCConstantExpr::Create(Val >> 12, getContext());
// (ShiftAmount = 12 assignment elided in this capture)
2147 SMLoc E = Parser.getTok().getLoc();
2148 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2150 return MatchOperand_Success;
2156 // The optional operand must be "lsl #N" where N is non-negative.
2157 if (!Parser.getTok().is(AsmToken::Identifier) ||
2158 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2159 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2160 return MatchOperand_ParseFail;
// (eat "lsl" and the optional '#' — lines elided in this capture)
2166 if (Parser.getTok().is(AsmToken::Hash)) {
2170 if (Parser.getTok().isNot(AsmToken::Integer)) {
2171 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2172 return MatchOperand_ParseFail;
2175 int64_t ShiftAmount = Parser.getTok().getIntVal();
2177 if (ShiftAmount < 0) {
2178 Error(Parser.getTok().getLoc(), "positive shift amount required");
2179 return MatchOperand_ParseFail;
2181 Parser.Lex(); // Eat the number
2183 SMLoc E = Parser.getTok().getLoc();
2184 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2185 S, E, getContext()));
2186 return MatchOperand_Success;
2189 /// parseCondCodeString - Parse a Condition Code string.
/// Case-insensitive; "cs"/"hs" and "cc"/"lo" are architectural synonyms.
/// Unknown strings map to AArch64CC::Invalid.
2190 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2191 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2192 .Case("eq", AArch64CC::EQ)
2193 .Case("ne", AArch64CC::NE)
2194 .Case("cs", AArch64CC::HS)
2195 .Case("hs", AArch64CC::HS)
2196 .Case("cc", AArch64CC::LO)
2197 .Case("lo", AArch64CC::LO)
2198 .Case("mi", AArch64CC::MI)
2199 .Case("pl", AArch64CC::PL)
2200 .Case("vs", AArch64CC::VS)
2201 .Case("vc", AArch64CC::VC)
2202 .Case("hi", AArch64CC::HI)
2203 .Case("ls", AArch64CC::LS)
2204 .Case("ge", AArch64CC::GE)
2205 .Case("lt", AArch64CC::LT)
2206 .Case("gt", AArch64CC::GT)
2207 .Case("le", AArch64CC::LE)
2208 .Case("al", AArch64CC::AL)
2209 .Case("nv", AArch64CC::NV)
2210 .Default(AArch64CC::Invalid);
2214 /// parseCondCode - Parse a Condition Code operand.
/// With invertCondCode set (e.g. for csinv-style aliases) the parsed code is
/// inverted; AL/NV cannot be inverted and are rejected.
2215 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2216 bool invertCondCode) {
2217 MCAsmParser &Parser = getParser();
2219 const AsmToken &Tok = Parser.getTok();
2220 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2222 StringRef Cond = Tok.getString();
2223 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2224 if (CC == AArch64CC::Invalid)
2225 return TokError("invalid condition code");
2226 Parser.Lex(); // Eat identifier token.
2228 if (invertCondCode) {
2229 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2230 return TokError("condition codes AL and NV are invalid for this instruction")
2231 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2235 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2239 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2240 /// them if present.
/// Recognizes lsl/lsr/asr/ror/msl (which require a "#imm") and the uxt*/sxt*
/// extends (where the amount is optional, defaulting to an implicit 0).
2241 AArch64AsmParser::OperandMatchResultTy
2242 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2243 MCAsmParser &Parser = getParser();
2244 const AsmToken &Tok = Parser.getTok();
2245 std::string LowerID = Tok.getString().lower();
2246 AArch64_AM::ShiftExtendType ShOp =
2247 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2248 .Case("lsl", AArch64_AM::LSL)
2249 .Case("lsr", AArch64_AM::LSR)
2250 .Case("asr", AArch64_AM::ASR)
2251 .Case("ror", AArch64_AM::ROR)
2252 .Case("msl", AArch64_AM::MSL)
2253 .Case("uxtb", AArch64_AM::UXTB)
2254 .Case("uxth", AArch64_AM::UXTH)
2255 .Case("uxtw", AArch64_AM::UXTW)
2256 .Case("uxtx", AArch64_AM::UXTX)
2257 .Case("sxtb", AArch64_AM::SXTB)
2258 .Case("sxth", AArch64_AM::SXTH)
2259 .Case("sxtw", AArch64_AM::SXTW)
2260 .Case("sxtx", AArch64_AM::SXTX)
2261 .Default(AArch64_AM::InvalidShiftExtend);
2263 if (ShOp == AArch64_AM::InvalidShiftExtend)
2264 return MatchOperand_NoMatch;
2266 SMLoc S = Tok.getLoc();
// (eat the shift/extend identifier — line elided in this capture)
2269 bool Hash = getLexer().is(AsmToken::Hash);
2270 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2271 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2272 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2273 ShOp == AArch64_AM::MSL) {
2274 // We expect a number here.
2275 TokError("expected #imm after shift specifier");
2276 return MatchOperand_ParseFail;
2279 // "extend" type operatoins don't need an immediate, #0 is implicit.
2280 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2282 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2283 return MatchOperand_Success;
2287 Parser.Lex(); // Eat the '#'.
2289 // Make sure we do actually have a number or a parenthesized expression.
2290 SMLoc E = Parser.getTok().getLoc();
2291 if (!Parser.getTok().is(AsmToken::Integer) &&
2292 !Parser.getTok().is(AsmToken::LParen)) {
2293 Error(E, "expected integer shift amount");
2294 return MatchOperand_ParseFail;
2297 const MCExpr *ImmVal;
2298 if (getParser().parseExpression(ImmVal))
2299 return MatchOperand_ParseFail;
2301 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2303 Error(E, "expected constant '#imm' after shift specifier");
2304 return MatchOperand_ParseFail;
2307 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2308 Operands.push_back(AArch64Operand::CreateShiftExtend(
2309 ShOp, MCE->getValue(), true, S, E, getContext()));
2310 return MatchOperand_Success;
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// On entry the mnemonic (\p Name) has already been consumed and the current
/// token is the alias operand (e.g. "ialluis"). Expands to the equivalent
/// "sys #op1, Cn, Cm, #op2 [, Xt]" operand list. Returns true on error.
/// NOTE(review): the per-alias encodings below were checked against the ARM
/// ARM; where a pre-existing comment disagreed with the emitted values, the
/// comment has been corrected to match the (correct) code.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

// Emits the four fixed SYS operands (#op1, Cn, Cm, #op2) for one alias.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
  do {                                                                         \
    Expr = MCConstantExpr::Create(op1, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
    Expr = MCConstantExpr::Create(op2, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      return TokError("invalid operand for IC instruction");
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else {
      return TokError("invalid operand for DC instruction");
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else {
      return TokError("invalid operand for AT instruction");
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      return TokError("invalid operand for TLBI instruction");
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  // Aliases whose name contains "all" operate on everything and therefore
  // take no register; all others require an Xt register operand.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");
  }

  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  }
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier
/// instruction: either a #imm in the range [0, 15] or a named option
/// (e.g. "sy", "ish"). For ISB the only accepted named option is "sy".
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    if (Hash)
      Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // The CRm field encoding the barrier kind is only 4 bits wide.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(
        AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  bool Valid;
  unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
/// tryParseSysReg - Parse a system-register operand (for MRS/MSR and MSR
/// pstate forms). The identifier is looked up in all three name spaces
/// (MRS-readable, MSR-writable, PState fields); each lookup independently
/// yields -1U when the name is unknown in that space, and the operand records
/// all three results so the instruction matcher can pick the relevant one.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  bool IsKnown;
  auto MRSMapper = AArch64SysReg::MRSMapper(STI.getFeatureBits());
  uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), IsKnown);
  assert(IsKnown == (MRSReg != -1U) &&
         "register should be -1 if and only if it's unknown");

  auto MSRMapper = AArch64SysReg::MSRMapper(STI.getFeatureBits());
  uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), IsKnown);
  assert(IsKnown == (MSRReg != -1U) &&
         "register should be -1 if and only if it's unknown");

  uint32_t PStateField =
      AArch64PState::PStateMapper().fromString(Tok.getString(), IsKnown);
  assert(IsKnown == (PStateField != -1U) &&
         "register should be -1 if and only if it's unknown");

  Operands.push_back(AArch64Operand::CreateSysReg(
      Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
/// tryParseVectorRegister - Parse a vector register operand.
///
/// Returns true when the current token cannot be a vector register (no token
/// is consumed); returns false once the register operand (plus any ".<kind>"
/// qualifier token and optional "[index]") has been pushed. Note that index
/// parse errors also return false after emitting a diagnostic, since the
/// register operand has already been committed.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
/// parseRegister - Parse a non-vector register operand.
///
/// First defers to tryParseVectorRegister; failing that, tries a scalar
/// register. Returns true if neither form matched. A trailing literal "[1]"
/// is emitted as three separate tokens because a few instructions
/// (FMOVXDhighr, for example) carry it as part of their asm string.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))
    return false;

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    Parser.Lex();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
      if (Val == 1) {
        Parser.Lex();
        if (getLexer().getKind() == AsmToken::RBrac) {
          SMLoc RBracS = getLoc();
          Parser.Lex();
          Operands.push_back(
              AArch64Operand::CreateToken("[", false, LBracS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("1", false, IntS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("]", false, RBracS, getContext()));
          return false;
        }
      }
    }
  }

  return false;
}
/// parseSymbolicImmVal - Parse an immediate expression, optionally preceded
/// by an ELF relocation specifier of the form ":lo12:", ":got:", etc. When a
/// specifier is present the parsed expression is wrapped in an AArch64MCExpr
/// carrying the corresponding variant kind. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat ':"
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier)) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    Parser.Lex(); // Eat identifier

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
      return true;
    }
    Parser.Lex(); // Eat ':'
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  if (HasELFModifier)
    ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());

  return false;
}
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts either the range form "{ v0.8b - v3.8b }" or the enumerated form
/// "{ v0.8b, v1.8b, ... }" (at most four registers, consecutive with
/// wraparound at 31), followed by an optional "[index]". Returns true on
/// error.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Distance from first to last register, modulo the 32-register file.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  }
  else {
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
        return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
/// tryParseGPR64sp0Operand - Parse a "Xn|SP" operand that may be followed by
/// a redundant ", #0" suffix (used by LDXP-style instructions). Anything
/// other than an absent index or a literal zero is a parse failure.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);

  MCContext &Ctx = getContext();
  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
    return MatchOperand_NoMatch;

  SMLoc S = getLoc();
  Parser.Lex(); // Eat register

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(
        AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
    return MatchOperand_Success;
  }
  Parser.Lex(); // Eat comma.

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat hash

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
  return MatchOperand_Success;
}
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// Dispatches first to the TableGen-generated custom operand parsers, then
/// handles the generic forms by leading token: '[' (address start), '{'
/// (vector list), identifiers (condition code, register, shift/extend, or
/// label expression), '#'/integer/real immediates, and the ldr-pseudo
/// "=value" form. Returns true on error.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();
    if (getLexer().is(AsmToken::Hash))
      Parser.Lex();

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(Loc, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize the value to a 16-bit chunk plus an LSL shift amount.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
          Operands.push_back(AArch64Operand::CreateImm(
                     MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Canonicalizes legacy "bcc"-style branch mnemonics to "b.cc", handles the
/// ".req" register-alias directive, expands the IC/DC/AT/TLBI sys aliases,
/// splits the mnemonic on '.' into tokens, and then parses the
/// comma-separated operand list (threading condition-code expectations into
/// parseOperand for the conditional compare/select families). Returns true
/// on error.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
3279 // FIXME: This entire function is a giant hack to provide us with decent
3280 // operand range validation/diagnostics until TableGen/MC can be extended
3281 // to support autogeneration of this kind of validation.
// Post-match semantic validation of a successfully-matched MCInst.
// Rejects forms that encode but are UNPREDICTABLE per the ARMv8 ARM
// (writeback base overlapping Rt/Rt2, Rt == Rt2 in load pairs) and
// immediate/relocation combinations TableGen cannot express.
// Loc[i] is the SMLoc of the i-th parsed operand, used so diagnostics
// point at the offending operand rather than the whole instruction.
// Returns true (and emits an Error) on failure.
// NOTE(review): this chunk is elided — several case-closing braces, the
// second opcode switch's tail, and the final 'return false;' are not
// visible here; comments below describe only what is shown.
3282 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3283 SmallVectorImpl<SMLoc> &Loc) {
3284 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3285 // Check for indexed addressing modes w/ the base register being the
3286 // same as a destination/source register or pair load where
3287 // the Rt == Rt2. All of those are undefined behaviour.
3288 switch (Inst.getOpcode()) {
// Pre/post-indexed LDP: operand 0 appears to be the tied writeback
// result, so Rt/Rt2/Rn start at index 1 — TODO confirm against the
// AArch64InstrInfo.td operand order.
3289 case AArch64::LDPSWpre:
3290 case AArch64::LDPWpost:
3291 case AArch64::LDPWpre:
3292 case AArch64::LDPXpost:
3293 case AArch64::LDPXpre: {
3294 unsigned Rt = Inst.getOperand(1).getReg();
3295 unsigned Rt2 = Inst.getOperand(2).getReg();
3296 unsigned Rn = Inst.getOperand(3).getReg();
// isSubRegisterEq also catches W/X aliasing (e.g. w1 vs x1).
3297 if (RI->isSubRegisterEq(Rn, Rt))
3298 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3299 "is also a destination")
3300 if (RI->isSubRegisterEq(Rn, Rt2))
3301 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3302 "is also a destination");
// Non-writeback LDP: no base-overlap hazard, but Rt == Rt2 is still
// unpredictable for loads. The comparison itself is elided from view
// (gap before the Error at 3314).
3305 case AArch64::LDPDi:
3306 case AArch64::LDPQi:
3307 case AArch64::LDPSi:
3308 case AArch64::LDPSWi:
3309 case AArch64::LDPWi:
3310 case AArch64::LDPXi: {
3311 unsigned Rt = Inst.getOperand(0).getReg();
3312 unsigned Rt2 = Inst.getOperand(1).getReg();
3314 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback FP/SIMD LDP variants: base register is in a different
// class than Rt/Rt2, so only the Rt == Rt2 check applies here.
3317 case AArch64::LDPDpost:
3318 case AArch64::LDPDpre:
3319 case AArch64::LDPQpost:
3320 case AArch64::LDPQpre:
3321 case AArch64::LDPSpost:
3322 case AArch64::LDPSpre:
3323 case AArch64::LDPSWpost: {
3324 unsigned Rt = Inst.getOperand(1).getReg();
3325 unsigned Rt2 = Inst.getOperand(2).getReg();
3327 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback STP: storing the base register while writing it back is
// unpredictable (value stored is unspecified).
3330 case AArch64::STPDpost:
3331 case AArch64::STPDpre:
3332 case AArch64::STPQpost:
3333 case AArch64::STPQpre:
3334 case AArch64::STPSpost:
3335 case AArch64::STPSpre:
3336 case AArch64::STPWpost:
3337 case AArch64::STPWpre:
3338 case AArch64::STPXpost:
3339 case AArch64::STPXpre: {
3340 unsigned Rt = Inst.getOperand(1).getReg();
3341 unsigned Rt2 = Inst.getOperand(2).getReg();
3342 unsigned Rn = Inst.getOperand(3).getReg();
3343 if (RI->isSubRegisterEq(Rn, Rt))
3344 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3345 "is also a source");
3346 if (RI->isSubRegisterEq(Rn, Rt2))
3347 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3348 "is also a source");
// Writeback single-register loads: destination must not overlap the
// base being written back.
3351 case AArch64::LDRBBpre:
3352 case AArch64::LDRBpre:
3353 case AArch64::LDRHHpre:
3354 case AArch64::LDRHpre:
3355 case AArch64::LDRSBWpre:
3356 case AArch64::LDRSBXpre:
3357 case AArch64::LDRSHWpre:
3358 case AArch64::LDRSHXpre:
3359 case AArch64::LDRSWpre:
3360 case AArch64::LDRWpre:
3361 case AArch64::LDRXpre:
3362 case AArch64::LDRBBpost:
3363 case AArch64::LDRBpost:
3364 case AArch64::LDRHHpost:
3365 case AArch64::LDRHpost:
3366 case AArch64::LDRSBWpost:
3367 case AArch64::LDRSBXpost:
3368 case AArch64::LDRSHWpost:
3369 case AArch64::LDRSHXpost:
3370 case AArch64::LDRSWpost:
3371 case AArch64::LDRWpost:
3372 case AArch64::LDRXpost: {
3373 unsigned Rt = Inst.getOperand(1).getReg();
3374 unsigned Rn = Inst.getOperand(2).getReg();
3375 if (RI->isSubRegisterEq(Rn, Rt))
3376 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3377 "is also a source");
// Writeback single-register stores: same overlap rule as STP above.
3380 case AArch64::STRBBpost:
3381 case AArch64::STRBpost:
3382 case AArch64::STRHHpost:
3383 case AArch64::STRHpost:
3384 case AArch64::STRWpost:
3385 case AArch64::STRXpost:
3386 case AArch64::STRBBpre:
3387 case AArch64::STRBpre:
3388 case AArch64::STRHHpre:
3389 case AArch64::STRHpre:
3390 case AArch64::STRWpre:
3391 case AArch64::STRXpre: {
3392 unsigned Rt = Inst.getOperand(1).getReg();
3393 unsigned Rn = Inst.getOperand(2).getReg();
3394 if (RI->isSubRegisterEq(Rn, Rt))
3395 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3396 "is also a source");
3401 // Now check immediate ranges. Separate from the above as there is overlap
3402 // in the instructions being checked and this keeps the nested conditionals
3404 switch (Inst.getOpcode()) {
3405 case AArch64::ADDSWri:
3406 case AArch64::ADDSXri:
3407 case AArch64::ADDWri:
3408 case AArch64::ADDXri:
3409 case AArch64::SUBSWri:
3410 case AArch64::SUBSXri:
3411 case AArch64::SUBWri:
3412 case AArch64::SUBXri: {
3413 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3414 // some slight duplication here.
3415 if (Inst.getOperand(2).isExpr()) {
3416 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3417 AArch64MCExpr::VariantKind ELFRefKind;
3418 MCSymbolRefExpr::VariantKind DarwinRefKind;
// classifySymbolRef splits the expression into an optional AArch64
// variant wrapper, a symbol ref, and a constant addend; failure means
// the expression cannot be relocated into an add/sub immediate.
3420 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3421 return Error(Loc[2], "invalid immediate expression");
3424 // Only allow these with ADDXri.
// Darwin @pageoff/@tlvppageoff relocations only exist for the 64-bit
// add form. The accepting 'return false;' bodies are elided from view.
3425 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3426 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3427 Inst.getOpcode() == AArch64::ADDXri)
3430 // Only allow these with ADDXri/ADDWri
3431 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3432 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3433 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3434 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3435 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3436 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3437 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3438 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3439 (Inst.getOpcode() == AArch64::ADDXri ||
3440 Inst.getOpcode() == AArch64::ADDWri))
3443 // Don't allow expressions in the immediate field otherwise
3444 return Error(Loc[2], "invalid immediate expression");
// Map a matcher failure code (Match_* enum from the TableGen-generated
// matcher) to a human-readable diagnostic emitted at Loc. Always returns
// true (the Error() convention for "parse failed").
// NOTE(review): elided view — the opening 'switch (ErrCode)', several
// 'return Error(Loc,' continuation heads (e.g. for Match_MRS/Match_MSR
// before 3569/3571), and the closing brace are not visible here.
3453 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3455 case Match_MissingFeature:
3457 "instruction requires a CPU feature not currently enabled");
3458 case Match_InvalidOperand:
3459 return Error(Loc, "invalid operand for instruction");
3460 case Match_InvalidSuffix:
3461 return Error(Loc, "invalid type suffix for instruction");
3462 case Match_InvalidCondCode:
3463 return Error(Loc, "expected AArch64 condition code");
3464 case Match_AddSubRegExtendSmall:
3466 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3467 case Match_AddSubRegExtendLarge:
3469 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3470 case Match_AddSubSecondSource:
3472 "expected compatible register, symbol or integer in range [0, 4095]");
3473 case Match_LogicalSecondSource:
3474 return Error(Loc, "expected compatible register or logical immediate");
3475 case Match_InvalidMovImm32Shift:
3476 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3477 case Match_InvalidMovImm64Shift:
3478 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3479 case Match_AddSubRegShift32:
3481 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3482 case Match_AddSubRegShift64:
3484 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3485 case Match_InvalidFPImm:
3487 "expected compatible register or floating-point constant");
// Signed scaled/unscaled memory offsets.
3488 case Match_InvalidMemoryIndexedSImm9:
3489 return Error(Loc, "index must be an integer in range [-256, 255].");
3490 case Match_InvalidMemoryIndexed4SImm7:
3491 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3492 case Match_InvalidMemoryIndexed8SImm7:
3493 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3494 case Match_InvalidMemoryIndexed16SImm7:
3495 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
// Register-offset extend diagnostics, W (32-bit index) then X (64-bit),
// one per access size 1/2/4/8/16 bytes.
3496 case Match_InvalidMemoryWExtend8:
3498 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3499 case Match_InvalidMemoryWExtend16:
3501 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3502 case Match_InvalidMemoryWExtend32:
3504 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3505 case Match_InvalidMemoryWExtend64:
3507 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3508 case Match_InvalidMemoryWExtend128:
3510 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3511 case Match_InvalidMemoryXExtend8:
3513 "expected 'lsl' or 'sxtx' with optional shift of #0");
3514 case Match_InvalidMemoryXExtend16:
3516 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3517 case Match_InvalidMemoryXExtend32:
3519 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3520 case Match_InvalidMemoryXExtend64:
3522 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3523 case Match_InvalidMemoryXExtend128:
3525 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Unsigned scaled immediate offsets (12-bit, scaled by access size).
3526 case Match_InvalidMemoryIndexed1:
3527 return Error(Loc, "index must be an integer in range [0, 4095].");
3528 case Match_InvalidMemoryIndexed2:
3529 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3530 case Match_InvalidMemoryIndexed4:
3531 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3532 case Match_InvalidMemoryIndexed8:
3533 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3534 case Match_InvalidMemoryIndexed16:
3535 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate-range operand classes.
3536 case Match_InvalidImm0_7:
3537 return Error(Loc, "immediate must be an integer in range [0, 7].");
3538 case Match_InvalidImm0_15:
3539 return Error(Loc, "immediate must be an integer in range [0, 15].");
3540 case Match_InvalidImm0_31:
3541 return Error(Loc, "immediate must be an integer in range [0, 31].");
3542 case Match_InvalidImm0_63:
3543 return Error(Loc, "immediate must be an integer in range [0, 63].");
3544 case Match_InvalidImm0_127:
3545 return Error(Loc, "immediate must be an integer in range [0, 127].");
3546 case Match_InvalidImm0_65535:
3547 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3548 case Match_InvalidImm1_8:
3549 return Error(Loc, "immediate must be an integer in range [1, 8].");
3550 case Match_InvalidImm1_16:
3551 return Error(Loc, "immediate must be an integer in range [1, 16].");
3552 case Match_InvalidImm1_32:
3553 return Error(Loc, "immediate must be an integer in range [1, 32].");
3554 case Match_InvalidImm1_64:
3555 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics (per element size B/H/S/D).
3556 case Match_InvalidIndex1:
3557 return Error(Loc, "expected lane specifier '[1]'");
3558 case Match_InvalidIndexB:
3559 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3560 case Match_InvalidIndexH:
3561 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3562 case Match_InvalidIndexS:
3563 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3564 case Match_InvalidIndexD:
3565 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3566 case Match_InvalidLabel:
3567 return Error(Loc, "expected label or encodable integer pc offset");
3569 return Error(Loc, "expected readable system register");
3571 return Error(Loc, "expected writable system register or pstate");
3572 case Match_MnemonicFail:
3573 return Error(Loc, "unrecognized instruction mnemonic");
// Reaching here means a new Match_* code was added to the matcher
// without a corresponding diagnostic above.
3575 llvm_unreachable("unexpected error code!");
3579 static const char *getSubtargetFeatureName(uint64_t Val);
// Top-level match-and-emit hook called by the generic asm parser after
// operand parsing. First rewrites a set of aliases the TableGen matcher
// cannot express (lsl->ubfm, bfi/sbfiz/ubfiz->*bfm, bfxil/sbfx/ubfx->*bfm,
// sxt*/uxt* register-width fixups, fmov #0.0 -> zero register), then runs
// the generated matcher (short-form NEON table first, long-form second),
// emits on success, and produces diagnostics on failure.
// Returns true on error (diagnostic already emitted), false on success.
// NOTE(review): this chunk is elided — many guard conditions, closing
// braces and 'return' statements are missing from view; the comments
// below annotate only the visible statements.
3581 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3582 OperandVector &Operands,
3584 uint64_t &ErrorInfo,
3585 bool MatchingInlineAsm) {
3586 assert(!Operands.empty() && "Unexpect empty operand list!");
// Operands[0] is always the mnemonic token pushed by ParseInstruction.
3587 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3588 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3590 StringRef Tok = Op.getToken();
3591 unsigned NumOperands = Operands.size();
// Alias: 'lsl Rd, Rn, #imm' is encoded as UBFM with rotated immediates.
3593 if (NumOperands == 4 && Tok == "lsl") {
3594 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3595 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3596 if (Op2.isReg() && Op3.isImm()) {
3597 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3599 uint64_t Op3Val = Op3CE->getValue();
3600 uint64_t NewOp3Val = 0;
3601 uint64_t NewOp4Val = 0;
// Immediate rotation depends on register width: lsl #s on a W reg is
// ubfm Wd, Wn, #(32-s) mod 32, #(31-s); analogously mod 64 for X.
3602 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3604 NewOp3Val = (32 - Op3Val) & 0x1f;
3605 NewOp4Val = 31 - Op3Val;
3607 NewOp3Val = (64 - Op3Val) & 0x3f;
3608 NewOp4Val = 63 - Op3Val;
3611 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3612 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
// Rewrite in place: mnemonic becomes "ubfm" and the two immediates
// replace/extend the operand list.
3614 Operands[0] = AArch64Operand::CreateToken(
3615 "ubfm", false, Op.getStartLoc(), getContext());
3616 Operands.push_back(AArch64Operand::CreateImm(
3617 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3618 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3619 Op3.getEndLoc(), getContext());
3622 } else if (NumOperands == 5) {
3623 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3624 // UBFIZ -> UBFM aliases.
3625 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3626 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3627 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3628 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3630 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3631 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3632 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3634 if (Op3CE && Op4CE) {
3635 uint64_t Op3Val = Op3CE->getValue();
3636 uint64_t Op4Val = Op4CE->getValue();
3638 uint64_t RegWidth = 0;
// RegWidth selection (32 vs 64) based on the destination register
// class; the else-branch assigning 32 is elided from view.
3639 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb and width against the register width.
// NOTE(review): the message text is fixed at "[0, 31]"/"[1, 32]"
// even when RegWidth is 64 — presumably matching upstream; confirm
// before changing, as tests may depend on the exact wording.
3645 if (Op3Val >= RegWidth)
3646 return Error(Op3.getStartLoc(),
3647 "expected integer in range [0, 31]");
3648 if (Op4Val < 1 || Op4Val > RegWidth)
3649 return Error(Op4.getStartLoc(),
3650 "expected integer in range [1, 32]");
// Convert (lsb, width) into BFM's (immr, imms) form.
3652 uint64_t NewOp3Val = 0;
3653 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3655 NewOp3Val = (32 - Op3Val) & 0x1f;
3657 NewOp3Val = (64 - Op3Val) & 0x3f;
3659 uint64_t NewOp4Val = Op4Val - 1;
// Reject inserts whose field would wrap past the register top.
3661 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3662 return Error(Op4.getStartLoc(),
3663 "requested insert overflows register");
3665 const MCExpr *NewOp3 =
3666 MCConstantExpr::Create(NewOp3Val, getContext());
3667 const MCExpr *NewOp4 =
3668 MCConstantExpr::Create(NewOp4Val, getContext());
3669 Operands[3] = AArch64Operand::CreateImm(
3670 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3671 Operands[4] = AArch64Operand::CreateImm(
3672 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// Swap the mnemonic for the canonical *BFM form. The 'if (Tok ==
// "bfi")' guard for the first branch is elided from view.
3674 Operands[0] = AArch64Operand::CreateToken(
3675 "bfm", false, Op.getStartLoc(), getContext());
3676 else if (Tok == "sbfiz")
3677 Operands[0] = AArch64Operand::CreateToken(
3678 "sbfm", false, Op.getStartLoc(), getContext());
3679 else if (Tok == "ubfiz")
3680 Operands[0] = AArch64Operand::CreateToken(
3681 "ubfm", false, Op.getStartLoc(), getContext());
3683 llvm_unreachable("No valid mnemonic for alias?");
3687 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3688 // UBFX -> UBFM aliases.
3689 } else if (NumOperands == 5 &&
3690 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3691 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3692 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3693 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3695 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3696 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3697 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3699 if (Op3CE && Op4CE) {
3700 uint64_t Op3Val = Op3CE->getValue();
3701 uint64_t Op4Val = Op4CE->getValue();
3703 uint64_t RegWidth = 0;
3704 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3710 if (Op3Val >= RegWidth)
3711 return Error(Op3.getStartLoc(),
3712 "expected integer in range [0, 31]");
3713 if (Op4Val < 1 || Op4Val > RegWidth)
3714 return Error(Op4.getStartLoc(),
3715 "expected integer in range [1, 32]");
// Extract form: imms = lsb + width - 1; immr (Operand 3) is unchanged.
3717 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3719 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3720 return Error(Op4.getStartLoc(),
3721 "requested extract overflows register");
3723 const MCExpr *NewOp4 =
3724 MCConstantExpr::Create(NewOp4Val, getContext());
3725 Operands[4] = AArch64Operand::CreateImm(
3726 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// 'if (Tok == "bfxil")' guard elided from view before this line.
3728 Operands[0] = AArch64Operand::CreateToken(
3729 "bfm", false, Op.getStartLoc(), getContext());
3730 else if (Tok == "sbfx")
3731 Operands[0] = AArch64Operand::CreateToken(
3732 "sbfm", false, Op.getStartLoc(), getContext());
3733 else if (Tok == "ubfx")
3734 Operands[0] = AArch64Operand::CreateToken(
3735 "ubfm", false, Op.getStartLoc(), getContext());
3737 llvm_unreachable("No valid mnemonic for alias?");
3742 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3743 // InstAlias can't quite handle this since the reg classes aren't
3745 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3746 // The source register can be Wn here, but the matcher expects a
3747 // GPR64. Twiddle it here if necessary.
3748 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3750 unsigned Reg = getXRegFromWReg(Op.getReg());
3751 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3752 Op.getEndLoc(), getContext());
3755 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3756 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3757 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3759 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3761 // The source register can be Wn here, but the matcher expects a
3762 // GPR64. Twiddle it here if necessary.
3763 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3765 unsigned Reg = getXRegFromWReg(Op.getReg());
3766 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3767 Op.getEndLoc(), getContext());
3771 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3772 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3773 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3775 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// uxt[bh] to an X destination is really the W form: narrow the
// *destination* (Operands[1]) to its W alias, unlike the sxt cases.
3777 // The source register can be Wn here, but the matcher expects a
3778 // GPR32. Twiddle it here if necessary.
3779 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3781 unsigned Reg = getWRegFromXReg(Op.getReg());
3782 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3783 Op.getEndLoc(), getContext());
3788 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3789 if (NumOperands == 3 && Tok == "fmov") {
3790 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3791 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 appears to be the sentinel for the #0.0
// literal — TODO confirm against AArch64Operand::CreateFPImm.
3792 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3794 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3798 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3799 Op.getEndLoc(), getContext());
3804 // First try to match against the secondary set of tables containing the
3805 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3806 unsigned MatchResult =
3807 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3809 // If that fails, try against the alternate table containing long-form NEON:
3810 // "fadd v0.2s, v1.2s, v2.2s"
3811 if (MatchResult != Match_Success)
3813 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3815 switch (MatchResult) {
3816 case Match_Success: {
3817 // Perform range checking and other semantic validations
3818 SmallVector<SMLoc, 8> OperandLocs;
3819 NumOperands = Operands.size();
3820 for (unsigned i = 1; i < NumOperands; ++i)
3821 OperandLocs.push_back(Operands[i]->getStartLoc());
3822 if (validateInstruction(Inst, OperandLocs))
3826 Out.EmitInstruction(Inst, STI);
3829 case Match_MissingFeature: {
3830 assert(ErrorInfo && "Unknown missing feature!");
3831 // Special case the error message for the very common case where only
3832 // a single subtarget feature is missing (neon, e.g.).
3833 std::string Msg = "instruction requires:";
// Walk ErrorInfo bit-by-bit and append each missing feature's name.
// The Mask initialization/shift lines are elided from view.
3835 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3836 if (ErrorInfo & Mask) {
3838 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3842 return Error(IDLoc, Msg);
3844 case Match_MnemonicFail:
3845 return showMatchError(IDLoc, MatchResult);
3846 case Match_InvalidOperand: {
// ErrorInfo is the index of the offending operand, or ~0ULL when the
// matcher could not pin one down.
3847 SMLoc ErrorLoc = IDLoc;
3848 if (ErrorInfo != ~0ULL) {
3849 if (ErrorInfo >= Operands.size())
3850 return Error(IDLoc, "too few operands for instruction");
3852 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3853 if (ErrorLoc == SMLoc())
3856 // If the match failed on a suffix token operand, tweak the diagnostic
3858 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3859 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3860 MatchResult = Match_InvalidSuffix;
3862 return showMatchError(ErrorLoc, MatchResult);
// All operand-class failures funnel through the same path: locate the
// operand from ErrorInfo and let showMatchError pick the message.
3864 case Match_InvalidMemoryIndexed1:
3865 case Match_InvalidMemoryIndexed2:
3866 case Match_InvalidMemoryIndexed4:
3867 case Match_InvalidMemoryIndexed8:
3868 case Match_InvalidMemoryIndexed16:
3869 case Match_InvalidCondCode:
3870 case Match_AddSubRegExtendSmall:
3871 case Match_AddSubRegExtendLarge:
3872 case Match_AddSubSecondSource:
3873 case Match_LogicalSecondSource:
3874 case Match_AddSubRegShift32:
3875 case Match_AddSubRegShift64:
3876 case Match_InvalidMovImm32Shift:
3877 case Match_InvalidMovImm64Shift:
3878 case Match_InvalidFPImm:
3879 case Match_InvalidMemoryWExtend8:
3880 case Match_InvalidMemoryWExtend16:
3881 case Match_InvalidMemoryWExtend32:
3882 case Match_InvalidMemoryWExtend64:
3883 case Match_InvalidMemoryWExtend128:
3884 case Match_InvalidMemoryXExtend8:
3885 case Match_InvalidMemoryXExtend16:
3886 case Match_InvalidMemoryXExtend32:
3887 case Match_InvalidMemoryXExtend64:
3888 case Match_InvalidMemoryXExtend128:
3889 case Match_InvalidMemoryIndexed4SImm7:
3890 case Match_InvalidMemoryIndexed8SImm7:
3891 case Match_InvalidMemoryIndexed16SImm7:
3892 case Match_InvalidMemoryIndexedSImm9:
3893 case Match_InvalidImm0_7:
3894 case Match_InvalidImm0_15:
3895 case Match_InvalidImm0_31:
3896 case Match_InvalidImm0_63:
3897 case Match_InvalidImm0_127:
3898 case Match_InvalidImm0_65535:
3899 case Match_InvalidImm1_8:
3900 case Match_InvalidImm1_16:
3901 case Match_InvalidImm1_32:
3902 case Match_InvalidImm1_64:
3903 case Match_InvalidIndex1:
3904 case Match_InvalidIndexB:
3905 case Match_InvalidIndexH:
3906 case Match_InvalidIndexS:
3907 case Match_InvalidIndexD:
3908 case Match_InvalidLabel:
3911 if (ErrorInfo >= Operands.size())
3912 return Error(IDLoc, "too few operands for instruction");
3913 // Any time we get here, there's nothing fancy to do. Just get the
3914 // operand SMLoc and display the diagnostic.
3915 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3916 if (ErrorLoc == SMLoc())
3918 return showMatchError(ErrorLoc, MatchResult);
3922 llvm_unreachable("Implement any new match types added!");
3925 /// ParseDirective parses the arm specific directives
// Dispatch AArch64-specific assembler directives to their handlers.
// Returns false if the directive was consumed; the fall-through return
// (for directives this target does not handle) is elided from view.
// .inst is restricted to ELF-style output (neither Mach-O nor COFF).
3926 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3927 const MCObjectFileInfo::Environment Format =
3928 getContext().getObjectFileInfo()->getObjectFileType();
3929 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3930 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3932 StringRef IDVal = DirectiveID.getIdentifier();
3933 SMLoc Loc = DirectiveID.getLoc();
3934 if (IDVal == ".hword")
3935 return parseDirectiveWord(2, Loc);
3936 if (IDVal == ".word")
3937 return parseDirectiveWord(4, Loc);
3938 if (IDVal == ".xword")
3939 return parseDirectiveWord(8, Loc);
3940 if (IDVal == ".tlsdesccall")
3941 return parseDirectiveTLSDescCall(Loc);
3942 if (IDVal == ".ltorg" || IDVal == ".pool")
3943 return parseDirectiveLtorg(Loc);
3944 if (IDVal == ".unreq")
3945 return parseDirectiveUnreq(DirectiveID.getLoc());
3947 if (!IsMachO && !IsCOFF) {
3948 if (IDVal == ".inst")
3949 return parseDirectiveInst(Loc);
// Anything else is tried as a Mach-O linker-optimization-hint (.loh).
3952 return parseDirectiveLOH(IDVal, Loc);
3955 /// parseDirectiveWord
3956 /// ::= .word [ expression (, expression)* ]
3957 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3958 MCAsmParser &Parser = getParser();
3959 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3961 const MCExpr *Value;
3962 if (getParser().parseExpression(Value))
3965 getParser().getStreamer().EmitValue(Value, Size);
3967 if (getLexer().is(AsmToken::EndOfStatement))
3970 // FIXME: Improve diagnostic.
3971 if (getLexer().isNot(AsmToken::Comma))
3972 return Error(L, "unexpected token in directive");
3981 /// parseDirectiveInst
3982 /// ::= .inst opcode [, ...]
3983 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
3984 MCAsmParser &Parser = getParser();
3985 if (getLexer().is(AsmToken::EndOfStatement)) {
3986 Parser.eatToEndOfStatement();
3987 Error(Loc, "expected expression following directive");
3994 if (getParser().parseExpression(Expr)) {
3995 Error(Loc, "expected expression");
3999 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4001 Error(Loc, "expected constant expression");
4005 getTargetStreamer().emitInst(Value->getValue());
4007 if (getLexer().is(AsmToken::EndOfStatement))
4010 if (getLexer().isNot(AsmToken::Comma)) {
4011 Error(Loc, "unexpected token in directive");
4015 Parser.Lex(); // Eat comma.
4022 // parseDirectiveTLSDescCall:
4023 // ::= .tlsdesccall symbol
4024 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4026 if (getParser().parseIdentifier(Name))
4027 return Error(L, "expected symbol after directive");
4029 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4030 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4031 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4034 Inst.setOpcode(AArch64::TLSDESCCALL);
4035 Inst.addOperand(MCOperand::CreateExpr(Expr));
4037 getParser().getStreamer().EmitInstruction(Inst, STI);
4041 /// ::= .loh <lohName | lohId> label1, ..., labelN
4042 /// The number of arguments depends on the loh identifier.
4041 /// ::= .loh <lohName | lohId> label1, ..., labelN
4042 /// The number of arguments depends on the loh identifier.
/// Parses a Mach-O linker-optimization-hint directive: an LOH kind given
/// either by name or by numeric id, followed by the exact number of
/// label arguments that kind requires, and forwards it to the streamer.
/// The 'MCLOHType Kind;' declaration and several brace lines are elided
/// from this view.
4043 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4044 if (IDVal != MCLOHDirectiveName())
4047 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4048 if (getParser().getTok().isNot(AsmToken::Integer))
4049 return TokError("expected an identifier or a number in directive");
4050 // We successfully get a numeric value for the identifier.
4051 // Check if it is valid.
4052 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): 'Id <= -1U' compares int64_t against unsigned — the
// -1U converts to 4294967295, so this bound is suspicious; verify the
// intended range check before relying on it.
4053 if (Id <= -1U && !isValidMCLOHType(Id))
4054 return TokError("invalid numeric identifier in directive");
4055 Kind = (MCLOHType)Id;
4057 StringRef Name = getTok().getIdentifier();
4058 // We successfully parse an identifier.
4059 // Check if it is a recognized one.
4060 int Id = MCLOHNameToId(Name);
4063 return TokError("invalid identifier in directive");
4064 Kind = (MCLOHType)Id;
4066 // Consume the identifier.
4068 // Get the number of arguments of this LOH.
4069 int NbArgs = MCLOHIdToNbArgs(Kind);
4071 assert(NbArgs != -1 && "Invalid number of arguments");
4073 SmallVector<MCSymbol *, 3> Args;
4074 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4076 if (getParser().parseIdentifier(Name))
4077 return TokError("expected identifier in directive");
4078 Args.push_back(getContext().GetOrCreateSymbol(Name));
// Last argument: no trailing comma expected.
4080 if (Idx + 1 == NbArgs)
4082 if (getLexer().isNot(AsmToken::Comma))
4083 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4086 if (getLexer().isNot(AsmToken::EndOfStatement))
4087 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4089 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4093 /// parseDirectiveLtorg
4094 /// ::= .ltorg | .pool
4095 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4096 getTargetStreamer().emitCurrentConstantPool();
4100 /// parseDirectiveReq
4101 /// ::= name .req registername
4102 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4103 MCAsmParser &Parser = getParser();
4104 Parser.Lex(); // Eat the '.req' token.
4105 SMLoc SRegLoc = getLoc();
4106 unsigned RegNum = tryParseRegister();
4107 bool IsVector = false;
4109 if (RegNum == static_cast<unsigned>(-1)) {
4111 RegNum = tryMatchVectorRegister(Kind, false);
4112 if (!Kind.empty()) {
4113 Error(SRegLoc, "vector register without type specifier expected");
4119 if (RegNum == static_cast<unsigned>(-1)) {
4120 Parser.eatToEndOfStatement();
4121 Error(SRegLoc, "register name or alias expected");
4125 // Shouldn't be anything else.
4126 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4127 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4128 Parser.eatToEndOfStatement();
4132 Parser.Lex(); // Consume the EndOfStatement
4134 auto pair = std::make_pair(IsVector, RegNum);
4135 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4136 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4141 /// parseDirectiveUneq
4142 /// ::= .unreq registername
4141 /// parseDirectiveUneq
4142 ///  ::= .unreq registername
/// Removes a .req alias. Erasing an unknown name is silently a no-op.
/// Note the lookup lower-cases the identifier — presumably matching how
/// .req aliases are stored; TODO confirm against parseDirectiveReq's key.
4143 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4144 MCAsmParser &Parser = getParser();
4145 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4146 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4147 Parser.eatToEndOfStatement();
4150 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4151 Parser.Lex(); // Eat the identifier.
// Decompose Expr into (ELF variant kind, Darwin variant kind, constant
// addend). Accepts: a bare symbol ref, optionally wrapped in an
// AArch64MCExpr variant, or symbol +/- constant. Returns true when the
// expression fits one of those shapes and does not mix ELF and Darwin
// relocation syntax. The 'bool' return-type line of the signature, the
// Addend parameter line, and several early 'return true/false;' lines
// are elided from this view.
4156 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4157 AArch64MCExpr::VariantKind &ELFRefKind,
4158 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4160 ELFRefKind = AArch64MCExpr::VK_INVALID;
4161 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel an AArch64-specific variant wrapper (e.g. :lo12:sym) first.
4164 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4165 ELFRefKind = AE->getKind();
4166 Expr = AE->getSubExpr();
4169 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4171 // It's a simple symbol reference with no addend.
4172 DarwinRefKind = SE->getKind();
// Otherwise it must be sym op constant with op in {+, -}.
4176 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4180 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4183 DarwinRefKind = SE->getKind();
4185 if (BE->getOpcode() != MCBinaryExpr::Add &&
4186 BE->getOpcode() != MCBinaryExpr::Sub)
4189 // See if the addend is is a constant, otherwise there's more going
4190 // on here than we can deal with.
4191 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4195 Addend = AddendExpr->getValue();
// Negate for subtraction so Addend always means "value added".
4196 if (BE->getOpcode() == MCBinaryExpr::Sub)
4199 // It's some symbol reference + a constant addend, but really
4200 // shouldn't use both Darwin and ELF syntax.
4201 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4202 DarwinRefKind == MCSymbolRefExpr::VK_None;
4205 /// Force static initialization.
// Registers this asm parser for all three AArch64 target triples
// (little-endian, big-endian, and the legacy "arm64" name). Called from
// the target-registry initialization machinery; the closing brace is
// elided from this view.
4206 extern "C" void LLVMInitializeAArch64AsmParser() {
4207 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4208 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4209 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4212 #define GET_REGISTER_MATCHER
4213 #define GET_SUBTARGET_FEATURE_NAME
4214 #define GET_MATCHER_IMPLEMENTATION
4215 #include "AArch64GenAsmMatcher.inc"
4217 // Define this matcher function after the auto-generated include so we
4218 // have the match class enum definitions.
4219 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4221 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4222 // If the kind is a token for a literal immediate, check if our asm
4223 // operand matches. This is for InstAliases which have a fixed-value
4224 // immediate in the syntax.
4225 int64_t ExpectedVal;
4228 return Match_InvalidOperand;
4270 return Match_InvalidOperand;
4271 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4273 return Match_InvalidOperand;
4274 if (CE->getValue() == ExpectedVal)
4275 return Match_Success;
4276 return Match_InvalidOperand;