1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
42 class AArch64AsmParser : public MCTargetAsmParser {
44 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
76 bool parseDirectiveTLSDescCall(SMLoc L);
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
116 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
117 const MCInstrInfo &MII,
118 const MCTargetOptions &Options)
119 : MCTargetAsmParser(), STI(_STI) {
120 MCAsmParserExtension::Initialize(_Parser);
121 MCStreamer &S = getParser().getStreamer();
122 if (S.getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(S);
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 class AArch64Operand : public MCParsedAsmOperand {
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
215 uint32_t PStateField;
226 struct ShiftExtendOp {
227 AArch64_AM::ShiftExtendType Type;
229 bool HasExplicitAmount;
239 struct VectorListOp VectorList;
240 struct VectorIndexOp VectorIndex;
242 struct ShiftedImmOp ShiftedImm;
243 struct CondCodeOp CondCode;
244 struct FPImmOp FPImm;
245 struct BarrierOp Barrier;
246 struct SysRegOp SysReg;
247 struct SysCRImmOp SysCRImm;
248 struct PrefetchOp Prefetch;
249 struct ShiftExtendOp ShiftExtend;
252 // Keep the MCContext around as the MCExprs may need manipulated during
253 // the add<>Operands() calls.
257 AArch64Operand(KindTy K, MCContext &_Ctx)
258 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
260 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
262 StartLoc = o.StartLoc;
272 ShiftedImm = o.ShiftedImm;
275 CondCode = o.CondCode;
287 VectorList = o.VectorList;
290 VectorIndex = o.VectorIndex;
296 SysCRImm = o.SysCRImm;
299 Prefetch = o.Prefetch;
302 ShiftExtend = o.ShiftExtend;
307 /// getStartLoc - Get the location of the first token of this operand.
308 SMLoc getStartLoc() const override { return StartLoc; }
309 /// getEndLoc - Get the location of the last token of this operand.
310 SMLoc getEndLoc() const override { return EndLoc; }
312 StringRef getToken() const {
313 assert(Kind == k_Token && "Invalid access!");
314 return StringRef(Tok.Data, Tok.Length);
317 bool isTokenSuffix() const {
318 assert(Kind == k_Token && "Invalid access!");
322 const MCExpr *getImm() const {
323 assert(Kind == k_Immediate && "Invalid access!");
327 const MCExpr *getShiftedImmVal() const {
328 assert(Kind == k_ShiftedImm && "Invalid access!");
329 return ShiftedImm.Val;
332 unsigned getShiftedImmShift() const {
333 assert(Kind == k_ShiftedImm && "Invalid access!");
334 return ShiftedImm.ShiftAmount;
337 AArch64CC::CondCode getCondCode() const {
338 assert(Kind == k_CondCode && "Invalid access!");
339 return CondCode.Code;
342 unsigned getFPImm() const {
343 assert(Kind == k_FPImm && "Invalid access!");
347 unsigned getBarrier() const {
348 assert(Kind == k_Barrier && "Invalid access!");
352 unsigned getReg() const override {
353 assert(Kind == k_Register && "Invalid access!");
357 unsigned getVectorListStart() const {
358 assert(Kind == k_VectorList && "Invalid access!");
359 return VectorList.RegNum;
362 unsigned getVectorListCount() const {
363 assert(Kind == k_VectorList && "Invalid access!");
364 return VectorList.Count;
367 unsigned getVectorIndex() const {
368 assert(Kind == k_VectorIndex && "Invalid access!");
369 return VectorIndex.Val;
372 StringRef getSysReg() const {
373 assert(Kind == k_SysReg && "Invalid access!");
374 return StringRef(SysReg.Data, SysReg.Length);
377 unsigned getSysCR() const {
378 assert(Kind == k_SysCR && "Invalid access!");
382 unsigned getPrefetch() const {
383 assert(Kind == k_Prefetch && "Invalid access!");
387 AArch64_AM::ShiftExtendType getShiftExtendType() const {
388 assert(Kind == k_ShiftExtend && "Invalid access!");
389 return ShiftExtend.Type;
392 unsigned getShiftExtendAmount() const {
393 assert(Kind == k_ShiftExtend && "Invalid access!");
394 return ShiftExtend.Amount;
397 bool hasShiftExtendAmount() const {
398 assert(Kind == k_ShiftExtend && "Invalid access!");
399 return ShiftExtend.HasExplicitAmount;
402 bool isImm() const override { return Kind == k_Immediate; }
403 bool isMem() const override { return false; }
404 bool isSImm9() const {
407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
410 int64_t Val = MCE->getValue();
411 return (Val >= -256 && Val < 256);
413 bool isSImm7s4() const {
416 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
419 int64_t Val = MCE->getValue();
420 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
422 bool isSImm7s8() const {
425 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
428 int64_t Val = MCE->getValue();
429 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
431 bool isSImm7s16() const {
434 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
437 int64_t Val = MCE->getValue();
438 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
441 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
442 AArch64MCExpr::VariantKind ELFRefKind;
443 MCSymbolRefExpr::VariantKind DarwinRefKind;
445 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
447 // If we don't understand the expression, assume the best and
448 // let the fixup and relocation code deal with it.
452 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
453 ELFRefKind == AArch64MCExpr::VK_LO12 ||
454 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
455 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
456 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
457 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
458 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
459 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
460 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
461 // Note that we don't range-check the addend. It's adjusted modulo page
462 // size when converted, so there is no "out of range" condition when using
464 return Addend >= 0 && (Addend % Scale) == 0;
465 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
466 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
467 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
474 template <int Scale> bool isUImm12Offset() const {
478 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
480 return isSymbolicUImm12Offset(getImm(), Scale);
482 int64_t Val = MCE->getValue();
483 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
486 bool isImm0_7() const {
489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 int64_t Val = MCE->getValue();
493 return (Val >= 0 && Val < 8);
495 bool isImm1_8() const {
498 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
501 int64_t Val = MCE->getValue();
502 return (Val > 0 && Val < 9);
504 bool isImm0_15() const {
507 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
510 int64_t Val = MCE->getValue();
511 return (Val >= 0 && Val < 16);
513 bool isImm1_16() const {
516 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
519 int64_t Val = MCE->getValue();
520 return (Val > 0 && Val < 17);
522 bool isImm0_31() const {
525 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
528 int64_t Val = MCE->getValue();
529 return (Val >= 0 && Val < 32);
531 bool isImm1_31() const {
534 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
537 int64_t Val = MCE->getValue();
538 return (Val >= 1 && Val < 32);
540 bool isImm1_32() const {
543 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
546 int64_t Val = MCE->getValue();
547 return (Val >= 1 && Val < 33);
549 bool isImm0_63() const {
552 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
555 int64_t Val = MCE->getValue();
556 return (Val >= 0 && Val < 64);
558 bool isImm1_63() const {
561 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
564 int64_t Val = MCE->getValue();
565 return (Val >= 1 && Val < 64);
567 bool isImm1_64() const {
570 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
573 int64_t Val = MCE->getValue();
574 return (Val >= 1 && Val < 65);
576 bool isImm0_127() const {
579 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
582 int64_t Val = MCE->getValue();
583 return (Val >= 0 && Val < 128);
585 bool isImm0_255() const {
588 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
591 int64_t Val = MCE->getValue();
592 return (Val >= 0 && Val < 256);
594 bool isImm0_65535() const {
597 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
600 int64_t Val = MCE->getValue();
601 return (Val >= 0 && Val < 65536);
603 bool isImm32_63() const {
606 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
609 int64_t Val = MCE->getValue();
610 return (Val >= 32 && Val < 64);
612 bool isLogicalImm32() const {
615 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
618 int64_t Val = MCE->getValue();
619 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
622 return AArch64_AM::isLogicalImmediate(Val, 32);
624 bool isLogicalImm64() const {
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
632 bool isLogicalImm32Not() const {
635 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
638 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
639 return AArch64_AM::isLogicalImmediate(Val, 32);
641 bool isLogicalImm64Not() const {
644 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
647 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
649 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
650 bool isAddSubImm() const {
651 if (!isShiftedImm() && !isImm())
656 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
657 if (isShiftedImm()) {
658 unsigned Shift = ShiftedImm.ShiftAmount;
659 Expr = ShiftedImm.Val;
660 if (Shift != 0 && Shift != 12)
666 AArch64MCExpr::VariantKind ELFRefKind;
667 MCSymbolRefExpr::VariantKind DarwinRefKind;
669 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
670 DarwinRefKind, Addend)) {
671 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
672 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
673 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
674 || ELFRefKind == AArch64MCExpr::VK_LO12
675 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
676 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
677 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
678 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
679 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
680 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
681 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
684 // Otherwise it should be a real immediate in range:
685 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
686 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
688 bool isCondCode() const { return Kind == k_CondCode; }
689 bool isSIMDImmType10() const {
692 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
695 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
697 bool isBranchTarget26() const {
700 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
703 int64_t Val = MCE->getValue();
706 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
708 bool isPCRelLabel19() const {
711 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
714 int64_t Val = MCE->getValue();
717 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
719 bool isBranchTarget14() const {
722 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
725 int64_t Val = MCE->getValue();
728 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
732 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
736 AArch64MCExpr::VariantKind ELFRefKind;
737 MCSymbolRefExpr::VariantKind DarwinRefKind;
739 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
740 DarwinRefKind, Addend)) {
743 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
746 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
747 if (ELFRefKind == AllowedModifiers[i])
754 bool isMovZSymbolG3() const {
755 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
758 bool isMovZSymbolG2() const {
759 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
760 AArch64MCExpr::VK_TPREL_G2,
761 AArch64MCExpr::VK_DTPREL_G2});
764 bool isMovZSymbolG1() const {
765 return isMovWSymbol({
766 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
767 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
768 AArch64MCExpr::VK_DTPREL_G1,
772 bool isMovZSymbolG0() const {
773 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
774 AArch64MCExpr::VK_TPREL_G0,
775 AArch64MCExpr::VK_DTPREL_G0});
778 bool isMovKSymbolG3() const {
779 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
782 bool isMovKSymbolG2() const {
783 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
786 bool isMovKSymbolG1() const {
787 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
788 AArch64MCExpr::VK_TPREL_G1_NC,
789 AArch64MCExpr::VK_DTPREL_G1_NC});
792 bool isMovKSymbolG0() const {
794 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
795 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
798 template<int RegWidth, int Shift>
799 bool isMOVZMovAlias() const {
800 if (!isImm()) return false;
802 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
803 if (!CE) return false;
804 uint64_t Value = CE->getValue();
807 Value &= 0xffffffffULL;
809 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
810 if (Value == 0 && Shift != 0)
813 return (Value & ~(0xffffULL << Shift)) == 0;
816 template<int RegWidth, int Shift>
817 bool isMOVNMovAlias() const {
818 if (!isImm()) return false;
820 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
821 if (!CE) return false;
822 uint64_t Value = CE->getValue();
824 // MOVZ takes precedence over MOVN.
825 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
826 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
831 Value &= 0xffffffffULL;
833 return (Value & ~(0xffffULL << Shift)) == 0;
836 bool isFPImm() const { return Kind == k_FPImm; }
837 bool isBarrier() const { return Kind == k_Barrier; }
838 bool isSysReg() const { return Kind == k_SysReg; }
839 bool isMRSSystemRegister() const {
840 if (!isSysReg()) return false;
842 return SysReg.MRSReg != -1U;
844 bool isMSRSystemRegister() const {
845 if (!isSysReg()) return false;
847 return SysReg.MSRReg != -1U;
849 bool isSystemPStateField() const {
850 if (!isSysReg()) return false;
852 return SysReg.PStateField != -1U;
854 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
855 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
856 bool isVectorRegLo() const {
857 return Kind == k_Register && Reg.isVector &&
858 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
861 bool isGPR32as64() const {
862 return Kind == k_Register && !Reg.isVector &&
863 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
866 bool isGPR64sp0() const {
867 return Kind == k_Register && !Reg.isVector &&
868 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
871 /// Is this a vector list with the type implicit (presumably attached to the
872 /// instruction itself)?
873 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
874 return Kind == k_VectorList && VectorList.Count == NumRegs &&
875 !VectorList.ElementKind;
878 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
879 bool isTypedVectorList() const {
880 if (Kind != k_VectorList)
882 if (VectorList.Count != NumRegs)
884 if (VectorList.ElementKind != ElementKind)
886 return VectorList.NumElements == NumElements;
889 bool isVectorIndex1() const {
890 return Kind == k_VectorIndex && VectorIndex.Val == 1;
892 bool isVectorIndexB() const {
893 return Kind == k_VectorIndex && VectorIndex.Val < 16;
895 bool isVectorIndexH() const {
896 return Kind == k_VectorIndex && VectorIndex.Val < 8;
898 bool isVectorIndexS() const {
899 return Kind == k_VectorIndex && VectorIndex.Val < 4;
901 bool isVectorIndexD() const {
902 return Kind == k_VectorIndex && VectorIndex.Val < 2;
904 bool isToken() const override { return Kind == k_Token; }
905 bool isTokenEqual(StringRef Str) const {
906 return Kind == k_Token && getToken() == Str;
908 bool isSysCR() const { return Kind == k_SysCR; }
909 bool isPrefetch() const { return Kind == k_Prefetch; }
910 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
911 bool isShifter() const {
912 if (!isShiftExtend())
915 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
916 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
917 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
918 ST == AArch64_AM::MSL);
920 bool isExtend() const {
921 if (!isShiftExtend())
924 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
925 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
926 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
927 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
928 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
929 ET == AArch64_AM::LSL) &&
930 getShiftExtendAmount() <= 4;
933 bool isExtend64() const {
936 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
937 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
938 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
940 bool isExtendLSL64() const {
943 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
944 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
945 ET == AArch64_AM::LSL) &&
946 getShiftExtendAmount() <= 4;
949 template<int Width> bool isMemXExtend() const {
952 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
953 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
954 (getShiftExtendAmount() == Log2_32(Width / 8) ||
955 getShiftExtendAmount() == 0);
958 template<int Width> bool isMemWExtend() const {
961 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
962 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
963 (getShiftExtendAmount() == Log2_32(Width / 8) ||
964 getShiftExtendAmount() == 0);
967 template <unsigned width>
968 bool isArithmeticShifter() const {
972 // An arithmetic shifter is LSL, LSR, or ASR.
973 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
974 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
975 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
978 template <unsigned width>
979 bool isLogicalShifter() const {
983 // A logical shifter is LSL, LSR, ASR or ROR.
984 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
985 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
986 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
987 getShiftExtendAmount() < width;
990 bool isMovImm32Shifter() const {
994 // A MOVi shifter is LSL of 0, 16, 32, or 48.
995 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
996 if (ST != AArch64_AM::LSL)
998 uint64_t Val = getShiftExtendAmount();
999 return (Val == 0 || Val == 16);
1002 bool isMovImm64Shifter() const {
1006 // A MOVi shifter is LSL of 0 or 16.
1007 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1008 if (ST != AArch64_AM::LSL)
1010 uint64_t Val = getShiftExtendAmount();
1011 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1014 bool isLogicalVecShifter() const {
1018 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1019 unsigned Shift = getShiftExtendAmount();
1020 return getShiftExtendType() == AArch64_AM::LSL &&
1021 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1024 bool isLogicalVecHalfWordShifter() const {
1025 if (!isLogicalVecShifter())
1028 // A logical vector shifter is a left shift by 0 or 8.
1029 unsigned Shift = getShiftExtendAmount();
1030 return getShiftExtendType() == AArch64_AM::LSL &&
1031 (Shift == 0 || Shift == 8);
1034 bool isMoveVecShifter() const {
1035 if (!isShiftExtend())
1038 // A logical vector shifter is a left shift by 8 or 16.
1039 unsigned Shift = getShiftExtendAmount();
1040 return getShiftExtendType() == AArch64_AM::MSL &&
1041 (Shift == 8 || Shift == 16);
1044 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1045 // to LDUR/STUR when the offset is not legal for the former but is for
1046 // the latter. As such, in addition to checking for being a legal unscaled
1047 // address, also check that it is not a legal scaled address. This avoids
1048 // ambiguity in the matcher.
1050 bool isSImm9OffsetFB() const {
1051 return isSImm9() && !isUImm12Offset<Width / 8>();
1054 bool isAdrpLabel() const {
1055 // Validation was handled during parsing, so we just sanity check that
1056 // something didn't go haywire.
1060 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1061 int64_t Val = CE->getValue();
1062 int64_t Min = - (4096 * (1LL << (21 - 1)));
1063 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1064 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1070 bool isAdrLabel() const {
1071 // Validation was handled during parsing, so we just sanity check that
1072 // something didn't go haywire.
1076 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1077 int64_t Val = CE->getValue();
1078 int64_t Min = - (1LL << (21 - 1));
1079 int64_t Max = ((1LL << (21 - 1)) - 1);
1080 return Val >= Min && Val <= Max;
1086 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1087 // Add as immediates when possible. Null MCExpr = 0.
1089 Inst.addOperand(MCOperand::CreateImm(0));
1090 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1091 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1093 Inst.addOperand(MCOperand::CreateExpr(Expr));
1096 void addRegOperands(MCInst &Inst, unsigned N) const {
1097 assert(N == 1 && "Invalid number of operands!");
1098 Inst.addOperand(MCOperand::CreateReg(getReg()));
1101 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1102 assert(N == 1 && "Invalid number of operands!");
1104 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1106 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1107 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1108 RI->getEncodingValue(getReg()));
1110 Inst.addOperand(MCOperand::CreateReg(Reg));
1113 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1114 assert(N == 1 && "Invalid number of operands!");
1116 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1117 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1120 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1121 assert(N == 1 && "Invalid number of operands!");
1123 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1124 Inst.addOperand(MCOperand::CreateReg(getReg()));
1127 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1128 assert(N == 1 && "Invalid number of operands!");
1129 Inst.addOperand(MCOperand::CreateReg(getReg()));
1132 template <unsigned NumRegs>
1133 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1134 assert(N == 1 && "Invalid number of operands!");
1135 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1136 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1137 unsigned FirstReg = FirstRegs[NumRegs - 1];
1140 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1143 template <unsigned NumRegs>
1144 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1147 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1148 unsigned FirstReg = FirstRegs[NumRegs - 1];
1151 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1154 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1156 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1159 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1164 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1165 assert(N == 1 && "Invalid number of operands!");
1166 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1169 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1170 assert(N == 1 && "Invalid number of operands!");
1171 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1174 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1176 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1179 void addImmOperands(MCInst &Inst, unsigned N) const {
1180 assert(N == 1 && "Invalid number of operands!");
1181 // If this is a pageoff symrefexpr with an addend, adjust the addend
1182 // to be only the page-offset portion. Otherwise, just add the expr
1184 addExpr(Inst, getImm());
1187 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1188 assert(N == 2 && "Invalid number of operands!");
1189 if (isShiftedImm()) {
1190 addExpr(Inst, getShiftedImmVal());
1191 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1193 addExpr(Inst, getImm());
1194 Inst.addOperand(MCOperand::CreateImm(0));
1198 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1199 assert(N == 1 && "Invalid number of operands!");
1200 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1203 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1204 assert(N == 1 && "Invalid number of operands!");
1205 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1207 addExpr(Inst, getImm());
1209 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1212 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1213 addImmOperands(Inst, N);
1217 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1218 assert(N == 1 && "Invalid number of operands!");
1219 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1222 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1225 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1228 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1229 assert(N == 1 && "Invalid number of operands!");
1230 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1231 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1234 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1235 assert(N == 1 && "Invalid number of operands!");
1236 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1237 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1240 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1243 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1246 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1249 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1252 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1255 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1258 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1261 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1264 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1265 assert(N == 1 && "Invalid number of operands!");
1266 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1267 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1270 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1271 assert(N == 1 && "Invalid number of operands!");
1272 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1273 assert(MCE && "Invalid constant immediate operand!");
1274 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1277 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1280 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1283 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1284 assert(N == 1 && "Invalid number of operands!");
1285 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1286 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1295 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1301 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1307 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1313 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1319 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1325 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1331 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1337 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1341 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1342 Inst.addOperand(MCOperand::CreateImm(encoding));
1345 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1349 Inst.addOperand(MCOperand::CreateImm(encoding));
1352 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1355 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1356 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1357 Inst.addOperand(MCOperand::CreateImm(encoding));
1360 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1361 assert(N == 1 && "Invalid number of operands!");
1362 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1364 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1365 Inst.addOperand(MCOperand::CreateImm(encoding));
1368 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1369 assert(N == 1 && "Invalid number of operands!");
1370 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1371 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1372 Inst.addOperand(MCOperand::CreateImm(encoding));
1375 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1376 // Branch operands don't encode the low bits, so shift them off
1377 // here. If it's a label, however, just put it on directly as there's
1378 // not enough information now to do anything.
1379 assert(N == 1 && "Invalid number of operands!");
1380 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1382 addExpr(Inst, getImm());
1385 assert(MCE && "Invalid constant immediate operand!");
1386 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1389 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1390 // Branch operands don't encode the low bits, so shift them off
1391 // here. If it's a label, however, just put it on directly as there's
1392 // not enough information now to do anything.
1393 assert(N == 1 && "Invalid number of operands!");
1394 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1396 addExpr(Inst, getImm());
1399 assert(MCE && "Invalid constant immediate operand!");
1400 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1403 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1404 // Branch operands don't encode the low bits, so shift them off
1405 // here. If it's a label, however, just put it on directly as there's
1406 // not enough information now to do anything.
1407 assert(N == 1 && "Invalid number of operands!");
1408 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1410 addExpr(Inst, getImm());
1413 assert(MCE && "Invalid constant immediate operand!");
1414 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1417 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1418 assert(N == 1 && "Invalid number of operands!");
1419 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1422 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1423 assert(N == 1 && "Invalid number of operands!");
1424 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1427 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1428 assert(N == 1 && "Invalid number of operands!");
1430 Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
1433 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1434 assert(N == 1 && "Invalid number of operands!");
1436 Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
1439 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1440 assert(N == 1 && "Invalid number of operands!");
1442 Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
1445 void addSysCROperands(MCInst &Inst, unsigned N) const {
1446 assert(N == 1 && "Invalid number of operands!");
1447 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1450 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1451 assert(N == 1 && "Invalid number of operands!");
1452 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1455 void addShifterOperands(MCInst &Inst, unsigned N) const {
1456 assert(N == 1 && "Invalid number of operands!");
1458 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1459 Inst.addOperand(MCOperand::CreateImm(Imm));
1462 void addExtendOperands(MCInst &Inst, unsigned N) const {
1463 assert(N == 1 && "Invalid number of operands!");
1464 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1465 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1466 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1467 Inst.addOperand(MCOperand::CreateImm(Imm));
1470 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1471 assert(N == 1 && "Invalid number of operands!");
1472 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1473 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1474 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1475 Inst.addOperand(MCOperand::CreateImm(Imm));
1478 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1479 assert(N == 2 && "Invalid number of operands!");
1480 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1481 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1482 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1483 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1486 // For 8-bit load/store instructions with a register offset, both the
1487 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1488 // they're disambiguated by whether the shift was explicit or implicit rather
1490 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1491 assert(N == 2 && "Invalid number of operands!");
1492 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1493 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1494 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1495 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1499 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1500 assert(N == 1 && "Invalid number of operands!");
1502 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1503 uint64_t Value = CE->getValue();
1504 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1508 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1509 assert(N == 1 && "Invalid number of operands!");
1511 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1512 uint64_t Value = CE->getValue();
1513 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1516 void print(raw_ostream &OS) const override;
1518 static std::unique_ptr<AArch64Operand>
1519 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1520 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1521 Op->Tok.Data = Str.data();
1522 Op->Tok.Length = Str.size();
1523 Op->Tok.IsSuffix = IsSuffix;
1529 static std::unique_ptr<AArch64Operand>
1530 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1531 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1532 Op->Reg.RegNum = RegNum;
1533 Op->Reg.isVector = isVector;
1539 static std::unique_ptr<AArch64Operand>
1540 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1541 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1542 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1543 Op->VectorList.RegNum = RegNum;
1544 Op->VectorList.Count = Count;
1545 Op->VectorList.NumElements = NumElements;
1546 Op->VectorList.ElementKind = ElementKind;
1552 static std::unique_ptr<AArch64Operand>
1553 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1554 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1555 Op->VectorIndex.Val = Idx;
1561 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1562 SMLoc E, MCContext &Ctx) {
1563 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1570 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1571 unsigned ShiftAmount,
1574 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1575 Op->ShiftedImm .Val = Val;
1576 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1582 static std::unique_ptr<AArch64Operand>
1583 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1584 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1585 Op->CondCode.Code = Code;
1591 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1593 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1594 Op->FPImm.Val = Val;
1600 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1602 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1603 Op->Barrier.Val = Val;
1609 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1612 uint32_t PStateField,
1614 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1615 Op->SysReg.Data = Str.data();
1616 Op->SysReg.Length = Str.size();
1617 Op->SysReg.MRSReg = MRSReg;
1618 Op->SysReg.MSRReg = MSRReg;
1619 Op->SysReg.PStateField = PStateField;
1625 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1626 SMLoc E, MCContext &Ctx) {
1627 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1628 Op->SysCRImm.Val = Val;
1634 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1636 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1637 Op->Prefetch.Val = Val;
1643 static std::unique_ptr<AArch64Operand>
1644 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1645 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1646 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1647 Op->ShiftExtend.Type = ShOp;
1648 Op->ShiftExtend.Amount = Val;
1649 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1656 } // end anonymous namespace.
1658 void AArch64Operand::print(raw_ostream &OS) const {
1661 OS << "<fpimm " << getFPImm() << "("
1662 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1666 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1668 OS << "<barrier " << Name << ">";
1670 OS << "<barrier invalid #" << getBarrier() << ">";
1674 getImm()->print(OS);
1676 case k_ShiftedImm: {
1677 unsigned Shift = getShiftedImmShift();
1678 OS << "<shiftedimm ";
1679 getShiftedImmVal()->print(OS);
1680 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1684 OS << "<condcode " << getCondCode() << ">";
1687 OS << "<register " << getReg() << ">";
1689 case k_VectorList: {
1690 OS << "<vectorlist ";
1691 unsigned Reg = getVectorListStart();
1692 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1693 OS << Reg + i << " ";
1698 OS << "<vectorindex " << getVectorIndex() << ">";
1701 OS << "<sysreg: " << getSysReg() << '>';
1704 OS << "'" << getToken() << "'";
1707 OS << "c" << getSysCR();
1711 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1713 OS << "<prfop " << Name << ">";
1715 OS << "<prfop invalid #" << getPrefetch() << ">";
1718 case k_ShiftExtend: {
1719 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1720 << getShiftExtendAmount();
1721 if (!hasShiftExtendAmount())
1729 /// @name Auto-generated Match Functions
1732 static unsigned MatchRegisterName(StringRef Name);
1736 static unsigned matchVectorRegName(StringRef Name) {
1737 return StringSwitch<unsigned>(Name)
1738 .Case("v0", AArch64::Q0)
1739 .Case("v1", AArch64::Q1)
1740 .Case("v2", AArch64::Q2)
1741 .Case("v3", AArch64::Q3)
1742 .Case("v4", AArch64::Q4)
1743 .Case("v5", AArch64::Q5)
1744 .Case("v6", AArch64::Q6)
1745 .Case("v7", AArch64::Q7)
1746 .Case("v8", AArch64::Q8)
1747 .Case("v9", AArch64::Q9)
1748 .Case("v10", AArch64::Q10)
1749 .Case("v11", AArch64::Q11)
1750 .Case("v12", AArch64::Q12)
1751 .Case("v13", AArch64::Q13)
1752 .Case("v14", AArch64::Q14)
1753 .Case("v15", AArch64::Q15)
1754 .Case("v16", AArch64::Q16)
1755 .Case("v17", AArch64::Q17)
1756 .Case("v18", AArch64::Q18)
1757 .Case("v19", AArch64::Q19)
1758 .Case("v20", AArch64::Q20)
1759 .Case("v21", AArch64::Q21)
1760 .Case("v22", AArch64::Q22)
1761 .Case("v23", AArch64::Q23)
1762 .Case("v24", AArch64::Q24)
1763 .Case("v25", AArch64::Q25)
1764 .Case("v26", AArch64::Q26)
1765 .Case("v27", AArch64::Q27)
1766 .Case("v28", AArch64::Q28)
1767 .Case("v29", AArch64::Q29)
1768 .Case("v30", AArch64::Q30)
1769 .Case("v31", AArch64::Q31)
1773 static bool isValidVectorKind(StringRef Name) {
1774 return StringSwitch<bool>(Name.lower())
1784 // Accept the width neutral ones, too, for verbose syntax. If those
1785 // aren't used in the right places, the token operand won't match so
1786 // all will work out.
1794 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1795 char &ElementKind) {
1796 assert(isValidVectorKind(Name));
1798 ElementKind = Name.lower()[Name.size() - 1];
1801 if (Name.size() == 2)
1804 // Parse the lane count
1805 Name = Name.drop_front();
1806 while (isdigit(Name.front())) {
1807 NumElements = 10 * NumElements + (Name.front() - '0');
1808 Name = Name.drop_front();
1812 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1814 StartLoc = getLoc();
1815 RegNo = tryParseRegister();
1816 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1817 return (RegNo == (unsigned)-1);
1820 // Matches a register name or register alias previously defined by '.req'
1821 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1823 unsigned RegNum = isVector ? matchVectorRegName(Name)
1824 : MatchRegisterName(Name);
1827 // Check for aliases registered via .req. Canonicalize to lower case.
1828 // That's more consistent since register names are case insensitive, and
1829 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1830 auto Entry = RegisterReqs.find(Name.lower());
1831 if (Entry == RegisterReqs.end())
1833 // set RegNum if the match is the right kind of register
1834 if (isVector == Entry->getValue().first)
1835 RegNum = Entry->getValue().second;
1840 /// tryParseRegister - Try to parse a register name. The token must be an
1841 /// Identifier when called, and if it is a register name the token is eaten and
1842 /// the register is added to the operand list.
1843 int AArch64AsmParser::tryParseRegister() {
1844 MCAsmParser &Parser = getParser();
1845 const AsmToken &Tok = Parser.getTok();
1846 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1848 std::string lowerCase = Tok.getString().lower();
1849 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1850 // Also handle a few aliases of registers.
1852 RegNum = StringSwitch<unsigned>(lowerCase)
1853 .Case("fp", AArch64::FP)
1854 .Case("lr", AArch64::LR)
1855 .Case("x31", AArch64::XZR)
1856 .Case("w31", AArch64::WZR)
1862 Parser.Lex(); // Eat identifier token.
1866 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1867 /// kind specifier. If it is a register specifier, eat the token and return it.
1868 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1869 MCAsmParser &Parser = getParser();
1870 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1871 TokError("vector register expected");
1875 StringRef Name = Parser.getTok().getString();
1876 // If there is a kind specifier, it's separated from the register name by
1878 size_t Start = 0, Next = Name.find('.');
1879 StringRef Head = Name.slice(Start, Next);
1880 unsigned RegNum = matchRegisterNameAlias(Head, true);
1883 if (Next != StringRef::npos) {
1884 Kind = Name.slice(Next, StringRef::npos);
1885 if (!isValidVectorKind(Kind)) {
1886 TokError("invalid vector kind qualifier");
1890 Parser.Lex(); // Eat the register token.
1895 TokError("vector register expected");
1899 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1900 AArch64AsmParser::OperandMatchResultTy
1901 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1902 MCAsmParser &Parser = getParser();
1905 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1906 Error(S, "Expected cN operand where 0 <= N <= 15");
1907 return MatchOperand_ParseFail;
1910 StringRef Tok = Parser.getTok().getIdentifier();
1911 if (Tok[0] != 'c' && Tok[0] != 'C') {
1912 Error(S, "Expected cN operand where 0 <= N <= 15");
1913 return MatchOperand_ParseFail;
1917 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1918 if (BadNum || CRNum > 15) {
1919 Error(S, "Expected cN operand where 0 <= N <= 15");
1920 return MatchOperand_ParseFail;
1923 Parser.Lex(); // Eat identifier token.
1925 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1926 return MatchOperand_Success;
1929 /// tryParsePrefetch - Try to parse a prefetch operand.
1930 AArch64AsmParser::OperandMatchResultTy
1931 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1932 MCAsmParser &Parser = getParser();
1934 const AsmToken &Tok = Parser.getTok();
1935 // Either an identifier for named values or a 5-bit immediate.
1936 bool Hash = Tok.is(AsmToken::Hash);
1937 if (Hash || Tok.is(AsmToken::Integer)) {
1939 Parser.Lex(); // Eat hash token.
1940 const MCExpr *ImmVal;
1941 if (getParser().parseExpression(ImmVal))
1942 return MatchOperand_ParseFail;
1944 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1946 TokError("immediate value expected for prefetch operand");
1947 return MatchOperand_ParseFail;
1949 unsigned prfop = MCE->getValue();
1951 TokError("prefetch operand out of range, [0,31] expected");
1952 return MatchOperand_ParseFail;
1955 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1956 return MatchOperand_Success;
1959 if (Tok.isNot(AsmToken::Identifier)) {
1960 TokError("pre-fetch hint expected");
1961 return MatchOperand_ParseFail;
1965 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1967 TokError("pre-fetch hint expected");
1968 return MatchOperand_ParseFail;
1971 Parser.Lex(); // Eat identifier token.
1972 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1973 return MatchOperand_Success;
1976 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
1978 AArch64AsmParser::OperandMatchResultTy
1979 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1980 MCAsmParser &Parser = getParser();
1984 if (Parser.getTok().is(AsmToken::Hash)) {
1985 Parser.Lex(); // Eat hash token.
1988 if (parseSymbolicImmVal(Expr))
1989 return MatchOperand_ParseFail;
1991 AArch64MCExpr::VariantKind ELFRefKind;
1992 MCSymbolRefExpr::VariantKind DarwinRefKind;
1994 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
1995 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
1996 ELFRefKind == AArch64MCExpr::VK_INVALID) {
1997 // No modifier was specified at all; this is the syntax for an ELF basic
1998 // ADRP relocation (unfortunately).
2000 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2001 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2002 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2004 Error(S, "gotpage label reference not allowed an addend");
2005 return MatchOperand_ParseFail;
2006 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2007 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2008 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2009 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2010 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2011 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2012 // The operand must be an @page or @gotpage qualified symbolref.
2013 Error(S, "page or gotpage label reference expected");
2014 return MatchOperand_ParseFail;
2018 // We have either a label reference possibly with addend or an immediate. The
2019 // addend is a raw value here. The linker will adjust it to only reference the
2021 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2022 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2024 return MatchOperand_Success;
2027 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2029 AArch64AsmParser::OperandMatchResultTy
2030 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2031 MCAsmParser &Parser = getParser();
2035 if (Parser.getTok().is(AsmToken::Hash)) {
2036 Parser.Lex(); // Eat hash token.
2039 if (getParser().parseExpression(Expr))
2040 return MatchOperand_ParseFail;
2042 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2043 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2045 return MatchOperand_Success;
2048 /// tryParseFPImm - A floating point immediate expression operand.
2049 AArch64AsmParser::OperandMatchResultTy
2050 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2051 MCAsmParser &Parser = getParser();
2055 if (Parser.getTok().is(AsmToken::Hash)) {
2056 Parser.Lex(); // Eat '#'
2060 // Handle negation, as that still comes through as a separate token.
2061 bool isNegative = false;
2062 if (Parser.getTok().is(AsmToken::Minus)) {
2066 const AsmToken &Tok = Parser.getTok();
2067 if (Tok.is(AsmToken::Real)) {
2068 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2069 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2070 // If we had a '-' in front, toggle the sign bit.
2071 IntVal ^= (uint64_t)isNegative << 63;
2072 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2073 Parser.Lex(); // Eat the token.
2074 // Check for out of range values. As an exception, we let Zero through,
2075 // as we handle that special case in post-processing before matching in
2076 // order to use the zero register for it.
2077 if (Val == -1 && !RealVal.isZero()) {
2078 TokError("expected compatible register or floating-point constant");
2079 return MatchOperand_ParseFail;
2081 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2082 return MatchOperand_Success;
2084 if (Tok.is(AsmToken::Integer)) {
2086 if (!isNegative && Tok.getString().startswith("0x")) {
2087 Val = Tok.getIntVal();
2088 if (Val > 255 || Val < 0) {
2089 TokError("encoded floating point value out of range");
2090 return MatchOperand_ParseFail;
2093 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2094 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2095 // If we had a '-' in front, toggle the sign bit.
2096 IntVal ^= (uint64_t)isNegative << 63;
2097 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2099 Parser.Lex(); // Eat the token.
2100 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2101 return MatchOperand_Success;
2105 return MatchOperand_NoMatch;
2107 TokError("invalid floating point immediate");
2108 return MatchOperand_ParseFail;
2111 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2112 AArch64AsmParser::OperandMatchResultTy
2113 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2114 MCAsmParser &Parser = getParser();
2117 if (Parser.getTok().is(AsmToken::Hash))
2118 Parser.Lex(); // Eat '#'
2119 else if (Parser.getTok().isNot(AsmToken::Integer))
2120 // Operand should start from # or should be integer, emit error otherwise.
2121 return MatchOperand_NoMatch;
2124 if (parseSymbolicImmVal(Imm))
2125 return MatchOperand_ParseFail;
2126 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2127 uint64_t ShiftAmount = 0;
2128 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2130 int64_t Val = MCE->getValue();
2131 if (Val > 0xfff && (Val & 0xfff) == 0) {
2132 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2136 SMLoc E = Parser.getTok().getLoc();
2137 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2139 return MatchOperand_Success;
2145 // The optional operand must be "lsl #N" where N is non-negative.
2146 if (!Parser.getTok().is(AsmToken::Identifier) ||
2147 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2148 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2149 return MatchOperand_ParseFail;
2155 if (Parser.getTok().is(AsmToken::Hash)) {
2159 if (Parser.getTok().isNot(AsmToken::Integer)) {
2160 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2161 return MatchOperand_ParseFail;
2164 int64_t ShiftAmount = Parser.getTok().getIntVal();
2166 if (ShiftAmount < 0) {
2167 Error(Parser.getTok().getLoc(), "positive shift amount required");
2168 return MatchOperand_ParseFail;
2170 Parser.Lex(); // Eat the number
2172 SMLoc E = Parser.getTok().getLoc();
2173 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2174 S, E, getContext()));
2175 return MatchOperand_Success;
2178 /// parseCondCodeString - Parse a Condition Code string.
2179 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2180 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2181 .Case("eq", AArch64CC::EQ)
2182 .Case("ne", AArch64CC::NE)
2183 .Case("cs", AArch64CC::HS)
2184 .Case("hs", AArch64CC::HS)
2185 .Case("cc", AArch64CC::LO)
2186 .Case("lo", AArch64CC::LO)
2187 .Case("mi", AArch64CC::MI)
2188 .Case("pl", AArch64CC::PL)
2189 .Case("vs", AArch64CC::VS)
2190 .Case("vc", AArch64CC::VC)
2191 .Case("hi", AArch64CC::HI)
2192 .Case("ls", AArch64CC::LS)
2193 .Case("ge", AArch64CC::GE)
2194 .Case("lt", AArch64CC::LT)
2195 .Case("gt", AArch64CC::GT)
2196 .Case("le", AArch64CC::LE)
2197 .Case("al", AArch64CC::AL)
2198 .Case("nv", AArch64CC::NV)
2199 .Default(AArch64CC::Invalid);
2203 /// parseCondCode - Parse a Condition Code operand.
2204 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2205 bool invertCondCode) {
2206 MCAsmParser &Parser = getParser();
2208 const AsmToken &Tok = Parser.getTok();
2209 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2211 StringRef Cond = Tok.getString();
2212 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2213 if (CC == AArch64CC::Invalid)
2214 return TokError("invalid condition code");
2215 Parser.Lex(); // Eat identifier token.
2217 if (invertCondCode) {
2218 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2219 return TokError("condition codes AL and NV are invalid for this instruction");
2220 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2224 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2228 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2229 /// them if present.
2230 AArch64AsmParser::OperandMatchResultTy
2231 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2232 MCAsmParser &Parser = getParser();
2233 const AsmToken &Tok = Parser.getTok();
2234 std::string LowerID = Tok.getString().lower();
2235 AArch64_AM::ShiftExtendType ShOp =
2236 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2237 .Case("lsl", AArch64_AM::LSL)
2238 .Case("lsr", AArch64_AM::LSR)
2239 .Case("asr", AArch64_AM::ASR)
2240 .Case("ror", AArch64_AM::ROR)
2241 .Case("msl", AArch64_AM::MSL)
2242 .Case("uxtb", AArch64_AM::UXTB)
2243 .Case("uxth", AArch64_AM::UXTH)
2244 .Case("uxtw", AArch64_AM::UXTW)
2245 .Case("uxtx", AArch64_AM::UXTX)
2246 .Case("sxtb", AArch64_AM::SXTB)
2247 .Case("sxth", AArch64_AM::SXTH)
2248 .Case("sxtw", AArch64_AM::SXTW)
2249 .Case("sxtx", AArch64_AM::SXTX)
2250 .Default(AArch64_AM::InvalidShiftExtend);
2252 if (ShOp == AArch64_AM::InvalidShiftExtend)
2253 return MatchOperand_NoMatch;
2255 SMLoc S = Tok.getLoc();
2258 bool Hash = getLexer().is(AsmToken::Hash);
2259 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2260 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2261 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2262 ShOp == AArch64_AM::MSL) {
2263 // We expect a number here.
2264 TokError("expected #imm after shift specifier");
2265 return MatchOperand_ParseFail;
2268 // "extend" type operatoins don't need an immediate, #0 is implicit.
2269 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2271 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2272 return MatchOperand_Success;
2276 Parser.Lex(); // Eat the '#'.
2278 // Make sure we do actually have a number or a parenthesized expression.
2279 SMLoc E = Parser.getTok().getLoc();
2280 if (!Parser.getTok().is(AsmToken::Integer) &&
2281 !Parser.getTok().is(AsmToken::LParen)) {
2282 Error(E, "expected integer shift amount");
2283 return MatchOperand_ParseFail;
2286 const MCExpr *ImmVal;
2287 if (getParser().parseExpression(ImmVal))
2288 return MatchOperand_ParseFail;
2290 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2292 Error(E, "expected constant '#imm' after shift specifier");
2293 return MatchOperand_ParseFail;
2296 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2297 Operands.push_back(AArch64Operand::CreateShiftExtend(
2298 ShOp, MCE->getValue(), true, S, E, getContext()));
2299 return MatchOperand_Success;
2302 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2303 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2304 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2305 OperandVector &Operands) {
2306 if (Name.find('.') != StringRef::npos)
2307 return TokError("invalid operand");
2311 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2313 MCAsmParser &Parser = getParser();
2314 const AsmToken &Tok = Parser.getTok();
2315 StringRef Op = Tok.getString();
2316 SMLoc S = Tok.getLoc();
2318 const MCExpr *Expr = nullptr;
2320 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2322 Expr = MCConstantExpr::Create(op1, getContext()); \
2323 Operands.push_back( \
2324 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2325 Operands.push_back( \
2326 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2327 Operands.push_back( \
2328 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2329 Expr = MCConstantExpr::Create(op2, getContext()); \
2330 Operands.push_back( \
2331 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2334 if (Mnemonic == "ic") {
2335 if (!Op.compare_lower("ialluis")) {
2336 // SYS #0, C7, C1, #0
2337 SYS_ALIAS(0, 7, 1, 0);
2338 } else if (!Op.compare_lower("iallu")) {
2339 // SYS #0, C7, C5, #0
2340 SYS_ALIAS(0, 7, 5, 0);
2341 } else if (!Op.compare_lower("ivau")) {
2342 // SYS #3, C7, C5, #1
2343 SYS_ALIAS(3, 7, 5, 1);
2345 return TokError("invalid operand for IC instruction");
2347 } else if (Mnemonic == "dc") {
2348 if (!Op.compare_lower("zva")) {
2349 // SYS #3, C7, C4, #1
2350 SYS_ALIAS(3, 7, 4, 1);
2351 } else if (!Op.compare_lower("ivac")) {
2352 // SYS #3, C7, C6, #1
2353 SYS_ALIAS(0, 7, 6, 1);
2354 } else if (!Op.compare_lower("isw")) {
2355 // SYS #0, C7, C6, #2
2356 SYS_ALIAS(0, 7, 6, 2);
2357 } else if (!Op.compare_lower("cvac")) {
2358 // SYS #3, C7, C10, #1
2359 SYS_ALIAS(3, 7, 10, 1);
2360 } else if (!Op.compare_lower("csw")) {
2361 // SYS #0, C7, C10, #2
2362 SYS_ALIAS(0, 7, 10, 2);
2363 } else if (!Op.compare_lower("cvau")) {
2364 // SYS #3, C7, C11, #1
2365 SYS_ALIAS(3, 7, 11, 1);
2366 } else if (!Op.compare_lower("civac")) {
2367 // SYS #3, C7, C14, #1
2368 SYS_ALIAS(3, 7, 14, 1);
2369 } else if (!Op.compare_lower("cisw")) {
2370 // SYS #0, C7, C14, #2
2371 SYS_ALIAS(0, 7, 14, 2);
2373 return TokError("invalid operand for DC instruction");
2375 } else if (Mnemonic == "at") {
2376 if (!Op.compare_lower("s1e1r")) {
2377 // SYS #0, C7, C8, #0
2378 SYS_ALIAS(0, 7, 8, 0);
2379 } else if (!Op.compare_lower("s1e2r")) {
2380 // SYS #4, C7, C8, #0
2381 SYS_ALIAS(4, 7, 8, 0);
2382 } else if (!Op.compare_lower("s1e3r")) {
2383 // SYS #6, C7, C8, #0
2384 SYS_ALIAS(6, 7, 8, 0);
2385 } else if (!Op.compare_lower("s1e1w")) {
2386 // SYS #0, C7, C8, #1
2387 SYS_ALIAS(0, 7, 8, 1);
2388 } else if (!Op.compare_lower("s1e2w")) {
2389 // SYS #4, C7, C8, #1
2390 SYS_ALIAS(4, 7, 8, 1);
2391 } else if (!Op.compare_lower("s1e3w")) {
2392 // SYS #6, C7, C8, #1
2393 SYS_ALIAS(6, 7, 8, 1);
2394 } else if (!Op.compare_lower("s1e0r")) {
2395 // SYS #0, C7, C8, #3
2396 SYS_ALIAS(0, 7, 8, 2);
2397 } else if (!Op.compare_lower("s1e0w")) {
2398 // SYS #0, C7, C8, #3
2399 SYS_ALIAS(0, 7, 8, 3);
2400 } else if (!Op.compare_lower("s12e1r")) {
2401 // SYS #4, C7, C8, #4
2402 SYS_ALIAS(4, 7, 8, 4);
2403 } else if (!Op.compare_lower("s12e1w")) {
2404 // SYS #4, C7, C8, #5
2405 SYS_ALIAS(4, 7, 8, 5);
2406 } else if (!Op.compare_lower("s12e0r")) {
2407 // SYS #4, C7, C8, #6
2408 SYS_ALIAS(4, 7, 8, 6);
2409 } else if (!Op.compare_lower("s12e0w")) {
2410 // SYS #4, C7, C8, #7
2411 SYS_ALIAS(4, 7, 8, 7);
2413 return TokError("invalid operand for AT instruction");
2415 } else if (Mnemonic == "tlbi") {
2416 if (!Op.compare_lower("vmalle1is")) {
2417 // SYS #0, C8, C3, #0
2418 SYS_ALIAS(0, 8, 3, 0);
2419 } else if (!Op.compare_lower("alle2is")) {
2420 // SYS #4, C8, C3, #0
2421 SYS_ALIAS(4, 8, 3, 0);
2422 } else if (!Op.compare_lower("alle3is")) {
2423 // SYS #6, C8, C3, #0
2424 SYS_ALIAS(6, 8, 3, 0);
2425 } else if (!Op.compare_lower("vae1is")) {
2426 // SYS #0, C8, C3, #1
2427 SYS_ALIAS(0, 8, 3, 1);
2428 } else if (!Op.compare_lower("vae2is")) {
2429 // SYS #4, C8, C3, #1
2430 SYS_ALIAS(4, 8, 3, 1);
2431 } else if (!Op.compare_lower("vae3is")) {
2432 // SYS #6, C8, C3, #1
2433 SYS_ALIAS(6, 8, 3, 1);
2434 } else if (!Op.compare_lower("aside1is")) {
2435 // SYS #0, C8, C3, #2
2436 SYS_ALIAS(0, 8, 3, 2);
2437 } else if (!Op.compare_lower("vaae1is")) {
2438 // SYS #0, C8, C3, #3
2439 SYS_ALIAS(0, 8, 3, 3);
2440 } else if (!Op.compare_lower("alle1is")) {
2441 // SYS #4, C8, C3, #4
2442 SYS_ALIAS(4, 8, 3, 4);
2443 } else if (!Op.compare_lower("vale1is")) {
2444 // SYS #0, C8, C3, #5
2445 SYS_ALIAS(0, 8, 3, 5);
2446 } else if (!Op.compare_lower("vaale1is")) {
2447 // SYS #0, C8, C3, #7
2448 SYS_ALIAS(0, 8, 3, 7);
2449 } else if (!Op.compare_lower("vmalle1")) {
2450 // SYS #0, C8, C7, #0
2451 SYS_ALIAS(0, 8, 7, 0);
2452 } else if (!Op.compare_lower("alle2")) {
2453 // SYS #4, C8, C7, #0
2454 SYS_ALIAS(4, 8, 7, 0);
2455 } else if (!Op.compare_lower("vale2is")) {
2456 // SYS #4, C8, C3, #5
2457 SYS_ALIAS(4, 8, 3, 5);
2458 } else if (!Op.compare_lower("vale3is")) {
2459 // SYS #6, C8, C3, #5
2460 SYS_ALIAS(6, 8, 3, 5);
2461 } else if (!Op.compare_lower("alle3")) {
2462 // SYS #6, C8, C7, #0
2463 SYS_ALIAS(6, 8, 7, 0);
2464 } else if (!Op.compare_lower("vae1")) {
2465 // SYS #0, C8, C7, #1
2466 SYS_ALIAS(0, 8, 7, 1);
2467 } else if (!Op.compare_lower("vae2")) {
2468 // SYS #4, C8, C7, #1
2469 SYS_ALIAS(4, 8, 7, 1);
2470 } else if (!Op.compare_lower("vae3")) {
2471 // SYS #6, C8, C7, #1
2472 SYS_ALIAS(6, 8, 7, 1);
2473 } else if (!Op.compare_lower("aside1")) {
2474 // SYS #0, C8, C7, #2
2475 SYS_ALIAS(0, 8, 7, 2);
2476 } else if (!Op.compare_lower("vaae1")) {
2477 // SYS #0, C8, C7, #3
2478 SYS_ALIAS(0, 8, 7, 3);
2479 } else if (!Op.compare_lower("alle1")) {
2480 // SYS #4, C8, C7, #4
2481 SYS_ALIAS(4, 8, 7, 4);
2482 } else if (!Op.compare_lower("vale1")) {
2483 // SYS #0, C8, C7, #5
2484 SYS_ALIAS(0, 8, 7, 5);
2485 } else if (!Op.compare_lower("vale2")) {
2486 // SYS #4, C8, C7, #5
2487 SYS_ALIAS(4, 8, 7, 5);
2488 } else if (!Op.compare_lower("vale3")) {
2489 // SYS #6, C8, C7, #5
2490 SYS_ALIAS(6, 8, 7, 5);
2491 } else if (!Op.compare_lower("vaale1")) {
2492 // SYS #0, C8, C7, #7
2493 SYS_ALIAS(0, 8, 7, 7);
2494 } else if (!Op.compare_lower("ipas2e1")) {
2495 // SYS #4, C8, C4, #1
2496 SYS_ALIAS(4, 8, 4, 1);
2497 } else if (!Op.compare_lower("ipas2le1")) {
2498 // SYS #4, C8, C4, #5
2499 SYS_ALIAS(4, 8, 4, 5);
2500 } else if (!Op.compare_lower("ipas2e1is")) {
2501 // SYS #4, C8, C4, #1
2502 SYS_ALIAS(4, 8, 0, 1);
2503 } else if (!Op.compare_lower("ipas2le1is")) {
2504 // SYS #4, C8, C4, #5
2505 SYS_ALIAS(4, 8, 0, 5);
2506 } else if (!Op.compare_lower("vmalls12e1")) {
2507 // SYS #4, C8, C7, #6
2508 SYS_ALIAS(4, 8, 7, 6);
2509 } else if (!Op.compare_lower("vmalls12e1is")) {
2510 // SYS #4, C8, C3, #6
2511 SYS_ALIAS(4, 8, 3, 6);
2513 return TokError("invalid operand for TLBI instruction");
2519 Parser.Lex(); // Eat operand.
2521 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2522 bool HasRegister = false;
2524 // Check for the optional register operand.
2525 if (getLexer().is(AsmToken::Comma)) {
2526 Parser.Lex(); // Eat comma.
2528 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2529 return TokError("expected register operand");
2534 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2535 Parser.eatToEndOfStatement();
2536 return TokError("unexpected token in argument list");
2539 if (ExpectRegister && !HasRegister) {
2540 return TokError("specified " + Mnemonic + " op requires a register");
2542 else if (!ExpectRegister && HasRegister) {
2543 return TokError("specified " + Mnemonic + " op does not use a register");
2546 Parser.Lex(); // Consume the EndOfStatement
2550 AArch64AsmParser::OperandMatchResultTy
2551 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2552 MCAsmParser &Parser = getParser();
2553 const AsmToken &Tok = Parser.getTok();
2555 // Can be either a #imm style literal or an option name
2556 bool Hash = Tok.is(AsmToken::Hash);
2557 if (Hash || Tok.is(AsmToken::Integer)) {
2558 // Immediate operand.
2560 Parser.Lex(); // Eat the '#'
2561 const MCExpr *ImmVal;
2562 SMLoc ExprLoc = getLoc();
2563 if (getParser().parseExpression(ImmVal))
2564 return MatchOperand_ParseFail;
2565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2567 Error(ExprLoc, "immediate value expected for barrier operand");
2568 return MatchOperand_ParseFail;
2570 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2571 Error(ExprLoc, "barrier operand out of range");
2572 return MatchOperand_ParseFail;
2575 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2576 return MatchOperand_Success;
2579 if (Tok.isNot(AsmToken::Identifier)) {
2580 TokError("invalid operand for instruction");
2581 return MatchOperand_ParseFail;
2585 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2587 TokError("invalid barrier option name");
2588 return MatchOperand_ParseFail;
2591 // The only valid named option for ISB is 'sy'
2592 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2593 TokError("'sy' or #imm operand expected");
2594 return MatchOperand_ParseFail;
2598 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2599 Parser.Lex(); // Consume the option
2601 return MatchOperand_Success;
2604 AArch64AsmParser::OperandMatchResultTy
2605 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2606 MCAsmParser &Parser = getParser();
2607 const AsmToken &Tok = Parser.getTok();
2609 if (Tok.isNot(AsmToken::Identifier))
2610 return MatchOperand_NoMatch;
2613 auto MRSMapper = AArch64SysReg::MRSMapper(STI.getFeatureBits());
2614 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), IsKnown);
2615 assert(IsKnown == (MRSReg != -1U) &&
2616 "register should be -1 if and only if it's unknown");
2618 auto MSRMapper = AArch64SysReg::MSRMapper(STI.getFeatureBits());
2619 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), IsKnown);
2620 assert(IsKnown == (MSRReg != -1U) &&
2621 "register should be -1 if and only if it's unknown");
2623 uint32_t PStateField =
2624 AArch64PState::PStateMapper().fromString(Tok.getString(), IsKnown);
2625 assert(IsKnown == (PStateField != -1U) &&
2626 "register should be -1 if and only if it's unknown");
2628 Operands.push_back(AArch64Operand::CreateSysReg(
2629 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2630 Parser.Lex(); // Eat identifier
2632 return MatchOperand_Success;
2635 /// tryParseVectorRegister - Parse a vector register operand.
2636 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2637 MCAsmParser &Parser = getParser();
2638 if (Parser.getTok().isNot(AsmToken::Identifier))
2642 // Check for a vector register specifier first.
2644 int64_t Reg = tryMatchVectorRegister(Kind, false);
2648 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2649 // If there was an explicit qualifier, that goes on as a literal text
2653 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2655 // If there is an index specifier following the register, parse that too.
2656 if (Parser.getTok().is(AsmToken::LBrac)) {
2657 SMLoc SIdx = getLoc();
2658 Parser.Lex(); // Eat left bracket token.
2660 const MCExpr *ImmVal;
2661 if (getParser().parseExpression(ImmVal))
2663 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2665 TokError("immediate value expected for vector index");
2670 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2671 Error(E, "']' expected");
2675 Parser.Lex(); // Eat right bracket token.
2677 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2684 /// parseRegister - Parse a non-vector register operand.
2685 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2686 MCAsmParser &Parser = getParser();
2688 // Try for a vector register.
2689 if (!tryParseVectorRegister(Operands))
2692 // Try for a scalar register.
2693 int64_t Reg = tryParseRegister();
2697 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2699 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2700 // as a string token in the instruction itself.
2701 if (getLexer().getKind() == AsmToken::LBrac) {
2702 SMLoc LBracS = getLoc();
2704 const AsmToken &Tok = Parser.getTok();
2705 if (Tok.is(AsmToken::Integer)) {
2706 SMLoc IntS = getLoc();
2707 int64_t Val = Tok.getIntVal();
2710 if (getLexer().getKind() == AsmToken::RBrac) {
2711 SMLoc RBracS = getLoc();
2714 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2716 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2718 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2728 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2729 MCAsmParser &Parser = getParser();
2730 bool HasELFModifier = false;
2731 AArch64MCExpr::VariantKind RefKind;
2733 if (Parser.getTok().is(AsmToken::Colon)) {
2734 Parser.Lex(); // Eat ':"
2735 HasELFModifier = true;
2737 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2738 Error(Parser.getTok().getLoc(),
2739 "expect relocation specifier in operand after ':'");
2743 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2744 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2745 .Case("lo12", AArch64MCExpr::VK_LO12)
2746 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2747 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2748 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2749 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2750 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2751 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2752 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2753 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2754 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2755 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2756 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2757 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2758 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2759 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2760 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2761 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2762 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2763 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2764 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2765 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2766 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2767 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2768 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2769 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2770 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2771 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2772 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2773 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2774 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2775 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2776 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2777 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2778 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2779 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2780 .Default(AArch64MCExpr::VK_INVALID);
2782 if (RefKind == AArch64MCExpr::VK_INVALID) {
2783 Error(Parser.getTok().getLoc(),
2784 "expect relocation specifier in operand after ':'");
2788 Parser.Lex(); // Eat identifier
2790 if (Parser.getTok().isNot(AsmToken::Colon)) {
2791 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2794 Parser.Lex(); // Eat ':'
2797 if (getParser().parseExpression(ImmVal))
2801 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2806 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2807 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2808 MCAsmParser &Parser = getParser();
2809 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2811 Parser.Lex(); // Eat left bracket token.
2813 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2816 int64_t PrevReg = FirstReg;
2819 if (Parser.getTok().is(AsmToken::Minus)) {
2820 Parser.Lex(); // Eat the minus.
2822 SMLoc Loc = getLoc();
2824 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2827 // Any Kind suffices must match on all regs in the list.
2828 if (Kind != NextKind)
2829 return Error(Loc, "mismatched register size suffix");
2831 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2833 if (Space == 0 || Space > 3) {
2834 return Error(Loc, "invalid number of vectors");
2840 while (Parser.getTok().is(AsmToken::Comma)) {
2841 Parser.Lex(); // Eat the comma token.
2843 SMLoc Loc = getLoc();
2845 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2848 // Any Kind suffices must match on all regs in the list.
2849 if (Kind != NextKind)
2850 return Error(Loc, "mismatched register size suffix");
2852 // Registers must be incremental (with wraparound at 31)
2853 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2854 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2855 return Error(Loc, "registers must be sequential");
2862 if (Parser.getTok().isNot(AsmToken::RCurly))
2863 return Error(getLoc(), "'}' expected");
2864 Parser.Lex(); // Eat the '}' token.
2867 return Error(S, "invalid number of vectors");
2869 unsigned NumElements = 0;
2870 char ElementKind = 0;
2872 parseValidVectorKind(Kind, NumElements, ElementKind);
2874 Operands.push_back(AArch64Operand::CreateVectorList(
2875 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2877 // If there is an index specifier following the list, parse that too.
2878 if (Parser.getTok().is(AsmToken::LBrac)) {
2879 SMLoc SIdx = getLoc();
2880 Parser.Lex(); // Eat left bracket token.
2882 const MCExpr *ImmVal;
2883 if (getParser().parseExpression(ImmVal))
2885 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2887 TokError("immediate value expected for vector index");
2892 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2893 Error(E, "']' expected");
2897 Parser.Lex(); // Eat right bracket token.
2899 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2905 AArch64AsmParser::OperandMatchResultTy
2906 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2907 MCAsmParser &Parser = getParser();
2908 const AsmToken &Tok = Parser.getTok();
2909 if (!Tok.is(AsmToken::Identifier))
2910 return MatchOperand_NoMatch;
2912 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2914 MCContext &Ctx = getContext();
2915 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2916 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2917 return MatchOperand_NoMatch;
2920 Parser.Lex(); // Eat register
2922 if (Parser.getTok().isNot(AsmToken::Comma)) {
2924 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2925 return MatchOperand_Success;
2927 Parser.Lex(); // Eat comma.
2929 if (Parser.getTok().is(AsmToken::Hash))
2930 Parser.Lex(); // Eat hash
2932 if (Parser.getTok().isNot(AsmToken::Integer)) {
2933 Error(getLoc(), "index must be absent or #0");
2934 return MatchOperand_ParseFail;
2937 const MCExpr *ImmVal;
2938 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2939 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2940 Error(getLoc(), "index must be absent or #0");
2941 return MatchOperand_ParseFail;
2945 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2946 return MatchOperand_Success;
2949 /// parseOperand - Parse a arm instruction operand. For now this parses the
2950 /// operand regardless of the mnemonic.
2951 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2952 bool invertCondCode) {
2953 MCAsmParser &Parser = getParser();
2954 // Check if the current operand has a custom associated parser, if so, try to
2955 // custom parse the operand, or fallback to the general approach.
2956 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2957 if (ResTy == MatchOperand_Success)
2959 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2960 // there was a match, but an error occurred, in which case, just return that
2961 // the operand parsing failed.
2962 if (ResTy == MatchOperand_ParseFail)
2965 // Nothing custom, so do general case parsing.
2967 switch (getLexer().getKind()) {
2971 if (parseSymbolicImmVal(Expr))
2972 return Error(S, "invalid operand");
2974 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2975 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2978 case AsmToken::LBrac: {
2979 SMLoc Loc = Parser.getTok().getLoc();
2980 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2982 Parser.Lex(); // Eat '['
2984 // There's no comma after a '[', so we can parse the next operand
2986 return parseOperand(Operands, false, false);
2988 case AsmToken::LCurly:
2989 return parseVectorList(Operands);
2990 case AsmToken::Identifier: {
2991 // If we're expecting a Condition Code operand, then just parse that.
2993 return parseCondCode(Operands, invertCondCode);
2995 // If it's a register name, parse it.
2996 if (!parseRegister(Operands))
2999 // This could be an optional "shift" or "extend" operand.
3000 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3001 // We can only continue if no tokens were eaten.
3002 if (GotShift != MatchOperand_NoMatch)
3005 // This was not a register so parse other operands that start with an
3006 // identifier (like labels) as expressions and create them as immediates.
3007 const MCExpr *IdVal;
3009 if (getParser().parseExpression(IdVal))
3012 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3013 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3016 case AsmToken::Integer:
3017 case AsmToken::Real:
3018 case AsmToken::Hash: {
3019 // #42 -> immediate.
3021 if (getLexer().is(AsmToken::Hash))
3024 // Parse a negative sign
3025 bool isNegative = false;
3026 if (Parser.getTok().is(AsmToken::Minus)) {
3028 // We need to consume this token only when we have a Real, otherwise
3029 // we let parseSymbolicImmVal take care of it
3030 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3034 // The only Real that should come through here is a literal #0.0 for
3035 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3036 // so convert the value.
3037 const AsmToken &Tok = Parser.getTok();
3038 if (Tok.is(AsmToken::Real)) {
3039 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3040 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3041 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3042 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3043 Mnemonic != "fcmlt")
3044 return TokError("unexpected floating point literal");
3045 else if (IntVal != 0 || isNegative)
3046 return TokError("expected floating-point constant #0.0");
3047 Parser.Lex(); // Eat the token.
3050 AArch64Operand::CreateToken("#0", false, S, getContext()));
3052 AArch64Operand::CreateToken(".0", false, S, getContext()));
3056 const MCExpr *ImmVal;
3057 if (parseSymbolicImmVal(ImmVal))
3060 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3061 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3064 case AsmToken::Equal: {
3065 SMLoc Loc = Parser.getTok().getLoc();
3066 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3067 return Error(Loc, "unexpected token in operand");
3068 Parser.Lex(); // Eat '='
3069 const MCExpr *SubExprVal;
3070 if (getParser().parseExpression(SubExprVal))
3073 if (Operands.size() < 2 ||
3074 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3078 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3079 Operands[1]->getReg());
3081 MCContext& Ctx = getContext();
3082 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3083 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3084 if (isa<MCConstantExpr>(SubExprVal)) {
3085 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3086 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3087 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3091 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3092 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3093 Operands.push_back(AArch64Operand::CreateImm(
3094 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3096 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3097 ShiftAmt, true, S, E, Ctx));
3100 APInt Simm = APInt(64, Imm << ShiftAmt);
3101 // check if the immediate is an unsigned or signed 32-bit int for W regs
3102 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3103 return Error(Loc, "Immediate too large for register");
3105 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3106 const MCExpr *CPLoc =
3107 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3108 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3114 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3116 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3117 StringRef Name, SMLoc NameLoc,
3118 OperandVector &Operands) {
3119 MCAsmParser &Parser = getParser();
3120 Name = StringSwitch<StringRef>(Name.lower())
3121 .Case("beq", "b.eq")
3122 .Case("bne", "b.ne")
3123 .Case("bhs", "b.hs")
3124 .Case("bcs", "b.cs")
3125 .Case("blo", "b.lo")
3126 .Case("bcc", "b.cc")
3127 .Case("bmi", "b.mi")
3128 .Case("bpl", "b.pl")
3129 .Case("bvs", "b.vs")
3130 .Case("bvc", "b.vc")
3131 .Case("bhi", "b.hi")
3132 .Case("bls", "b.ls")
3133 .Case("bge", "b.ge")
3134 .Case("blt", "b.lt")
3135 .Case("bgt", "b.gt")
3136 .Case("ble", "b.le")
3137 .Case("bal", "b.al")
3138 .Case("bnv", "b.nv")
3141 // First check for the AArch64-specific .req directive.
3142 if (Parser.getTok().is(AsmToken::Identifier) &&
3143 Parser.getTok().getIdentifier() == ".req") {
3144 parseDirectiveReq(Name, NameLoc);
3145 // We always return 'error' for this, as we're done with this
3146 // statement and don't need to match the 'instruction."
3150 // Create the leading tokens for the mnemonic, split by '.' characters.
3151 size_t Start = 0, Next = Name.find('.');
3152 StringRef Head = Name.slice(Start, Next);
3154 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3155 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3156 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3157 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3158 Parser.eatToEndOfStatement();
3163 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3166 // Handle condition codes for a branch mnemonic
3167 if (Head == "b" && Next != StringRef::npos) {
3169 Next = Name.find('.', Start + 1);
3170 Head = Name.slice(Start + 1, Next);
3172 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3173 (Head.data() - Name.data()));
3174 AArch64CC::CondCode CC = parseCondCodeString(Head);
3175 if (CC == AArch64CC::Invalid)
3176 return Error(SuffixLoc, "invalid condition code");
3178 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3180 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3183 // Add the remaining tokens in the mnemonic.
3184 while (Next != StringRef::npos) {
3186 Next = Name.find('.', Start + 1);
3187 Head = Name.slice(Start, Next);
3188 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3189 (Head.data() - Name.data()) + 1);
3191 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3194 // Conditional compare instructions have a Condition Code operand, which needs
3195 // to be parsed and an immediate operand created.
3196 bool condCodeFourthOperand =
3197 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3198 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3199 Head == "csinc" || Head == "csinv" || Head == "csneg");
3201 // These instructions are aliases to some of the conditional select
3202 // instructions. However, the condition code is inverted in the aliased
3205 // FIXME: Is this the correct way to handle these? Or should the parser
3206 // generate the aliased instructions directly?
3207 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3208 bool condCodeThirdOperand =
3209 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3211 // Read the remaining operands.
3212 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3213 // Read the first operand.
3214 if (parseOperand(Operands, false, false)) {
3215 Parser.eatToEndOfStatement();
3220 while (getLexer().is(AsmToken::Comma)) {
3221 Parser.Lex(); // Eat the comma.
3223 // Parse and remember the operand.
3224 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3225 (N == 3 && condCodeThirdOperand) ||
3226 (N == 2 && condCodeSecondOperand),
3227 condCodeSecondOperand || condCodeThirdOperand)) {
3228 Parser.eatToEndOfStatement();
3232 // After successfully parsing some operands there are two special cases to
3233 // consider (i.e. notional operands not separated by commas). Both are due
3234 // to memory specifiers:
3235 // + An RBrac will end an address for load/store/prefetch
3236 // + An '!' will indicate a pre-indexed operation.
3238 // It's someone else's responsibility to make sure these tokens are sane
3239 // in the given context!
3240 if (Parser.getTok().is(AsmToken::RBrac)) {
3241 SMLoc Loc = Parser.getTok().getLoc();
3242 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3247 if (Parser.getTok().is(AsmToken::Exclaim)) {
3248 SMLoc Loc = Parser.getTok().getLoc();
3249 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3258 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3259 SMLoc Loc = Parser.getTok().getLoc();
3260 Parser.eatToEndOfStatement();
3261 return Error(Loc, "unexpected token in argument list");
3264 Parser.Lex(); // Consume the EndOfStatement
3268 // FIXME: This entire function is a giant hack to provide us with decent
3269 // operand range validation/diagnostics until TableGen/MC can be extended
3270 // to support autogeneration of this kind of validation.
3271 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3272 SmallVectorImpl<SMLoc> &Loc) {
3273 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3274 // Check for indexed addressing modes w/ the base register being the
3275 // same as a destination/source register or pair load where
3276 // the Rt == Rt2. All of those are undefined behaviour.
3277 switch (Inst.getOpcode()) {
3278 case AArch64::LDPSWpre:
3279 case AArch64::LDPWpost:
3280 case AArch64::LDPWpre:
3281 case AArch64::LDPXpost:
3282 case AArch64::LDPXpre: {
3283 unsigned Rt = Inst.getOperand(1).getReg();
3284 unsigned Rt2 = Inst.getOperand(2).getReg();
3285 unsigned Rn = Inst.getOperand(3).getReg();
3286 if (RI->isSubRegisterEq(Rn, Rt))
3287 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3288 "is also a destination");
3289 if (RI->isSubRegisterEq(Rn, Rt2))
3290 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3291 "is also a destination");
3294 case AArch64::LDPDi:
3295 case AArch64::LDPQi:
3296 case AArch64::LDPSi:
3297 case AArch64::LDPSWi:
3298 case AArch64::LDPWi:
3299 case AArch64::LDPXi: {
3300 unsigned Rt = Inst.getOperand(0).getReg();
3301 unsigned Rt2 = Inst.getOperand(1).getReg();
3303 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3306 case AArch64::LDPDpost:
3307 case AArch64::LDPDpre:
3308 case AArch64::LDPQpost:
3309 case AArch64::LDPQpre:
3310 case AArch64::LDPSpost:
3311 case AArch64::LDPSpre:
3312 case AArch64::LDPSWpost: {
3313 unsigned Rt = Inst.getOperand(1).getReg();
3314 unsigned Rt2 = Inst.getOperand(2).getReg();
3316 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3319 case AArch64::STPDpost:
3320 case AArch64::STPDpre:
3321 case AArch64::STPQpost:
3322 case AArch64::STPQpre:
3323 case AArch64::STPSpost:
3324 case AArch64::STPSpre:
3325 case AArch64::STPWpost:
3326 case AArch64::STPWpre:
3327 case AArch64::STPXpost:
3328 case AArch64::STPXpre: {
3329 unsigned Rt = Inst.getOperand(1).getReg();
3330 unsigned Rt2 = Inst.getOperand(2).getReg();
3331 unsigned Rn = Inst.getOperand(3).getReg();
3332 if (RI->isSubRegisterEq(Rn, Rt))
3333 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3334 "is also a source");
3335 if (RI->isSubRegisterEq(Rn, Rt2))
3336 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3337 "is also a source");
3340 case AArch64::LDRBBpre:
3341 case AArch64::LDRBpre:
3342 case AArch64::LDRHHpre:
3343 case AArch64::LDRHpre:
3344 case AArch64::LDRSBWpre:
3345 case AArch64::LDRSBXpre:
3346 case AArch64::LDRSHWpre:
3347 case AArch64::LDRSHXpre:
3348 case AArch64::LDRSWpre:
3349 case AArch64::LDRWpre:
3350 case AArch64::LDRXpre:
3351 case AArch64::LDRBBpost:
3352 case AArch64::LDRBpost:
3353 case AArch64::LDRHHpost:
3354 case AArch64::LDRHpost:
3355 case AArch64::LDRSBWpost:
3356 case AArch64::LDRSBXpost:
3357 case AArch64::LDRSHWpost:
3358 case AArch64::LDRSHXpost:
3359 case AArch64::LDRSWpost:
3360 case AArch64::LDRWpost:
3361 case AArch64::LDRXpost: {
3362 unsigned Rt = Inst.getOperand(1).getReg();
3363 unsigned Rn = Inst.getOperand(2).getReg();
3364 if (RI->isSubRegisterEq(Rn, Rt))
3365 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3366 "is also a source");
3369 case AArch64::STRBBpost:
3370 case AArch64::STRBpost:
3371 case AArch64::STRHHpost:
3372 case AArch64::STRHpost:
3373 case AArch64::STRWpost:
3374 case AArch64::STRXpost:
3375 case AArch64::STRBBpre:
3376 case AArch64::STRBpre:
3377 case AArch64::STRHHpre:
3378 case AArch64::STRHpre:
3379 case AArch64::STRWpre:
3380 case AArch64::STRXpre: {
3381 unsigned Rt = Inst.getOperand(1).getReg();
3382 unsigned Rn = Inst.getOperand(2).getReg();
3383 if (RI->isSubRegisterEq(Rn, Rt))
3384 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3385 "is also a source");
3390 // Now check immediate ranges. Separate from the above as there is overlap
3391 // in the instructions being checked and this keeps the nested conditionals
3393 switch (Inst.getOpcode()) {
3394 case AArch64::ADDSWri:
3395 case AArch64::ADDSXri:
3396 case AArch64::ADDWri:
3397 case AArch64::ADDXri:
3398 case AArch64::SUBSWri:
3399 case AArch64::SUBSXri:
3400 case AArch64::SUBWri:
3401 case AArch64::SUBXri: {
3402 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3403 // some slight duplication here.
3404 if (Inst.getOperand(2).isExpr()) {
3405 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3406 AArch64MCExpr::VariantKind ELFRefKind;
3407 MCSymbolRefExpr::VariantKind DarwinRefKind;
3409 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3410 return Error(Loc[2], "invalid immediate expression");
3413 // Only allow these with ADDXri.
3414 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3415 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3416 Inst.getOpcode() == AArch64::ADDXri)
3419 // Only allow these with ADDXri/ADDWri
3420 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3421 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3422 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3423 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3424 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3425 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3426 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3427 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3428 (Inst.getOpcode() == AArch64::ADDXri ||
3429 Inst.getOpcode() == AArch64::ADDWri))
3432 // Don't allow expressions in the immediate field otherwise
3433 return Error(Loc[2], "invalid immediate expression");
3442 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3444 case Match_MissingFeature:
3446 "instruction requires a CPU feature not currently enabled");
3447 case Match_InvalidOperand:
3448 return Error(Loc, "invalid operand for instruction");
3449 case Match_InvalidSuffix:
3450 return Error(Loc, "invalid type suffix for instruction");
3451 case Match_InvalidCondCode:
3452 return Error(Loc, "expected AArch64 condition code");
3453 case Match_AddSubRegExtendSmall:
3455 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3456 case Match_AddSubRegExtendLarge:
3458 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3459 case Match_AddSubSecondSource:
3461 "expected compatible register, symbol or integer in range [0, 4095]");
3462 case Match_LogicalSecondSource:
3463 return Error(Loc, "expected compatible register or logical immediate");
3464 case Match_InvalidMovImm32Shift:
3465 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3466 case Match_InvalidMovImm64Shift:
3467 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3468 case Match_AddSubRegShift32:
3470 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3471 case Match_AddSubRegShift64:
3473 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3474 case Match_InvalidFPImm:
3476 "expected compatible register or floating-point constant");
3477 case Match_InvalidMemoryIndexedSImm9:
3478 return Error(Loc, "index must be an integer in range [-256, 255].");
3479 case Match_InvalidMemoryIndexed4SImm7:
3480 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3481 case Match_InvalidMemoryIndexed8SImm7:
3482 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3483 case Match_InvalidMemoryIndexed16SImm7:
3484 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3485 case Match_InvalidMemoryWExtend8:
3487 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3488 case Match_InvalidMemoryWExtend16:
3490 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3491 case Match_InvalidMemoryWExtend32:
3493 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3494 case Match_InvalidMemoryWExtend64:
3496 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3497 case Match_InvalidMemoryWExtend128:
3499 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3500 case Match_InvalidMemoryXExtend8:
3502 "expected 'lsl' or 'sxtx' with optional shift of #0");
3503 case Match_InvalidMemoryXExtend16:
3505 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3506 case Match_InvalidMemoryXExtend32:
3508 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3509 case Match_InvalidMemoryXExtend64:
3511 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3512 case Match_InvalidMemoryXExtend128:
3514 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3515 case Match_InvalidMemoryIndexed1:
3516 return Error(Loc, "index must be an integer in range [0, 4095].");
3517 case Match_InvalidMemoryIndexed2:
3518 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3519 case Match_InvalidMemoryIndexed4:
3520 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3521 case Match_InvalidMemoryIndexed8:
3522 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3523 case Match_InvalidMemoryIndexed16:
3524 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3525 case Match_InvalidImm0_7:
3526 return Error(Loc, "immediate must be an integer in range [0, 7].");
3527 case Match_InvalidImm0_15:
3528 return Error(Loc, "immediate must be an integer in range [0, 15].");
3529 case Match_InvalidImm0_31:
3530 return Error(Loc, "immediate must be an integer in range [0, 31].");
3531 case Match_InvalidImm0_63:
3532 return Error(Loc, "immediate must be an integer in range [0, 63].");
3533 case Match_InvalidImm0_127:
3534 return Error(Loc, "immediate must be an integer in range [0, 127].");
3535 case Match_InvalidImm0_65535:
3536 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3537 case Match_InvalidImm1_8:
3538 return Error(Loc, "immediate must be an integer in range [1, 8].");
3539 case Match_InvalidImm1_16:
3540 return Error(Loc, "immediate must be an integer in range [1, 16].");
3541 case Match_InvalidImm1_32:
3542 return Error(Loc, "immediate must be an integer in range [1, 32].");
3543 case Match_InvalidImm1_64:
3544 return Error(Loc, "immediate must be an integer in range [1, 64].");
3545 case Match_InvalidIndex1:
3546 return Error(Loc, "expected lane specifier '[1]'");
3547 case Match_InvalidIndexB:
3548 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3549 case Match_InvalidIndexH:
3550 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3551 case Match_InvalidIndexS:
3552 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3553 case Match_InvalidIndexD:
3554 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3555 case Match_InvalidLabel:
3556 return Error(Loc, "expected label or encodable integer pc offset");
3558 return Error(Loc, "expected readable system register");
3560 return Error(Loc, "expected writable system register or pstate");
3561 case Match_MnemonicFail:
3562 return Error(Loc, "unrecognized instruction mnemonic");
3564 llvm_unreachable("unexpected error code!");
3568 static const char *getSubtargetFeatureName(uint64_t Val);
3570 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3571 OperandVector &Operands,
3573 uint64_t &ErrorInfo,
3574 bool MatchingInlineAsm) {
3575 assert(!Operands.empty() && "Unexpect empty operand list!");
3576 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3577 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3579 StringRef Tok = Op.getToken();
3580 unsigned NumOperands = Operands.size();
3582 if (NumOperands == 4 && Tok == "lsl") {
3583 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3584 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3585 if (Op2.isReg() && Op3.isImm()) {
3586 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3588 uint64_t Op3Val = Op3CE->getValue();
3589 uint64_t NewOp3Val = 0;
3590 uint64_t NewOp4Val = 0;
3591 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3593 NewOp3Val = (32 - Op3Val) & 0x1f;
3594 NewOp4Val = 31 - Op3Val;
3596 NewOp3Val = (64 - Op3Val) & 0x3f;
3597 NewOp4Val = 63 - Op3Val;
3600 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3601 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3603 Operands[0] = AArch64Operand::CreateToken(
3604 "ubfm", false, Op.getStartLoc(), getContext());
3605 Operands.push_back(AArch64Operand::CreateImm(
3606 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3607 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3608 Op3.getEndLoc(), getContext());
3611 } else if (NumOperands == 5) {
3612 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3613 // UBFIZ -> UBFM aliases.
3614 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3615 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3616 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3617 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3619 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3620 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3621 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3623 if (Op3CE && Op4CE) {
3624 uint64_t Op3Val = Op3CE->getValue();
3625 uint64_t Op4Val = Op4CE->getValue();
3627 uint64_t RegWidth = 0;
3628 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3634 if (Op3Val >= RegWidth)
3635 return Error(Op3.getStartLoc(),
3636 "expected integer in range [0, 31]");
3637 if (Op4Val < 1 || Op4Val > RegWidth)
3638 return Error(Op4.getStartLoc(),
3639 "expected integer in range [1, 32]");
3641 uint64_t NewOp3Val = 0;
3642 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3644 NewOp3Val = (32 - Op3Val) & 0x1f;
3646 NewOp3Val = (64 - Op3Val) & 0x3f;
3648 uint64_t NewOp4Val = Op4Val - 1;
3650 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3651 return Error(Op4.getStartLoc(),
3652 "requested insert overflows register");
3654 const MCExpr *NewOp3 =
3655 MCConstantExpr::Create(NewOp3Val, getContext());
3656 const MCExpr *NewOp4 =
3657 MCConstantExpr::Create(NewOp4Val, getContext());
3658 Operands[3] = AArch64Operand::CreateImm(
3659 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3660 Operands[4] = AArch64Operand::CreateImm(
3661 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3663 Operands[0] = AArch64Operand::CreateToken(
3664 "bfm", false, Op.getStartLoc(), getContext());
3665 else if (Tok == "sbfiz")
3666 Operands[0] = AArch64Operand::CreateToken(
3667 "sbfm", false, Op.getStartLoc(), getContext());
3668 else if (Tok == "ubfiz")
3669 Operands[0] = AArch64Operand::CreateToken(
3670 "ubfm", false, Op.getStartLoc(), getContext());
3672 llvm_unreachable("No valid mnemonic for alias?");
3676 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3677 // UBFX -> UBFM aliases.
3678 } else if (NumOperands == 5 &&
3679 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3680 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3681 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3682 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3684 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3685 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3686 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3688 if (Op3CE && Op4CE) {
3689 uint64_t Op3Val = Op3CE->getValue();
3690 uint64_t Op4Val = Op4CE->getValue();
3692 uint64_t RegWidth = 0;
3693 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3699 if (Op3Val >= RegWidth)
3700 return Error(Op3.getStartLoc(),
3701 "expected integer in range [0, 31]");
3702 if (Op4Val < 1 || Op4Val > RegWidth)
3703 return Error(Op4.getStartLoc(),
3704 "expected integer in range [1, 32]");
3706 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3708 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3709 return Error(Op4.getStartLoc(),
3710 "requested extract overflows register");
3712 const MCExpr *NewOp4 =
3713 MCConstantExpr::Create(NewOp4Val, getContext());
3714 Operands[4] = AArch64Operand::CreateImm(
3715 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3717 Operands[0] = AArch64Operand::CreateToken(
3718 "bfm", false, Op.getStartLoc(), getContext());
3719 else if (Tok == "sbfx")
3720 Operands[0] = AArch64Operand::CreateToken(
3721 "sbfm", false, Op.getStartLoc(), getContext());
3722 else if (Tok == "ubfx")
3723 Operands[0] = AArch64Operand::CreateToken(
3724 "ubfm", false, Op.getStartLoc(), getContext());
3726 llvm_unreachable("No valid mnemonic for alias?");
3731 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3732 // InstAlias can't quite handle this since the reg classes aren't
3734 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3735 // The source register can be Wn here, but the matcher expects a
3736 // GPR64. Twiddle it here if necessary.
3737 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3739 unsigned Reg = getXRegFromWReg(Op.getReg());
3740 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3741 Op.getEndLoc(), getContext());
3744 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3745 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3746 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3748 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3750 // The source register can be Wn here, but the matcher expects a
3751 // GPR64. Twiddle it here if necessary.
3752 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3754 unsigned Reg = getXRegFromWReg(Op.getReg());
3755 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3756 Op.getEndLoc(), getContext());
3760 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3761 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3762 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3764 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3766 // The source register can be Wn here, but the matcher expects a
3767 // GPR32. Twiddle it here if necessary.
3768 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3770 unsigned Reg = getWRegFromXReg(Op.getReg());
3771 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3772 Op.getEndLoc(), getContext());
3777 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3778 if (NumOperands == 3 && Tok == "fmov") {
3779 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3780 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3781 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3783 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3787 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3788 Op.getEndLoc(), getContext());
3793 // First try to match against the secondary set of tables containing the
3794 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3795 unsigned MatchResult =
3796 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3798 // If that fails, try against the alternate table containing long-form NEON:
3799 // "fadd v0.2s, v1.2s, v2.2s"
3800 if (MatchResult != Match_Success)
3802 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3804 switch (MatchResult) {
3805 case Match_Success: {
3806 // Perform range checking and other semantic validations
3807 SmallVector<SMLoc, 8> OperandLocs;
3808 NumOperands = Operands.size();
3809 for (unsigned i = 1; i < NumOperands; ++i)
3810 OperandLocs.push_back(Operands[i]->getStartLoc());
3811 if (validateInstruction(Inst, OperandLocs))
3815 Out.EmitInstruction(Inst, STI);
3818 case Match_MissingFeature: {
3819 assert(ErrorInfo && "Unknown missing feature!");
3820 // Special case the error message for the very common case where only
3821 // a single subtarget feature is missing (neon, e.g.).
3822 std::string Msg = "instruction requires:";
3824 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3825 if (ErrorInfo & Mask) {
3827 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3831 return Error(IDLoc, Msg);
3833 case Match_MnemonicFail:
3834 return showMatchError(IDLoc, MatchResult);
3835 case Match_InvalidOperand: {
3836 SMLoc ErrorLoc = IDLoc;
3837 if (ErrorInfo != ~0ULL) {
3838 if (ErrorInfo >= Operands.size())
3839 return Error(IDLoc, "too few operands for instruction");
3841 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3842 if (ErrorLoc == SMLoc())
3845 // If the match failed on a suffix token operand, tweak the diagnostic
3847 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3848 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3849 MatchResult = Match_InvalidSuffix;
3851 return showMatchError(ErrorLoc, MatchResult);
3853 case Match_InvalidMemoryIndexed1:
3854 case Match_InvalidMemoryIndexed2:
3855 case Match_InvalidMemoryIndexed4:
3856 case Match_InvalidMemoryIndexed8:
3857 case Match_InvalidMemoryIndexed16:
3858 case Match_InvalidCondCode:
3859 case Match_AddSubRegExtendSmall:
3860 case Match_AddSubRegExtendLarge:
3861 case Match_AddSubSecondSource:
3862 case Match_LogicalSecondSource:
3863 case Match_AddSubRegShift32:
3864 case Match_AddSubRegShift64:
3865 case Match_InvalidMovImm32Shift:
3866 case Match_InvalidMovImm64Shift:
3867 case Match_InvalidFPImm:
3868 case Match_InvalidMemoryWExtend8:
3869 case Match_InvalidMemoryWExtend16:
3870 case Match_InvalidMemoryWExtend32:
3871 case Match_InvalidMemoryWExtend64:
3872 case Match_InvalidMemoryWExtend128:
3873 case Match_InvalidMemoryXExtend8:
3874 case Match_InvalidMemoryXExtend16:
3875 case Match_InvalidMemoryXExtend32:
3876 case Match_InvalidMemoryXExtend64:
3877 case Match_InvalidMemoryXExtend128:
3878 case Match_InvalidMemoryIndexed4SImm7:
3879 case Match_InvalidMemoryIndexed8SImm7:
3880 case Match_InvalidMemoryIndexed16SImm7:
3881 case Match_InvalidMemoryIndexedSImm9:
3882 case Match_InvalidImm0_7:
3883 case Match_InvalidImm0_15:
3884 case Match_InvalidImm0_31:
3885 case Match_InvalidImm0_63:
3886 case Match_InvalidImm0_127:
3887 case Match_InvalidImm0_65535:
3888 case Match_InvalidImm1_8:
3889 case Match_InvalidImm1_16:
3890 case Match_InvalidImm1_32:
3891 case Match_InvalidImm1_64:
3892 case Match_InvalidIndex1:
3893 case Match_InvalidIndexB:
3894 case Match_InvalidIndexH:
3895 case Match_InvalidIndexS:
3896 case Match_InvalidIndexD:
3897 case Match_InvalidLabel:
3900 if (ErrorInfo >= Operands.size())
3901 return Error(IDLoc, "too few operands for instruction");
3902 // Any time we get here, there's nothing fancy to do. Just get the
3903 // operand SMLoc and display the diagnostic.
3904 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3905 if (ErrorLoc == SMLoc())
3907 return showMatchError(ErrorLoc, MatchResult);
3911 llvm_unreachable("Implement any new match types added!");
3914 /// ParseDirective parses the arm specific directives
3915 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3916 const MCObjectFileInfo::Environment Format =
3917 getContext().getObjectFileInfo()->getObjectFileType();
3918 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3919 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3921 StringRef IDVal = DirectiveID.getIdentifier();
3922 SMLoc Loc = DirectiveID.getLoc();
3923 if (IDVal == ".hword")
3924 return parseDirectiveWord(2, Loc);
3925 if (IDVal == ".word")
3926 return parseDirectiveWord(4, Loc);
3927 if (IDVal == ".xword")
3928 return parseDirectiveWord(8, Loc);
3929 if (IDVal == ".tlsdesccall")
3930 return parseDirectiveTLSDescCall(Loc);
3931 if (IDVal == ".ltorg" || IDVal == ".pool")
3932 return parseDirectiveLtorg(Loc);
3933 if (IDVal == ".unreq")
3934 return parseDirectiveUnreq(DirectiveID.getLoc());
3936 if (!IsMachO && !IsCOFF) {
3937 if (IDVal == ".inst")
3938 return parseDirectiveInst(Loc);
3941 return parseDirectiveLOH(IDVal, Loc);
3944 /// parseDirectiveWord
3945 /// ::= .word [ expression (, expression)* ]
3946 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3947 MCAsmParser &Parser = getParser();
3948 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3950 const MCExpr *Value;
3951 if (getParser().parseExpression(Value))
3954 getParser().getStreamer().EmitValue(Value, Size);
3956 if (getLexer().is(AsmToken::EndOfStatement))
3959 // FIXME: Improve diagnostic.
3960 if (getLexer().isNot(AsmToken::Comma))
3961 return Error(L, "unexpected token in directive");
3970 /// parseDirectiveInst
3971 /// ::= .inst opcode [, ...]
3972 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
3973 MCAsmParser &Parser = getParser();
3974 if (getLexer().is(AsmToken::EndOfStatement)) {
3975 Parser.eatToEndOfStatement();
3976 Error(Loc, "expected expression following directive");
3983 if (getParser().parseExpression(Expr)) {
3984 Error(Loc, "expected expression");
3988 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3990 Error(Loc, "expected constant expression");
3994 getTargetStreamer().emitInst(Value->getValue());
3996 if (getLexer().is(AsmToken::EndOfStatement))
3999 if (getLexer().isNot(AsmToken::Comma)) {
4000 Error(Loc, "unexpected token in directive");
4004 Parser.Lex(); // Eat comma.
4011 // parseDirectiveTLSDescCall:
4012 // ::= .tlsdesccall symbol
4013 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4015 if (getParser().parseIdentifier(Name))
4016 return Error(L, "expected symbol after directive");
4018 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4019 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4020 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4023 Inst.setOpcode(AArch64::TLSDESCCALL);
4024 Inst.addOperand(MCOperand::CreateExpr(Expr));
4026 getParser().getStreamer().EmitInstruction(Inst, STI);
4030 /// ::= .loh <lohName | lohId> label1, ..., labelN
4031 /// The number of arguments depends on the loh identifier.
4032 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4033 if (IDVal != MCLOHDirectiveName())
4036 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4037 if (getParser().getTok().isNot(AsmToken::Integer))
4038 return TokError("expected an identifier or a number in directive");
4039 // We successfully get a numeric value for the identifier.
4040 // Check if it is valid.
4041 int64_t Id = getParser().getTok().getIntVal();
4042 if (Id <= -1U && !isValidMCLOHType(Id))
4043 return TokError("invalid numeric identifier in directive");
4044 Kind = (MCLOHType)Id;
4046 StringRef Name = getTok().getIdentifier();
4047 // We successfully parse an identifier.
4048 // Check if it is a recognized one.
4049 int Id = MCLOHNameToId(Name);
4052 return TokError("invalid identifier in directive");
4053 Kind = (MCLOHType)Id;
4055 // Consume the identifier.
4057 // Get the number of arguments of this LOH.
4058 int NbArgs = MCLOHIdToNbArgs(Kind);
4060 assert(NbArgs != -1 && "Invalid number of arguments");
4062 SmallVector<MCSymbol *, 3> Args;
4063 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4065 if (getParser().parseIdentifier(Name))
4066 return TokError("expected identifier in directive");
4067 Args.push_back(getContext().GetOrCreateSymbol(Name));
4069 if (Idx + 1 == NbArgs)
4071 if (getLexer().isNot(AsmToken::Comma))
4072 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4075 if (getLexer().isNot(AsmToken::EndOfStatement))
4076 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4078 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4082 /// parseDirectiveLtorg
4083 /// ::= .ltorg | .pool
4084 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4085 getTargetStreamer().emitCurrentConstantPool();
4089 /// parseDirectiveReq
4090 /// ::= name .req registername
4091 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4092 MCAsmParser &Parser = getParser();
4093 Parser.Lex(); // Eat the '.req' token.
4094 SMLoc SRegLoc = getLoc();
4095 unsigned RegNum = tryParseRegister();
4096 bool IsVector = false;
4098 if (RegNum == static_cast<unsigned>(-1)) {
4100 RegNum = tryMatchVectorRegister(Kind, false);
4101 if (!Kind.empty()) {
4102 Error(SRegLoc, "vector register without type specifier expected");
4108 if (RegNum == static_cast<unsigned>(-1)) {
4109 Parser.eatToEndOfStatement();
4110 Error(SRegLoc, "register name or alias expected");
4114 // Shouldn't be anything else.
4115 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4116 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4117 Parser.eatToEndOfStatement();
4121 Parser.Lex(); // Consume the EndOfStatement
4123 auto pair = std::make_pair(IsVector, RegNum);
4124 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4125 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4130 /// parseDirectiveUneq
4131 /// ::= .unreq registername
4132 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4133 MCAsmParser &Parser = getParser();
4134 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4135 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4136 Parser.eatToEndOfStatement();
4139 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4140 Parser.Lex(); // Eat the identifier.
4145 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4146 AArch64MCExpr::VariantKind &ELFRefKind,
4147 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4149 ELFRefKind = AArch64MCExpr::VK_INVALID;
4150 DarwinRefKind = MCSymbolRefExpr::VK_None;
4153 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4154 ELFRefKind = AE->getKind();
4155 Expr = AE->getSubExpr();
4158 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4160 // It's a simple symbol reference with no addend.
4161 DarwinRefKind = SE->getKind();
4165 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4169 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4172 DarwinRefKind = SE->getKind();
4174 if (BE->getOpcode() != MCBinaryExpr::Add &&
4175 BE->getOpcode() != MCBinaryExpr::Sub)
4178 // See if the addend is is a constant, otherwise there's more going
4179 // on here than we can deal with.
4180 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4184 Addend = AddendExpr->getValue();
4185 if (BE->getOpcode() == MCBinaryExpr::Sub)
4188 // It's some symbol reference + a constant addend, but really
4189 // shouldn't use both Darwin and ELF syntax.
4190 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4191 DarwinRefKind == MCSymbolRefExpr::VK_None;
4194 /// Force static initialization.
4195 extern "C" void LLVMInitializeAArch64AsmParser() {
4196 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4197 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4198 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4201 #define GET_REGISTER_MATCHER
4202 #define GET_SUBTARGET_FEATURE_NAME
4203 #define GET_MATCHER_IMPLEMENTATION
4204 #include "AArch64GenAsmMatcher.inc"
4206 // Define this matcher function after the auto-generated include so we
4207 // have the match class enum definitions.
4208 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4210 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4211 // If the kind is a token for a literal immediate, check if our asm
4212 // operand matches. This is for InstAliases which have a fixed-value
4213 // immediate in the syntax.
4214 int64_t ExpectedVal;
4217 return Match_InvalidOperand;
4259 return Match_InvalidOperand;
4260 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4262 return Match_InvalidOperand;
4263 if (CE->getValue() == ExpectedVal)
4264 return Match_Success;
4265 return Match_InvalidOperand;