1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the (GNU-style) assembly parser for the AArch64
13 //===----------------------------------------------------------------------===//
16 #include "MCTargetDesc/AArch64MCTargetDesc.h"
17 #include "MCTargetDesc/AArch64MCExpr.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/StringSwitch.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCInst.h"
26 #include "llvm/MC/MCParser/MCAsmLexer.h"
27 #include "llvm/MC/MCParser/MCAsmParser.h"
28 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
29 #include "llvm/MC/MCRegisterInfo.h"
30 #include "llvm/MC/MCStreamer.h"
31 #include "llvm/MC/MCSubtargetInfo.h"
32 #include "llvm/MC/MCTargetAsmParser.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
47 #define GET_ASSEMBLER_HEADER
48 #include "AArch64GenAsmMatcher.inc"
51 enum AArch64MatchResultTy {
52 Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
53 #define GET_OPERAND_DIAGNOSTIC_TYPES
54 #include "AArch64GenAsmMatcher.inc"
57 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
58 const MCInstrInfo &MII,
59 const MCTargetOptions &Options)
60 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
61 MCAsmParserExtension::Initialize(_Parser);
63 // Initialize the set of available features.
64 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
67 // These are the public interface of the MCTargetAsmParser
68 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
69 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
71 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
73 bool ParseDirective(AsmToken DirectiveID);
74 bool ParseDirectiveTLSDescCall(SMLoc L);
75 bool ParseDirectiveWord(unsigned Size, SMLoc L);
77 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
78 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
79 MCStreamer&Out, unsigned &ErrorInfo,
80 bool MatchingInlineAsm);
82 // The rest of the sub-parsers have more freedom over interface: they return
83 // an OperandMatchResultTy because it's less ambiguous than true/false or
84 // -1/0/1 even if it is more verbose
86 ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
89 OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
91 OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
94 ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
98 ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
102 ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
105 ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
108 ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
111 ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
114 ParseFPImm0AndImm0Operand( SmallVectorImpl<MCParsedAsmOperand*> &Operands);
116 template<typename SomeNamedImmMapper> OperandMatchResultTy
117 ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
118 return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
122 ParseNamedImmOperand(const NamedImmMapper &Mapper,
123 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
126 ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
129 ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
132 ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
134 bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
137 OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);
139 bool validateInstruction(MCInst &Inst,
140 const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
142 /// Scan the next token (which had better be an identifier) and determine
143 /// whether it represents a general-purpose or vector register. It returns
144 /// true if an identifier was found and populates its reference arguments. It
145 /// does not consume the token.
147 IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
148 SMLoc &LayoutLoc) const;
156 /// Instances of this class represent a parsed AArch64 machine instruction.
157 class AArch64Operand : public MCParsedAsmOperand {
160 k_ImmWithLSL, // #uimm {, LSL #amt }
161 k_CondCode, // eq/ne/...
162 k_FPImmediate, // Limited-precision floating-point imm
163 k_Immediate, // Including expressions referencing symbols
166 k_VectorList, // A sequential list of 1 to 4 registers.
167 k_SysReg, // The register operand of MRS and MSR instructions
168 k_Token, // The mnemonic; other raw tokens the auto-generated
169 k_WrappedRegister // Load/store exclusive permit a wrapped register.
172 SMLoc StartLoc, EndLoc;
174 struct ImmWithLSLOp {
176 unsigned ShiftAmount;
181 A64CC::CondCodes Code;
196 struct ShiftExtendOp {
197 A64SE::ShiftExtSpecifiers ShiftType;
202 // A vector register list is a sequential list of 1 to 4 registers.
203 struct VectorListOp {
206 A64Layout::VectorLayout Layout;
220 struct ImmWithLSLOp ImmWithLSL;
221 struct CondCodeOp CondCode;
222 struct FPImmOp FPImm;
225 struct ShiftExtendOp ShiftExtend;
226 struct VectorListOp VectorList;
227 struct SysRegOp SysReg;
231 AArch64Operand(KindTy K, SMLoc S, SMLoc E)
232 : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
235 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
238 SMLoc getStartLoc() const { return StartLoc; }
239 SMLoc getEndLoc() const { return EndLoc; }
240 void print(raw_ostream&) const;
243 StringRef getToken() const {
244 assert(Kind == k_Token && "Invalid access!");
245 return StringRef(Tok.Data, Tok.Length);
248 unsigned getReg() const {
249 assert((Kind == k_Register || Kind == k_WrappedRegister)
250 && "Invalid access!");
254 const MCExpr *getImm() const {
255 assert(Kind == k_Immediate && "Invalid access!");
259 A64CC::CondCodes getCondCode() const {
260 assert(Kind == k_CondCode && "Invalid access!");
261 return CondCode.Code;
264 static bool isNonConstantExpr(const MCExpr *E,
265 AArch64MCExpr::VariantKind &Variant) {
266 if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
267 Variant = A64E->getKind();
269 } else if (!isa<MCConstantExpr>(E)) {
270 Variant = AArch64MCExpr::VK_AARCH64_None;
277 bool isCondCode() const { return Kind == k_CondCode; }
278 bool isToken() const { return Kind == k_Token; }
279 bool isReg() const { return Kind == k_Register; }
280 bool isImm() const { return Kind == k_Immediate; }
281 bool isMem() const { return false; }
282 bool isFPImm() const { return Kind == k_FPImmediate; }
283 bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
284 bool isSysReg() const { return Kind == k_SysReg; }
285 bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
286 bool isWrappedReg() const { return Kind == k_WrappedRegister; }
288 bool isAddSubImmLSL0() const {
289 if (!isImmWithLSL()) return false;
290 if (ImmWithLSL.ShiftAmount != 0) return false;
292 AArch64MCExpr::VariantKind Variant;
293 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
294 return Variant == AArch64MCExpr::VK_AARCH64_LO12
295 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
296 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
297 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
298 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
299 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
302 // Otherwise it should be a real immediate in range:
303 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
304 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
307 bool isAddSubImmLSL12() const {
308 if (!isImmWithLSL()) return false;
309 if (ImmWithLSL.ShiftAmount != 12) return false;
311 AArch64MCExpr::VariantKind Variant;
312 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
313 return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
314 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
317 // Otherwise it should be a real immediate in range:
318 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
319 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
322 template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
323 if (!isShiftOrExtend()) return false;
325 A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
326 if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
329 if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
332 return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
335 bool isAdrpLabel() const {
336 if (!isImm()) return false;
338 AArch64MCExpr::VariantKind Variant;
339 if (isNonConstantExpr(getImm(), Variant)) {
340 return Variant == AArch64MCExpr::VK_AARCH64_None
341 || Variant == AArch64MCExpr::VK_AARCH64_GOT
342 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
343 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
346 return isLabel<21, 4096>();
349 template<unsigned RegWidth> bool isBitfieldWidth() const {
350 if (!isImm()) return false;
352 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
353 if (!CE) return false;
355 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
358 template<int RegWidth>
359 bool isCVTFixedPos() const {
360 if (!isImm()) return false;
362 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
363 if (!CE) return false;
365 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
368 bool isFMOVImm() const {
369 if (!isFPImm()) return false;
371 APFloat RealVal(FPImm.Val);
373 return A64Imms::isFPImm(RealVal, ImmVal);
376 bool isFPZero() const {
377 if (!isFPImm()) return false;
379 APFloat RealVal(FPImm.Val);
380 return RealVal.isPosZero();
383 template<unsigned field_width, unsigned scale>
384 bool isLabel() const {
385 if (!isImm()) return false;
387 if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
389 } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
390 int64_t Val = CE->getValue();
391 int64_t Min = - (scale * (1LL << (field_width - 1)));
392 int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
393 return (Val % scale) == 0 && Val >= Min && Val <= Max;
396 // N.b. this disallows explicit relocation specifications via an
397 // AArch64MCExpr. Users needing that behaviour
401 bool isLane1() const {
402 if (!isImm()) return false;
404 // Because it's come through custom assembly parsing, it must always be a
405 // constant expression.
406 return cast<MCConstantExpr>(getImm())->getValue() == 1;
409 bool isLoadLitLabel() const {
410 if (!isImm()) return false;
412 AArch64MCExpr::VariantKind Variant;
413 if (isNonConstantExpr(getImm(), Variant)) {
414 return Variant == AArch64MCExpr::VK_AARCH64_None
415 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
418 return isLabel<19, 4>();
421 template<unsigned RegWidth> bool isLogicalImm() const {
422 if (!isImm()) return false;
424 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
425 if (!CE) return false;
428 return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
431 template<unsigned RegWidth> bool isLogicalImmMOV() const {
432 if (!isLogicalImm<RegWidth>()) return false;
434 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
436 // The move alias for ORR is only valid if the immediate cannot be
437 // represented with a move (immediate) instruction; they take priority.
439 return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
440 && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
443 template<int MemSize>
444 bool isOffsetUImm12() const {
445 if (!isImm()) return false;
447 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
449 // Assume they know what they're doing for now if they've given us a
450 // non-constant expression. In principle we could check for ridiculous
451 // things that can't possibly work or relocations that would almost
452 // certainly break resulting code.
456 int64_t Val = CE->getValue();
458 // Must be a multiple of the access size in bytes.
459 if ((Val & (MemSize - 1)) != 0) return false;
461 // Must be 12-bit unsigned
462 return Val >= 0 && Val <= 0xfff * MemSize;
465 template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
466 bool isShift() const {
467 if (!isShiftOrExtend()) return false;
469 if (ShiftExtend.ShiftType != SHKind)
472 return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
475 bool isMOVN32Imm() const {
476 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
477 AArch64MCExpr::VK_AARCH64_SABS_G0,
478 AArch64MCExpr::VK_AARCH64_SABS_G1,
479 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
480 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
481 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
482 AArch64MCExpr::VK_AARCH64_TPREL_G1,
483 AArch64MCExpr::VK_AARCH64_TPREL_G0,
485 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
487 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
490 bool isMOVN64Imm() const {
491 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
492 AArch64MCExpr::VK_AARCH64_SABS_G0,
493 AArch64MCExpr::VK_AARCH64_SABS_G1,
494 AArch64MCExpr::VK_AARCH64_SABS_G2,
495 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
496 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
497 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
498 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
499 AArch64MCExpr::VK_AARCH64_TPREL_G2,
500 AArch64MCExpr::VK_AARCH64_TPREL_G1,
501 AArch64MCExpr::VK_AARCH64_TPREL_G0,
503 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
505 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
509 bool isMOVZ32Imm() const {
510 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
511 AArch64MCExpr::VK_AARCH64_ABS_G0,
512 AArch64MCExpr::VK_AARCH64_ABS_G1,
513 AArch64MCExpr::VK_AARCH64_SABS_G0,
514 AArch64MCExpr::VK_AARCH64_SABS_G1,
515 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
516 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
517 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
518 AArch64MCExpr::VK_AARCH64_TPREL_G1,
519 AArch64MCExpr::VK_AARCH64_TPREL_G0,
521 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
523 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
526 bool isMOVZ64Imm() const {
527 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
528 AArch64MCExpr::VK_AARCH64_ABS_G0,
529 AArch64MCExpr::VK_AARCH64_ABS_G1,
530 AArch64MCExpr::VK_AARCH64_ABS_G2,
531 AArch64MCExpr::VK_AARCH64_ABS_G3,
532 AArch64MCExpr::VK_AARCH64_SABS_G0,
533 AArch64MCExpr::VK_AARCH64_SABS_G1,
534 AArch64MCExpr::VK_AARCH64_SABS_G2,
535 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
536 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
537 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
538 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
539 AArch64MCExpr::VK_AARCH64_TPREL_G2,
540 AArch64MCExpr::VK_AARCH64_TPREL_G1,
541 AArch64MCExpr::VK_AARCH64_TPREL_G0,
543 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
545 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
548 bool isMOVK32Imm() const {
549 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
550 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
551 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
552 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
553 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
554 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
555 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
556 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
558 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
560 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
563 bool isMOVK64Imm() const {
564 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
565 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
566 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
567 AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
568 AArch64MCExpr::VK_AARCH64_ABS_G3,
569 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
570 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
571 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
572 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
573 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
575 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
577 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
580 bool isMoveWideImm(unsigned RegWidth,
581 const AArch64MCExpr::VariantKind *PermittedModifiers,
582 unsigned NumModifiers) const {
583 if (!isImmWithLSL()) return false;
585 if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
586 if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
588 AArch64MCExpr::VariantKind Modifier;
589 if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
590 // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
591 if (!ImmWithLSL.ImplicitAmount) return false;
593 for (unsigned i = 0; i < NumModifiers; ++i)
594 if (PermittedModifiers[i] == Modifier) return true;
599 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
600 return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
603 template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
604 bool isMoveWideMovAlias() const {
605 if (!isImm()) return false;
607 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
608 if (!CE) return false;
611 uint64_t Value = CE->getValue();
613 // If this is a 32-bit instruction then all bits above 32 should be the
614 // same: either of these is fine because signed/unsigned values should be
616 if (RegWidth == 32) {
617 if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
620 Value &= 0xffffffffULL;
623 return isValidImm(RegWidth, Value, UImm16, Shift);
626 bool isMSRWithReg() const {
627 if (!isSysReg()) return false;
629 bool IsKnownRegister;
630 StringRef Name(SysReg.Data, SysReg.Length);
631 A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
633 return IsKnownRegister;
636 bool isMSRPState() const {
637 if (!isSysReg()) return false;
639 bool IsKnownRegister;
640 StringRef Name(SysReg.Data, SysReg.Length);
641 A64PState::PStateMapper().fromString(Name, IsKnownRegister);
643 return IsKnownRegister;
647 if (!isSysReg()) return false;
649 // First check against specific MSR-only (write-only) registers
650 bool IsKnownRegister;
651 StringRef Name(SysReg.Data, SysReg.Length);
652 A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
654 return IsKnownRegister;
657 bool isPRFM() const {
658 if (!isImm()) return false;
660 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665 return CE->getValue() >= 0 && CE->getValue() <= 31;
668 template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
669 if (!isShiftOrExtend()) return false;
671 if (ShiftExtend.ShiftType != SHKind)
674 return ShiftExtend.Amount <= 4;
677 bool isRegExtendLSL() const {
678 if (!isShiftOrExtend()) return false;
680 if (ShiftExtend.ShiftType != A64SE::LSL)
683 return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
686 // if 0 < value <= w, return true
687 bool isShrFixedWidth(int w) const {
690 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693 int64_t Value = CE->getValue();
694 return Value > 0 && Value <= w;
697 bool isShrImm8() const { return isShrFixedWidth(8); }
699 bool isShrImm16() const { return isShrFixedWidth(16); }
701 bool isShrImm32() const { return isShrFixedWidth(32); }
703 bool isShrImm64() const { return isShrFixedWidth(64); }
705 // if 0 <= value < w, return true
706 bool isShlFixedWidth(int w) const {
709 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
712 int64_t Value = CE->getValue();
713 return Value >= 0 && Value < w;
716 bool isShlImm8() const { return isShlFixedWidth(8); }
718 bool isShlImm16() const { return isShlFixedWidth(16); }
720 bool isShlImm32() const { return isShlFixedWidth(32); }
722 bool isShlImm64() const { return isShlFixedWidth(64); }
724 bool isNeonMovImmShiftLSL() const {
725 if (!isShiftOrExtend())
728 if (ShiftExtend.ShiftType != A64SE::LSL)
731 // Valid shift amount is 0, 8, 16 and 24.
732 return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
735 bool isNeonMovImmShiftLSLH() const {
736 if (!isShiftOrExtend())
739 if (ShiftExtend.ShiftType != A64SE::LSL)
742 // Valid shift amount is 0 and 8.
743 return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
746 bool isNeonMovImmShiftMSL() const {
747 if (!isShiftOrExtend())
750 if (ShiftExtend.ShiftType != A64SE::MSL)
753 // Valid shift amount is 8 and 16.
754 return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
757 template <A64Layout::VectorLayout Layout, unsigned Count>
758 bool isVectorList() const {
759 return Kind == k_VectorList && VectorList.Layout == Layout &&
760 VectorList.Count == Count;
763 template <int MemSize> bool isSImm7Scaled() const {
767 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
768 if (!CE) return false;
770 int64_t Val = CE->getValue();
771 if (Val % MemSize != 0) return false;
775 return Val >= -64 && Val < 64;
778 template<int BitWidth>
779 bool isSImm() const {
780 if (!isImm()) return false;
782 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783 if (!CE) return false;
785 return CE->getValue() >= -(1LL << (BitWidth - 1))
786 && CE->getValue() < (1LL << (BitWidth - 1));
789 template<int bitWidth>
790 bool isUImm() const {
791 if (!isImm()) return false;
793 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
794 if (!CE) return false;
796 return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
799 bool isUImm() const {
800 if (!isImm()) return false;
802 return isa<MCConstantExpr>(getImm());
805 bool isNeonUImm64Mask() const {
809 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
813 uint64_t Value = CE->getValue();
815 // i64 value with each byte being either 0x00 or 0xff.
816 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
817 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
822 // if value == N, return true
824 bool isExactImm() const {
825 if (!isImm()) return false;
827 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828 if (!CE) return false;
830 return CE->getValue() == N;
833 bool isFPZeroIZero() const {
837 static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
838 unsigned ShiftAmount,
841 AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
842 Op->ImmWithLSL.Val = Val;
843 Op->ImmWithLSL.ShiftAmount = ShiftAmount;
844 Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
848 static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
850 AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
851 Op->CondCode.Code = Code;
855 static AArch64Operand *CreateFPImm(double Val,
857 AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
862 static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
863 AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
868 static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
869 AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
870 Op->Reg.RegNum = RegNum;
874 static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
875 AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
876 Op->Reg.RegNum = RegNum;
880 static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
884 AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
885 Op->ShiftExtend.ShiftType = ShiftTyp;
886 Op->ShiftExtend.Amount = Amount;
887 Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
891 static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
892 AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
893 Op->Tok.Data = Str.data();
894 Op->Tok.Length = Str.size();
898 static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
899 A64Layout::VectorLayout Layout,
901 AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
902 Op->VectorList.RegNum = RegNum;
903 Op->VectorList.Count = Count;
904 Op->VectorList.Layout = Layout;
910 static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
911 AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
912 Op->Tok.Data = Str.data();
913 Op->Tok.Length = Str.size();
918 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
919 // Add as immediates when possible.
920 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
921 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
923 Inst.addOperand(MCOperand::CreateExpr(Expr));
926 template<unsigned RegWidth>
927 void addBFILSBOperands(MCInst &Inst, unsigned N) const {
928 assert(N == 1 && "Invalid number of operands!");
929 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
930 unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
931 Inst.addOperand(MCOperand::CreateImm(EncodedVal));
934 void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
935 assert(N == 1 && "Invalid number of operands!");
936 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
937 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
940 void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
941 assert(N == 1 && "Invalid number of operands!");
943 uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
944 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
946 Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
949 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
950 assert(N == 1 && "Invalid number of operands!");
951 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
954 void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
955 assert(N == 1 && "Invalid number of operands!");
957 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
958 Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
961 void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
962 assert(N == 1 && "Invalid number of operands!");
964 APFloat RealVal(FPImm.Val);
966 A64Imms::isFPImm(RealVal, ImmVal);
968 Inst.addOperand(MCOperand::CreateImm(ImmVal));
971 void addFPZeroOperands(MCInst &Inst, unsigned N) const {
972 assert(N == 1 && "Invalid number of operands");
973 Inst.addOperand(MCOperand::CreateImm(0));
976 void addFPZeroIZeroOperands(MCInst &Inst, unsigned N) const {
977 addFPZeroOperands(Inst, N);
980 void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
981 assert(N == 1 && "Invalid number of operands!");
982 unsigned Encoded = A64InvertCondCode(getCondCode());
983 Inst.addOperand(MCOperand::CreateImm(Encoded));
986 void addRegOperands(MCInst &Inst, unsigned N) const {
987 assert(N == 1 && "Invalid number of operands!");
988 Inst.addOperand(MCOperand::CreateReg(getReg()));
991 void addImmOperands(MCInst &Inst, unsigned N) const {
992 assert(N == 1 && "Invalid number of operands!");
993 addExpr(Inst, getImm());
996 template<int MemSize>
997 void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
998 assert(N == 1 && "Invalid number of operands!");
1000 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1001 uint64_t Val = CE->getValue() / MemSize;
1002 Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
1005 template<int BitWidth>
1006 void addSImmOperands(MCInst &Inst, unsigned N) const {
1007 assert(N == 1 && "Invalid number of operands!");
1009 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1010 uint64_t Val = CE->getValue();
1011 Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
1014 void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
1015 assert (N == 1 && "Invalid number of operands!");
1017 addExpr(Inst, ImmWithLSL.Val);
1020 template<unsigned field_width, unsigned scale>
1021 void addLabelOperands(MCInst &Inst, unsigned N) const {
1022 assert(N == 1 && "Invalid number of operands!");
1024 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1027 addExpr(Inst, Imm.Val);
1031 int64_t Val = CE->getValue();
1032 assert(Val % scale == 0 && "Unaligned immediate in instruction");
1035 Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
1038 template<int MemSize>
1039 void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
1040 assert(N == 1 && "Invalid number of operands!");
1042 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
1043 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
1045 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1049 template<unsigned RegWidth>
1050 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1051 assert(N == 1 && "Invalid number of operands");
1052 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
1055 A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
1057 Inst.addOperand(MCOperand::CreateImm(Bits));
1060 void addMRSOperands(MCInst &Inst, unsigned N) const {
1061 assert(N == 1 && "Invalid number of operands!");
1064 StringRef Name(SysReg.Data, SysReg.Length);
1065 uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
1067 Inst.addOperand(MCOperand::CreateImm(Bits));
1070 void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
1071 assert(N == 1 && "Invalid number of operands!");
1074 StringRef Name(SysReg.Data, SysReg.Length);
1075 uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
1077 Inst.addOperand(MCOperand::CreateImm(Bits));
1080 void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
1081 assert(N == 1 && "Invalid number of operands!");
1084 StringRef Name(SysReg.Data, SysReg.Length);
1085 uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
1087 Inst.addOperand(MCOperand::CreateImm(Bits));
1090 void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
1091 assert(N == 2 && "Invalid number of operands!");
1093 addExpr(Inst, ImmWithLSL.Val);
1095 AArch64MCExpr::VariantKind Variant;
1096 if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
1097 Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
1101 // We know it's relocated
1103 case AArch64MCExpr::VK_AARCH64_ABS_G0:
1104 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
1105 case AArch64MCExpr::VK_AARCH64_SABS_G0:
1106 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
1107 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
1108 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
1109 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
1110 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
1111 Inst.addOperand(MCOperand::CreateImm(0));
1113 case AArch64MCExpr::VK_AARCH64_ABS_G1:
1114 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
1115 case AArch64MCExpr::VK_AARCH64_SABS_G1:
1116 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
1117 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
1118 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
1119 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
1120 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
1121 Inst.addOperand(MCOperand::CreateImm(1));
1123 case AArch64MCExpr::VK_AARCH64_ABS_G2:
1124 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
1125 case AArch64MCExpr::VK_AARCH64_SABS_G2:
1126 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
1127 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
1128 Inst.addOperand(MCOperand::CreateImm(2));
1130 case AArch64MCExpr::VK_AARCH64_ABS_G3:
1131 Inst.addOperand(MCOperand::CreateImm(3));
1133 default: llvm_unreachable("Inappropriate move wide relocation");
1137 template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
1138 void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
1139 assert(N == 2 && "Invalid number of operands!");
1142 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1143 uint64_t Value = CE->getValue();
1145 if (RegWidth == 32) {
1146 Value &= 0xffffffffULL;
1149 bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
1151 assert(Valid && "Invalid immediates should have been weeded out by now");
1153 Inst.addOperand(MCOperand::CreateImm(UImm16));
1154 Inst.addOperand(MCOperand::CreateImm(Shift));
// Emit the 5-bit prefetch-operation immediate for PRFM instructions.
1157 void addPRFMOperands(MCInst &Inst, unsigned N) const {
1158 assert(N == 1 && "Invalid number of operands!");
1160 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
// Range was established at parse time; the assert only documents it.
1161 assert(CE->getValue() >= 0 && CE->getValue() <= 31
1162 && "PRFM operand should be 5-bits");
1164 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1167 // For Add-sub (extended register) operands. The extend amount is emitted
1167b// verbatim; the extend kind itself is encoded by the instruction class.
1168 void addRegExtendOperands(MCInst &Inst, unsigned N) const {
1169 assert(N == 1 && "Invalid number of operands!");
1171 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1174 // For Vector Immediates shifted imm operands. Valid LSL amounts are the
1174b// multiples of 8 up to 24; anything else is a matcher bug, not user error.
1175 void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
1176 assert(N == 1 && "Invalid number of operands!");
1178 if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
1179 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1181 // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
1182 int64_t Imm = ShiftExtend.Amount / 8;
1183 Inst.addOperand(MCOperand::CreateImm(Imm));
// Halfword variant: only LSL #0 and LSL #8 are representable.
1186 void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1189 if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
1190 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1192 // Encode LSLH shift amount 0, 8 as 0, 1.
1193 int64_t Imm = ShiftExtend.Amount / 8;
1194 Inst.addOperand(MCOperand::CreateImm(Imm));
// MSL ("masking shift left") variant: only #8 and #16 are legal amounts.
1197 void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
1198 assert(N == 1 && "Invalid number of operands!");
1200 if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
1201 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1203 // Encode MSL shift amount 8, 16 as 0, 1.
1204 int64_t Imm = ShiftExtend.Amount / 8 - 1;
1205 Inst.addOperand(MCOperand::CreateImm(Imm));
1208 // For the extend in load-store (register offset) instructions.
// Template shim: forwards the compile-time MemSize to the runtime overload
// below so tablegen'd code can instantiate per access size.
1209 template<unsigned MemSize>
1210 void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
1211 addAddrRegExtendOperands(Inst, N, MemSize);
// Encode the load/store register-offset extend: the high bits of the Option
// field (from the extend kind) combined with the shift-present 'S' bit.
1214 void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
1215 unsigned MemSize) const {
1216 assert(N == 1 && "Invalid number of operands!");
1218 // First bit of Option is set in instruction classes, the high two bits are
1220 unsigned OptionHi = 0;
1221 switch (ShiftExtend.ShiftType) {
1231 llvm_unreachable("Invalid extend type for register offset");
// For byte accesses the S bit distinguishes an explicit "#0" from no amount;
// for wider accesses it records whether any shift amount was written.
1235 if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
1237 else if (MemSize != 1 && ShiftExtend.Amount != 0)
1240 Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
// Plain shifted-register operand: the amount is emitted as-is.
1242 void addShiftOperands(MCInst &Inst, unsigned N) const {
1243 assert(N == 1 && "Invalid number of operands!");
1245 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
// Compress a 64-bit byte-mask constant into an 8-bit immediate: bit i of the
// result is the low bit of byte i of the constant.
1248 void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
1249 assert(N == 1 && "Invalid number of operands!");
1251 // A bit from each byte in the constant forms the encoded immediate
// NOTE(review): dyn_cast result is dereferenced without a null check, while
// the sibling add*Operands methods use cast<>; confirm the operand is always
// an MCConstantExpr by the time this runs.
1252 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1253 uint64_t Value = CE->getValue();
1256 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1257 Imm |= (Value & 1) << i;
1259 Inst.addOperand(MCOperand::CreateImm(Imm));
// A vector list is represented by the (super-)register computed at parse time.
1262 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1263 assert(N == 1 && "Invalid number of operands!");
1264 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1268 } // end anonymous namespace.
// Parse one instruction operand. Tries the tablegen'd custom parsers first,
// then dispatches on the first token: shift/extend specifier, register
// (possibly with a NEON lane), symbolic expression, '#'-immediate, or a
// '['-wrapped address. Appends parsed operands to `Operands`.
1270 AArch64AsmParser::OperandMatchResultTy
1271 AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1272 StringRef Mnemonic) {
1274 // See if the operand has a custom parser
1275 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1277 // It could either succeed, fail or just not care.
1278 if (ResTy != MatchOperand_NoMatch)
1281 switch (getLexer().getKind()) {
1283 Error(Parser.getTok().getLoc(), "unexpected token in operand");
1284 return MatchOperand_ParseFail;
1285 case AsmToken::Identifier: {
1286 // It might be in the LSL/UXTB family ...
1287 OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
1289 // We can only continue if no tokens were eaten.
1290 if (GotShift != MatchOperand_NoMatch)
1293 // ... or it might be a register ...
1294 uint32_t NumLanes = 0;
1295 OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
1296 assert(GotReg != MatchOperand_ParseFail
1297 && "register parsing shouldn't partially succeed");
1299 if (GotReg == MatchOperand_Success) {
// A '[' directly after a vector register introduces a lane index.
1300 if (Parser.getTok().is(AsmToken::LBrac))
1301 return ParseNEONLane(Operands, NumLanes);
1303 return MatchOperand_Success;
1305 // ... or it might be a symbolish thing
1308 case AsmToken::LParen: // E.g. (strcmp-4)
1309 case AsmToken::Integer: // 1f, 2b labels
1310 case AsmToken::String: // quoted labels
1311 case AsmToken::Dot: // . is Current location
1312 case AsmToken::Dollar: // $ is PC
// ':' starts a relocation specifier such as :lo12:sym.
1313 case AsmToken::Colon: {
1314 SMLoc StartLoc = Parser.getTok().getLoc();
1316 const MCExpr *ImmVal = 0;
1318 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1319 return MatchOperand_ParseFail;
1321 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1322 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1323 return MatchOperand_Success;
1325 case AsmToken::Hash: { // Immediates
1326 SMLoc StartLoc = Parser.getTok().getLoc();
1328 const MCExpr *ImmVal = 0;
1331 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1332 return MatchOperand_ParseFail;
1334 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1335 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1336 return MatchOperand_Success;
1338 case AsmToken::LBrac: {
1339 SMLoc Loc = Parser.getTok().getLoc();
1340 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1341 Parser.Lex(); // Eat '['
1343 // There's no comma after a '[', so we can parse the next operand
// Recurse to parse the operand inside the brackets immediately.
1345 return ParseOperand(Operands, Mnemonic);
1347 // The following will likely be useful later, but not in very early cases
1348 case AsmToken::LCurly: // SIMD vector list is not parsed here
1349 llvm_unreachable("Don't know how to deal with '{' in operand");
1350 return MatchOperand_ParseFail;
// Parse an immediate expression, wrapping it in an AArch64MCExpr when it is
// preceded by a ':reloc:' specifier. On success `ExprVal` holds the result.
1354 AArch64AsmParser::OperandMatchResultTy
1355 AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
1356 if (getLexer().is(AsmToken::Colon)) {
1357 AArch64MCExpr::VariantKind RefKind;
1359 OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
1360 if (ResTy != MatchOperand_Success)
1363 const MCExpr *SubExprVal;
1364 if (getParser().parseExpression(SubExprVal))
1365 return MatchOperand_ParseFail;
// Attach the relocation variant to the sub-expression.
1367 ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
1368 return MatchOperand_Success;
1371 // No weird AArch64MCExpr prefix
1372 return getParser().parseExpression(ExprVal)
1373 ? MatchOperand_ParseFail : MatchOperand_Success;
1376 // A lane attached to a NEON register. "[N]", which should yield three tokens:
1377 // '[', N, ']'. A hash is not allowed to precede the immediate here.
// `NumLanes` is the layout-derived upper bound; the lane index must be < it.
// Emits "[", the lane immediate, and "]" as three separate operands.
1378 AArch64AsmParser::OperandMatchResultTy
1379 AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1380 uint32_t NumLanes) {
1381 SMLoc Loc = Parser.getTok().getLoc();
1383 assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
1384 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1385 Parser.Lex(); // Eat '['
1387 if (Parser.getTok().isNot(AsmToken::Integer)) {
1388 Error(Parser.getTok().getLoc(), "expected lane number");
1389 return MatchOperand_ParseFail;
1392 if (Parser.getTok().getIntVal() >= NumLanes) {
1393 Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
1394 return MatchOperand_ParseFail;
1397 const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
1399 SMLoc S = Parser.getTok().getLoc();
1400 Parser.Lex(); // Eat actual lane
1401 SMLoc E = Parser.getTok().getLoc();
1402 Operands.push_back(AArch64Operand::CreateImm(Lane, S, E))
1405 if (Parser.getTok().isNot(AsmToken::RBrac)) {
1406 Error(Parser.getTok().getLoc(), "expected ']' after lane");
1407 return MatchOperand_ParseFail;
1410 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
1411 Parser.Lex(); // Eat ']'
1413 return MatchOperand_Success;
// Parse a ':specifier:' relocation prefix (e.g. :lo12:, :abs_g1_nc:,
// :tprel_lo12:) and translate it to the corresponding AArch64MCExpr variant.
// On entry the lexer sits on the leading ':'; on success it has consumed the
// specifier identifier and expects the trailing ':'.
1416 AArch64AsmParser::OperandMatchResultTy
1417 AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
1418 assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
1421 if (getLexer().isNot(AsmToken::Identifier)) {
1422 Error(Parser.getTok().getLoc(),
1423 "expected relocation specifier in operand after ':'");
1424 return MatchOperand_ParseFail;
// Specifiers are matched case-insensitively via a lowered copy.
1427 std::string LowerCase = Parser.getTok().getIdentifier().lower();
1428 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
1429 .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
1430 .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
1431 .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
1432 .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
1433 .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
1434 .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
1435 .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
1436 .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
1437 .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
1438 .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
1439 .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
1440 .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
1441 .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
1442 .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
1443 .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
1444 .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
1445 .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
1446 .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
1447 .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
1448 .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
1449 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
1450 .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
1451 .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
1452 .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
1453 .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
1454 .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
1455 .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
1456 .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
1457 .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
1458 .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
1459 .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
1460 .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
1461 .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
1462 .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
1463 .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
1464 .Default(AArch64MCExpr::VK_AARCH64_None)
1466 if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
1467 Error(Parser.getTok().getLoc(),
1468 "expected relocation specifier in operand after ':'");
1469 return MatchOperand_ParseFail;
1471 Parser.Lex(); // Eat identifier
1473 if (getLexer().isNot(AsmToken::Colon)) {
1474 Error(Parser.getTok().getLoc(),
1475 "expected ':' after relocation specifier");
1476 return MatchOperand_ParseFail;
1479 return MatchOperand_Success;
// Parse an immediate optionally followed by ", lsl #N" (used by MOVZ/MOVK-
// style operands). Without the LSL suffix, an implicit shift of 0 is stored.
1482 AArch64AsmParser::OperandMatchResultTy
1483 AArch64AsmParser::ParseImmWithLSLOperand(
1484 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1486 SMLoc S = Parser.getTok().getLoc();
1488 if (Parser.getTok().is(AsmToken::Hash))
1489 Parser.Lex(); // Eat '#'
1490 else if (Parser.getTok().isNot(AsmToken::Integer))
1491 // Operand should start from # or should be integer, emit error otherwise.
1492 return MatchOperand_NoMatch;
1495 if (ParseImmediate(Imm) != MatchOperand_Success)
1496 return MatchOperand_ParseFail;
// No comma means no shift clause: record an implicit LSL #0.
1497 else if (Parser.getTok().isNot(AsmToken::Comma)) {
1498 SMLoc E = Parser.getTok().getLoc();
1499 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
1500 return MatchOperand_Success;
1506 // The optional operand must be "lsl #N" where N is non-negative.
1507 if (Parser.getTok().is(AsmToken::Identifier)
1508 && Parser.getTok().getIdentifier().equals_lower("lsl")) {
1511 if (Parser.getTok().is(AsmToken::Hash)) {
1514 if (Parser.getTok().isNot(AsmToken::Integer)) {
1515 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
1516 return MatchOperand_ParseFail;
1521 int64_t ShiftAmount = Parser.getTok().getIntVal();
1523 if (ShiftAmount < 0) {
1524 Error(Parser.getTok().getLoc(), "positive shift amount required");
1525 return MatchOperand_ParseFail;
1527 Parser.Lex(); // Eat the number
1529 SMLoc E = Parser.getTok().getLoc();
1530 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
1532 return MatchOperand_Success;
// Parse a condition-code operand (eq, ne, hs, ...). Not an error if the
// identifier isn't a condition code — the caller may try other operand kinds.
1536 AArch64AsmParser::OperandMatchResultTy
1537 AArch64AsmParser::ParseCondCodeOperand(
1538 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1539 if (Parser.getTok().isNot(AsmToken::Identifier))
1540 return MatchOperand_NoMatch;
1542 StringRef Tok = Parser.getTok().getIdentifier();
1543 A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
1545 if (CondCode == A64CC::Invalid)
1546 return MatchOperand_NoMatch;
1548 SMLoc S = Parser.getTok().getLoc();
1549 Parser.Lex(); // Eat condition code
1550 SMLoc E = Parser.getTok().getLoc();
1552 Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
1553 return MatchOperand_Success;
// Parse a system coprocessor register name "cN" (0 <= N <= 15), as used by
// SYS/AT/DC/IC/TLBI operands, and push it as a constant immediate.
1556 AArch64AsmParser::OperandMatchResultTy
1557 AArch64AsmParser::ParseCRxOperand(
1558 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1559 SMLoc S = Parser.getTok().getLoc();
1560 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1561 Error(S, "Expected cN operand where 0 <= N <= 15");
1562 return MatchOperand_ParseFail;
// Accept either case for the leading 'c'.
1565 StringRef Tok = Parser.getTok().getIdentifier();
1566 if (Tok[0] != 'c' && Tok[0] != 'C') {
1567 Error(S, "Expected cN operand where 0 <= N <= 15");
1568 return MatchOperand_ParseFail;
1572 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1573 if (BadNum || CRNum > 15) {
1574 Error(S, "Expected cN operand where 0 <= N <= 15");
1575 return MatchOperand_ParseFail;
1578 const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
1581 SMLoc E = Parser.getTok().getLoc();
1583 Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
1584 return MatchOperand_Success;
// Parse a floating-point immediate, e.g. "#1.5" or "-0.25". The optional '#'
// and sign are consumed first, then the real-number token itself.
1587 AArch64AsmParser::OperandMatchResultTy
1588 AArch64AsmParser::ParseFPImmOperand(
1589 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1591 SMLoc S = Parser.getTok().getLoc();
1594 if (Parser.getTok().is(AsmToken::Hash)) {
1595 Parser.Lex(); // Eat '#'
1599 bool Negative = false;
1600 if (Parser.getTok().is(AsmToken::Minus)) {
1602 Parser.Lex(); // Eat '-'
1603 } else if (Parser.getTok().is(AsmToken::Plus)) {
1604 Parser.Lex(); // Eat '+'
1607 if (Parser.getTok().isNot(AsmToken::Real)) {
1609 return MatchOperand_NoMatch;
1610 Error(S, "Expected floating-point immediate");
1611 return MatchOperand_ParseFail;
// The lexer's spelling is re-parsed as an IEEE double, then the sign applied.
1614 APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
1615 if (Negative) RealVal.changeSign();
1616 double DblVal = RealVal.convertToDouble();
1618 Parser.Lex(); // Eat real number
1619 SMLoc E = Parser.getTok().getLoc();
1621 Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
1622 return MatchOperand_Success;
// Parse an operand that must be exactly zero, written either as "#0.0" or
// "#0" (used by FP compare-against-zero instructions). Any other value is
// rejected with a diagnostic.
1625 AArch64AsmParser::OperandMatchResultTy
1626 AArch64AsmParser::ParseFPImm0AndImm0Operand(
1627 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1629 SMLoc S = Parser.getTok().getLoc();
1632 if (Parser.getTok().is(AsmToken::Hash)) {
1633 Parser.Lex(); // Eat '#'
1637 APFloat RealVal(0.0);
1638 if (Parser.getTok().is(AsmToken::Real)) {
// Only the literal spelling "0.0" is accepted for the real form.
1639 if(Parser.getTok().getString() != "0.0") {
1640 Error(S, "only #0.0 is acceptable as immediate");
1641 return MatchOperand_ParseFail;
1644 else if (Parser.getTok().is(AsmToken::Integer)) {
1645 if(Parser.getTok().getIntVal() != 0) {
1646 Error(S, "only #0.0 is acceptable as immediate");
1647 return MatchOperand_ParseFail;
1652 return MatchOperand_NoMatch;
1653 Error(S, "only #0.0 is acceptable as immediate");
1654 return MatchOperand_ParseFail;
1657 Parser.Lex(); // Eat real number
1658 SMLoc E = Parser.getTok().getLoc();
1660 Operands.push_back(AArch64Operand::CreateFPImm(0.0, S, E));
1661 return MatchOperand_Success;
1664 // Automatically generated
1665 static unsigned MatchRegisterName(StringRef Name);
// Inspect (without consuming) the current token and decide whether it names a
// register. Outputs: RegNum, the end location of the register portion, and —
// for "reg.layout" vector spellings — the canonical layout string and its
// location. Also resolves aliases (ip0/ip1/fp/lr) and vN vector names.
1668 AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
1670 SMLoc &LayoutLoc) const {
1671 const AsmToken &Tok = Parser.getTok();
1673 if (Tok.isNot(AsmToken::Identifier))
// Register names are matched case-insensitively via a lowered copy.
1676 std::string LowerReg = Tok.getString().lower();
1677 size_t DotPos = LowerReg.find('.');
1679 bool IsVec128 = false;
1680 SMLoc S = Tok.getLoc();
1681 RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
1683 if (DotPos == std::string::npos) {
1684 Layout = StringRef();
1686 // Everything afterwards needs to be a literal token, expected to be
1687 // '.2d','.b' etc for vector registers.
1689 // This StringSwitch validates the input and (perhaps more importantly)
1690 // gives us a permanent string to use in the token (a pointer into LowerReg
1691 // would go out of scope when we return).
1692 LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
1693 StringRef LayoutText = StringRef(LowerReg).substr(DotPos);
1695 // See if it's a 128-bit layout first.
1696 Layout = StringSwitch<const char *>(LayoutText)
1697 .Case(".q", ".q").Case(".1q", ".1q")
1698 .Case(".d", ".d").Case(".2d", ".2d")
1699 .Case(".s", ".s").Case(".4s", ".4s")
1700 .Case(".h", ".h").Case(".8h", ".8h")
1701 .Case(".b", ".b").Case(".16b", ".16b")
1704 if (Layout.size() != 0)
// Not a 128-bit layout: fall back to the 64-bit layout table.
1707 Layout = StringSwitch<const char *>(LayoutText)
1715 if (Layout.size() == 0) {
1716 // If we've still not pinned it down the register is malformed.
// Try the tablegen'd matcher first; fall back to the alias table below.
1721 RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
1722 if (RegNum == AArch64::NoRegister) {
// 'vN' names map to Q registers for 128-bit layouts, D registers otherwise.
1723 RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
1724 .Case("ip0", AArch64::X16)
1725 .Case("ip1", AArch64::X17)
1726 .Case("fp", AArch64::X29)
1727 .Case("lr", AArch64::X30)
1728 .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
1729 .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
1730 .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
1731 .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
1732 .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
1733 .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
1734 .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
1735 .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
1736 .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
1737 .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
1738 .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
1739 .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
1740 .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
1741 .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
1742 .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
1743 .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
1744 .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
1745 .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
1746 .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
1747 .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
1748 .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
1749 .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
1750 .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
1751 .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
1752 .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
1753 .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
1754 .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
1755 .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
1756 .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
1757 .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
1758 .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
1759 .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
1760 .Default(AArch64::NoRegister);
1762 if (RegNum == AArch64::NoRegister)
// Parse a register operand, pushing the register and (for vector spellings)
// its layout token. `NumLanes` is set from the layout: either the explicit
// count ("4s" -> 4) or the full-width count implied by a bare element type.
1768 AArch64AsmParser::OperandMatchResultTy
1769 AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1770 uint32_t &NumLanes) {
1773 SMLoc RegEndLoc, LayoutLoc;
1774 SMLoc S = Parser.getTok().getLoc();
1776 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
1777 return MatchOperand_NoMatch;
1779 Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
1781 if (Layout.size() != 0) {
1782 unsigned long long TmpLanes = 0;
// Layout begins with '.', so substr(1) is the "<count><type>" text; a
// leading digit gives the explicit lane count.
1783 llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
1784 if (TmpLanes != 0) {
1785 NumLanes = TmpLanes;
1787 // If the number of lanes isn't specified explicitly, a valid instruction
1788 // will have an element specifier and be capable of acting on the entire
1790 switch (Layout.back()) {
1791 default: llvm_unreachable("Invalid layout specifier");
1792 case 'b': NumLanes = 16; break;
1793 case 'h': NumLanes = 8; break;
1794 case 's': NumLanes = 4; break;
1795 case 'd': NumLanes = 2; break;
1796 case 'q': NumLanes = 1; break;
1800 Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
1804 return MatchOperand_Success;
// MCTargetAsmParser callback used by directives (e.g. DWARF CFI) that only
// need a register number — layout/lane information is discarded.
1808 AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1810 // This callback is used for things like DWARF frame directives in
1811 // assembly. They don't care about things like NEON layouts or lanes, they
1812 // just want to be able to produce the DWARF register number.
1813 StringRef LayoutSpec;
1814 SMLoc RegEndLoc, LayoutLoc;
1815 StartLoc = Parser.getTok().getLoc();
1817 if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
1821 EndLoc = Parser.getTok().getLoc();
// Parse an operand drawn from a fixed named set (barrier options, prefetch
// ops, etc.) via `Mapper`. Accepts either the symbolic name or a raw
// '#'-immediate that the mapper validates.
1826 AArch64AsmParser::OperandMatchResultTy
1827 AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
1828 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1829 // Since these operands occur in very limited circumstances, without
1830 // alternatives, we actually signal an error if there is no match. If relaxing
1831 // this, beware of unintended consequences: an immediate will be accepted
1832 // during matching, no matter how it gets into the AArch64Operand.
1833 const AsmToken &Tok = Parser.getTok();
1834 SMLoc S = Tok.getLoc();
1836 if (Tok.is(AsmToken::Identifier)) {
1838 uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
1841 Error(S, "operand specifier not recognised");
1842 return MatchOperand_ParseFail;
1845 Parser.Lex(); // We're done with the identifier. Eat it
1847 SMLoc E = Parser.getTok().getLoc();
1848 const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
1849 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
1850 return MatchOperand_Success;
1851 } else if (Tok.is(AsmToken::Hash)) {
1854 const MCExpr *ImmVal;
1855 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1856 return MatchOperand_ParseFail;
// Numeric form must be a constant the mapper considers legal.
1858 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
1859 if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
1860 Error(S, "Invalid immediate for instruction");
1861 return MatchOperand_ParseFail;
1864 SMLoc E = Parser.getTok().getLoc();
1865 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
1866 return MatchOperand_Success;
1869 Error(S, "unexpected operand for instruction");
1870 return MatchOperand_ParseFail;
// Parse an MSR/MRS system-register operand. Stored as the raw identifier
// string (not resolved here) because the same name can encode differently
// depending on the instruction form.
1873 AArch64AsmParser::OperandMatchResultTy
1874 AArch64AsmParser::ParseSysRegOperand(
1875 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1876 const AsmToken &Tok = Parser.getTok();
1878 // Any MSR/MRS operand will be an identifier, and we want to store it as some
1879 // kind of string: SPSel is valid for two different forms of MSR with two
1880 // different encodings. There's no collision at the moment, but the potential
1882 if (!Tok.is(AsmToken::Identifier)) {
1883 return MatchOperand_NoMatch;
1886 SMLoc S = Tok.getLoc();
1887 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
1888 Parser.Lex(); // Eat identifier
1890 return MatchOperand_Success;
// Parse the address operand of a load/store-exclusive: a 64-bit GPR-or-SP
// base, optionally followed by ", #0" (the only offset these instructions
// permit). The base is pushed as a "wrapped" register operand.
1893 AArch64AsmParser::OperandMatchResultTy
1894 AArch64AsmParser::ParseLSXAddressOperand(
1895 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1896 SMLoc S = Parser.getTok().getLoc();
1899 SMLoc RegEndLoc, LayoutLoc;
1901 if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
1902 || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
1903 || Layout.size() != 0) {
1904 // Check Layout.size because we don't want to let "x3.4s" or similar
1906 return MatchOperand_NoMatch;
1908 Parser.Lex(); // Eat register
// Bare "[xN]" form: the closing bracket is handled by the caller.
1910 if (Parser.getTok().is(AsmToken::RBrac)) {
1912 SMLoc E = Parser.getTok().getLoc();
1913 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1914 return MatchOperand_Success;
1917 // Otherwise, only ", #0" is valid
1919 if (Parser.getTok().isNot(AsmToken::Comma)) {
1920 Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
1921 return MatchOperand_ParseFail;
1923 Parser.Lex(); // Eat ','
1925 if (Parser.getTok().isNot(AsmToken::Hash)) {
1926 Error(Parser.getTok().getLoc(), "expected '#0'");
1927 return MatchOperand_ParseFail;
1929 Parser.Lex(); // Eat '#'
1931 if (Parser.getTok().isNot(AsmToken::Integer)
1932 || Parser.getTok().getIntVal() != 0 ) {
1933 Error(Parser.getTok().getLoc(), "expected '#0'");
1934 return MatchOperand_ParseFail;
1936 Parser.Lex(); // Eat '0'
1938 SMLoc E = Parser.getTok().getLoc();
1939 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1940 return MatchOperand_Success;
// Parse a shift/extend modifier: "lsl|lsr|asr|ror|msl #N" or an extend
// (uxtb..sxtx) with an optional "#N". Extends may omit the amount entirely
// (recorded as an implicit 0); real shifts may not.
1943 AArch64AsmParser::OperandMatchResultTy
1944 AArch64AsmParser::ParseShiftExtend(
1945 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1946 StringRef IDVal = Parser.getTok().getIdentifier();
1947 std::string LowerID = IDVal.lower();
1949 A64SE::ShiftExtSpecifiers Spec =
1950 StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
1951 .Case("lsl", A64SE::LSL)
1952 .Case("msl", A64SE::MSL)
1953 .Case("lsr", A64SE::LSR)
1954 .Case("asr", A64SE::ASR)
1955 .Case("ror", A64SE::ROR)
1956 .Case("uxtb", A64SE::UXTB)
1957 .Case("uxth", A64SE::UXTH)
1958 .Case("uxtw", A64SE::UXTW)
1959 .Case("uxtx", A64SE::UXTX)
1960 .Case("sxtb", A64SE::SXTB)
1961 .Case("sxth", A64SE::SXTH)
1962 .Case("sxtw", A64SE::SXTW)
1963 .Case("sxtx", A64SE::SXTX)
1964 .Default(A64SE::Invalid);
1966 if (Spec == A64SE::Invalid)
1967 return MatchOperand_NoMatch;
1971 S = Parser.getTok().getLoc();
1974 if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
1975 Spec != A64SE::ROR && Spec != A64SE::MSL) {
1976 // The shift amount can be omitted for the extending versions, but not real
1978 // add x0, x0, x0, uxtb
1979 // is valid, and equivalent to
1980 // add x0, x0, x0, uxtb #0
1982 if (Parser.getTok().is(AsmToken::Comma) ||
1983 Parser.getTok().is(AsmToken::EndOfStatement) ||
1984 Parser.getTok().is(AsmToken::RBrac)) {
1985 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
1987 return MatchOperand_Success;
1991 // Eat # at beginning of immediate
1992 if (!Parser.getTok().is(AsmToken::Hash)) {
1993 Error(Parser.getTok().getLoc(),
1994 "expected #imm after shift specifier");
1995 return MatchOperand_ParseFail;
1999 // Make sure we do actually have a number
2000 if (!Parser.getTok().is(AsmToken::Integer)) {
2001 Error(Parser.getTok().getLoc(),
2002 "expected integer shift amount");
2003 return MatchOperand_ParseFail;
2005 unsigned Amount = Parser.getTok().getIntVal();
2007 E = Parser.getTok().getLoc();
2009 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
2012 return MatchOperand_Success;
2015 /// Try to parse a vector register token, If it is a vector register,
2016 /// the token is eaten and return true. Otherwise return false.
/// The register must be FPR64 or FPR128 and must carry a ".layout" suffix;
/// on failure an "expected vector type register" diagnostic is emitted.
2017 bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
2018 StringRef &Layout, SMLoc &LayoutLoc) {
2019 bool IsVector = true;
2021 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
2023 else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
2024 .contains(RegNum) &&
2025 !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
2028 else if (Layout.size() == 0)
2032 Error(Parser.getTok().getLoc(), "expected vector type register");
2034 Parser.Lex(); // Eat this token.
2039 // A vector list contains 1-4 consecutive registers.
2040 // Now there are two kinds of vector list when number of vector > 1:
2041 // (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
2042 // (2) {Vn.layout - Vm.layout}
2043 // If the layout is like .b/.h/.s/.d, also parse the lane.
// Lists of more than one register are collapsed into the matching
// DPair/DTriple/DQuad or QPair/QTriple/QQuad super-register. Register numbers
// wrap modulo 32, so {v31, v0} is a legal pair.
2044 AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
2045 SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
2046 if (Parser.getTok().isNot(AsmToken::LCurly)) {
2047 Error(Parser.getTok().getLoc(), "'{' expected");
2048 return MatchOperand_ParseFail;
2050 SMLoc SLoc = Parser.getTok().getLoc();
2051 Parser.Lex(); // Eat '{' token.
2053 unsigned Reg, Count = 1;
2054 StringRef LayoutStr;
2055 SMLoc RegEndLoc, LayoutLoc;
2056 if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
2057 return MatchOperand_ParseFail;
// Range form: {Vn.layout - Vm.layout}.
2059 if (Parser.getTok().is(AsmToken::Minus)) {
2060 Parser.Lex(); // Eat the minus.
2063 StringRef LayoutStr2;
2064 SMLoc RegEndLoc2, LayoutLoc2;
2065 SMLoc RegLoc2 = Parser.getTok().getLoc();
2067 if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
2068 return MatchOperand_ParseFail;
// Distance wraps around the 32-register file.
2069 unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
2071 if (LayoutStr != LayoutStr2) {
2072 Error(LayoutLoc2, "expected the same vector layout");
2073 return MatchOperand_ParseFail;
2075 if (Space == 0 || Space > 3) {
2076 Error(RegLoc2, "invalid number of vectors");
2077 return MatchOperand_ParseFail;
// Comma form: {Vn.layout, Vn+1.layout, ...} — registers must be consecutive.
2082 unsigned LastReg = Reg;
2083 while (Parser.getTok().is(AsmToken::Comma)) {
2084 Parser.Lex(); // Eat the comma.
2086 StringRef LayoutStr2;
2087 SMLoc RegEndLoc2, LayoutLoc2;
2088 SMLoc RegLoc2 = Parser.getTok().getLoc();
2090 if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
2091 return MatchOperand_ParseFail;
2092 unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
2093 : (Reg2 + 32 - LastReg);
2096 // The space between two vectors should be 1. And they should have the same layout.
2097 // Total count shouldn't be great than 4
2099 Error(RegLoc2, "invalid space between two vectors");
2100 return MatchOperand_ParseFail;
2102 if (LayoutStr != LayoutStr2) {
2103 Error(LayoutLoc2, "expected the same vector layout");
2104 return MatchOperand_ParseFail;
2107 Error(RegLoc2, "invalid number of vectors");
2108 return MatchOperand_ParseFail;
2115 if (Parser.getTok().isNot(AsmToken::RCurly)) {
2116 Error(Parser.getTok().getLoc(), "'}' expected");
2117 return MatchOperand_ParseFail;
2119 SMLoc ELoc = Parser.getTok().getLoc();
2120 Parser.Lex(); // Eat '}' token.
2122 A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
2123 if (Count > 1) { // If count > 1, create vector list using super register.
2124 bool IsVec64 = (Layout < A64Layout::VL_16B);
2125 static unsigned SupRegIDs[3][2] = {
2126 { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
2127 { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
2128 { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
// Row = list length - 2; column selects the D (64-bit) vs Q (128-bit) class.
2130 unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
2131 unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
2132 const MCRegisterInfo *MRI = getContext().getRegisterInfo();
2133 Reg = MRI->getMatchingSuperReg(Reg, Sub0,
2134 &AArch64MCRegisterClasses[SupRegID]);
2137 AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
// An immediately-following '[' introduces a lane index on the whole list.
2139 if (Parser.getTok().is(AsmToken::LBrac)) {
2140 uint32_t NumLanes = 0;
2142 case A64Layout::VL_B : NumLanes = 16; break;
2143 case A64Layout::VL_H : NumLanes = 8; break;
2144 case A64Layout::VL_S : NumLanes = 4; break;
2145 case A64Layout::VL_D : NumLanes = 2; break;
2147 SMLoc Loc = getLexer().getLoc();
2148 Error(Loc, "expected comma before next operand");
2149 return MatchOperand_ParseFail;
2151 return ParseNEONLane(Operands, NumLanes);
2153 return MatchOperand_Success;
2157 // FIXME: We would really like to be able to tablegen'erate this.
// Post-match semantic validation for constraints that the tablegen'erated
// matcher cannot express (see FIXME above): cross-immediate relationships of
// bitfield insert/extract aliases, and whether IC/TLBI operations take a
// register operand. Returns true (after emitting a diagnostic at the
// offending operand's source location) when the instruction is invalid.
// NOTE(review): this excerpt has elided lines (case bodies, 'break's and
// closing braces), so comments are limited to what is visible here.
2158 bool AArch64AsmParser::
2159 validateInstruction(MCInst &Inst,
2160 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2161 switch (Inst.getOpcode()) {
// Bitfield-insert family (BFI/SBFIZ/UBFIZ, 32- and 64-bit forms).
2162 case AArch64::BFIwwii:
2163 case AArch64::BFIxxii:
2164 case AArch64::SBFIZwwii:
2165 case AArch64::SBFIZxxii:
2166 case AArch64::UBFIZwwii:
2167 case AArch64::UBFIZxxii: {
// The two encoded immediates (immr/imms) are always the last two MCInst
// operands, regardless of how many register operands precede them.
2168 unsigned ImmOps = Inst.getNumOperands() - 2;
2169 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
2170 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
// assumes ImmR/ImmS here are the already-transformed alias immediates
// (lsb/width encoding) — TODO confirm against the operand converters.
2172 if (ImmR != 0 && ImmS >= ImmR) {
// Operands[4] is the as-written lsb operand; report the error there.
2173 return Error(Operands[4]->getStartLoc(),
2174 "requested insert overflows register");
// Bitfield-extract family (BFXIL/SBFX/UBFX): the msb (ImmS) must lie inside
// the register and must not precede the lsb (ImmR).
2178 case AArch64::BFXILwwii:
2179 case AArch64::BFXILxxii:
2180 case AArch64::SBFXwwii:
2181 case AArch64::SBFXxxii:
2182 case AArch64::UBFXwwii:
2183 case AArch64::UBFXxxii: {
2184 unsigned ImmOps = Inst.getNumOperands() - 2;
2185 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
2186 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
2187 int64_t RegWidth = 0;
// Select the register width by opcode suffix; the assignments (presumably
// RegWidth = 64 for the xxii forms, 32 for wwii) are elided in this excerpt.
2188 switch (Inst.getOpcode()) {
2189 case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
2192 case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
2197 if (ImmS >= RegWidth || ImmS < ImmR) {
2198 return Error(Operands[4]->getStartLoc(),
2199 "requested extract overflows register");
// ICix: the 'ic' alias that was written WITH a register — reject if the
// named IC operation does not actually use one.
2203 case AArch64::ICix: {
2204 int64_t ImmVal = Inst.getOperand(0).getImm();
2205 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
2206 if (!A64IC::NeedsRegister(ICOp)) {
2207 return Error(Operands[1]->getStartLoc(),
2208 "specified IC op does not use a register");
// ICi: the register-less form — reject if the IC operation requires one.
2212 case AArch64::ICi: {
2213 int64_t ImmVal = Inst.getOperand(0).getImm();
2214 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
2215 if (A64IC::NeedsRegister(ICOp)) {
2216 return Error(Operands[1]->getStartLoc(),
2217 "specified IC op requires a register");
// TLBIix / TLBIi: same with-register / without-register check for TLB
// invalidation operations.
2221 case AArch64::TLBIix: {
2222 int64_t ImmVal = Inst.getOperand(0).getImm();
2223 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
2224 if (!A64TLBI::NeedsRegister(TLBIOp)) {
2225 return Error(Operands[1]->getStartLoc(),
2226 "specified TLBI op does not use a register");
2230 case AArch64::TLBIi: {
2231 int64_t ImmVal = Inst.getOperand(0).getImm();
2232 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
2233 if (A64TLBI::NeedsRegister(TLBIOp)) {
2234 return Error(Operands[1]->getStartLoc(),
2235 "specified TLBI op requires a register");
2245 // Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list. Returns true on error; on success the
// caller hands Operands to MatchAndEmitInstruction.
2247 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
2248 StringRef Name, SMLoc NameLoc,
2249 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Canonicalize legacy ARM-style conditional branch spellings ("beq") to the
// AArch64 dotted form ("b.eq") so the condition code can be split off below.
2250 StringRef PatchedName = StringSwitch<StringRef>(Name.lower())
2251 .Case("beq", "b.eq")
2252 .Case("bne", "b.ne")
2253 .Case("bhs", "b.hs")
2254 .Case("bcs", "b.cs")
2255 .Case("blo", "b.lo")
2256 .Case("bcc", "b.cc")
2257 .Case("bmi", "b.mi")
2258 .Case("bpl", "b.pl")
2259 .Case("bvs", "b.vs")
2260 .Case("bvc", "b.vc")
2261 .Case("bhi", "b.hi")
2262 .Case("bls", "b.ls")
2263 .Case("bge", "b.ge")
2264 .Case("blt", "b.lt")
2265 .Case("bgt", "b.gt")
2266 .Case("ble", "b.le")
2267 .Case("bal", "b.al")
2268 .Case("bnv", "b.nv")
// Split "<mnemonic>.<cond>" at the first '.'; npos means no condition code.
2271 size_t CondCodePos = PatchedName.find('.')
2273 StringRef Mnemonic = PatchedName.substr(0, CondCodePos);
2274 Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
2276 if (CondCodePos != StringRef::npos) {
2277 // We have a condition code
2278 SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
2279 StringRef CondStr = PatchedName.substr(CondCodePos + 1, StringRef::npos);
2280 A64CC::CondCodes Code;
2282 Code = A64StringToCondCode(CondStr);
2284 if (Code == A64CC::Invalid) {
2285 Error(S, "invalid condition code");
2286 Parser.eatToEndOfStatement();
// Push the '.' and the condition code as separate operands; the matcher
// expects them in that shape. E spans the two-letter condition suffix.
2290 SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
2292 Operands.push_back(AArch64Operand::CreateToken(".", DotL));
2293 SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
2294 Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
2297 // Now we parse the operands of this instruction
2298 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2299 // Read the first operand.
2300 if (ParseOperand(Operands, Mnemonic)) {
2301 Parser.eatToEndOfStatement();
// Remaining operands are comma-separated.
2305 while (getLexer().is(AsmToken::Comma)) {
2306 Parser.Lex(); // Eat the comma.
2308 // Parse and remember the operand.
2309 if (ParseOperand(Operands, Mnemonic)) {
2310 Parser.eatToEndOfStatement();
2315 // After successfully parsing some operands there are two special cases to
2316 // consider (i.e. notional operands not separated by commas). Both are due
2317 // to memory specifiers:
2318 // + An RBrac will end an address for load/store/prefetch
2319 // + An '!' will indicate a pre-indexed operation.
2321 // It's someone else's responsibility to make sure these tokens are sane
2322 // in the given context!
2323 if (Parser.getTok().is(AsmToken::RBrac)) {
2324 SMLoc Loc = Parser.getTok().getLoc();
2325 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
2329 if (Parser.getTok().is(AsmToken::Exclaim)) {
2330 SMLoc Loc = Parser.getTok().getLoc();
2331 Operands.push_back(AArch64Operand::CreateToken("!", Loc));
// Anything left before end-of-statement at this point is a syntax error.
2337 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2338 SMLoc Loc = getLexer().getLoc();
2339 Parser.eatToEndOfStatement();
2340 return Error(Loc, "expected comma before next operand");
2343 // Eat the EndOfStatement
// Dispatches target-specific assembler directives: .hword/.word/.xword emit
// 2-, 4- and 8-byte data values respectively; .tlsdesccall marks a TLS
// descriptor call site. The fall-through for unrecognized directives is
// elided in this excerpt (presumably returns true so generic parsing takes
// over — TODO confirm).
2349 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
2350 StringRef IDVal = DirectiveID.getIdentifier();
2351 if (IDVal == ".hword")
2352 return ParseDirectiveWord(2, DirectiveID.getLoc());
2353 else if (IDVal == ".word")
2354 return ParseDirectiveWord(4, DirectiveID.getLoc());
2355 else if (IDVal == ".xword")
2356 return ParseDirectiveWord(8, DirectiveID.getLoc());
2357 else if (IDVal == ".tlsdesccall")
2358 return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
2363 /// ParseDirectiveWord
2364 /// ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a Size-byte value via the
/// streamer. Diagnoses any token other than ',' between expressions.
/// (The surrounding loop construct and early returns are elided here.)
2365 bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
2366 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2368 const MCExpr *Value;
2369 if (getParser().parseExpression(Value))
2372 getParser().getStreamer().EmitValue(Value, Size);
// End of statement terminates the expression list.
2374 if (getLexer().is(AsmToken::EndOfStatement))
2377 // FIXME: Improve diagnostic.
2378 if (getLexer().isNot(AsmToken::Comma)) {
2379 Error(L, "unexpected token in directive")
2390 // ParseDirectiveTLSDescCall:
2391 // ::= .tlsdesccall symbol
// Emits a pseudo TLSDESCCALL instruction referencing the named symbol, so a
// relocation marking the TLS descriptor call sequence can be produced.
// (The declarations of Name and Inst are elided in this excerpt.)
2392 bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
2394 if (getParser().parseIdentifier(Name)) {
2395 Error(L, "expected symbol after directive");
2399 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
2400 const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
2403 Inst.setOpcode(AArch64::TLSDESCCALL);
2404 Inst.addOperand(MCOperand::CreateExpr(Expr));
// Emit through the streamer so the pseudo reaches object emission.
2406 getParser().getStreamer().EmitInstruction(Inst, STI);
// Matches the parsed operand list against the tablegen'erated matcher,
// runs validateInstruction() on success and emits the MCInst; on failure
// maps the match-result code to a specific diagnostic anchored at the
// offending operand (ErrorInfo indexes into Operands).
// NOTE(review): several lines are elided in this excerpt — e.g. the
// 'MCInst Inst;' declaration, the Match_Success label/return, and a number
// of 'case Match_*:' labels immediately preceding unlabeled Error() calls.
2411 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2412 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
2413 MCStreamer &Out, unsigned &ErrorInfo,
2414 bool MatchingInlineAsm) {
2416 unsigned MatchResult;
2417 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
// Guard: ErrorInfo of ~0U means "no specific operand"; any other value must
// be a valid index before the diagnostics below dereference Operands[ErrorInfo].
2420 if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
2421 return Error(IDLoc, "too few operands for instruction");
2423 switch (MatchResult) {
// Success path: semantic validation then emission (labels/returns elided).
2426 if (validateInstruction(Inst, Operands))
2429 Out.EmitInstruction(Inst, STI);
2431 case Match_MissingFeature:
2432 Error(IDLoc, "instruction requires a CPU feature not currently enabled");
2434 case Match_InvalidOperand: {
2435 SMLoc ErrorLoc = IDLoc;
2436 if (ErrorInfo != ~0U) {
2437 ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
// Fall back to the instruction location if the operand has no location.
2438 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
2441 return Error(ErrorLoc, "invalid operand for instruction");
2443 case Match_MnemonicFail:
2444 return Error(IDLoc, "invalid instruction");
// The remainder maps each operand-diagnostic match result (generated from
// GET_OPERAND_DIAGNOSTIC_TYPES) to a human-readable expectation message.
2446 case Match_AddSubRegExtendSmall:
2447 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2448 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
2449 case Match_AddSubRegExtendLarge:
2450 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2451 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
2452 case Match_AddSubRegShift32:
2453 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2454 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
2455 case Match_AddSubRegShift64:
2456 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2457 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
2458 case Match_AddSubSecondSource:
2459 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2460 "expected compatible register, symbol or integer in range [0, 4095]");
2461 case Match_CVTFixedPos32:
2462 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2463 "expected integer in range [1, 32]");
2464 case Match_CVTFixedPos64:
2465 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2466 "expected integer in range [1, 64]");
2467 case Match_CondCode:
2468 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2469 "expected AArch64 condition code");
2471 // Any situation which allows a nontrivial floating-point constant also
2472 // allows a register.
2473 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2474 "expected compatible register or floating-point constant");
2476 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2477 "expected floating-point constant #0.0 or invalid register type");
2479 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2480 "expected label or encodable integer pc offset");
2482 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2483 "expected lane specifier '[1]'");
// Load/store extend diagnostics: 32- vs 64-bit index register, with the
// permitted shift amount determined by the access size (1..16 bytes).
2484 case Match_LoadStoreExtend32_1:
2485 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2486 "expected 'uxtw' or 'sxtw' with optional shift of #0");
2487 case Match_LoadStoreExtend32_2:
2488 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2489 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
2490 case Match_LoadStoreExtend32_4:
2491 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2492 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
2493 case Match_LoadStoreExtend32_8:
2494 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2495 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
2496 case Match_LoadStoreExtend32_16:
2497 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2498 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
2499 case Match_LoadStoreExtend64_1:
2500 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2501 "expected 'lsl' or 'sxtx' with optional shift of #0");
2502 case Match_LoadStoreExtend64_2:
2503 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2504 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
2505 case Match_LoadStoreExtend64_4:
2506 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2507 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
2508 case Match_LoadStoreExtend64_8:
2509 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2510 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
2511 case Match_LoadStoreExtend64_16:
2512 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2513 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Scaled signed 7-bit (pair) and 9-bit / unsigned 12-bit offset diagnostics.
2514 case Match_LoadStoreSImm7_4:
2515 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2516 "expected integer multiple of 4 in range [-256, 252]");
2517 case Match_LoadStoreSImm7_8:
2518 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2519 "expected integer multiple of 8 in range [-512, 504]");
2520 case Match_LoadStoreSImm7_16:
2521 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2522 "expected integer multiple of 16 in range [-1024, 1008]");
2523 case Match_LoadStoreSImm9:
2524 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2525 "expected integer in range [-256, 255]");
2526 case Match_LoadStoreUImm12_1:
2527 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2528 "expected symbolic reference or integer in range [0, 4095]");
2529 case Match_LoadStoreUImm12_2:
2530 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2531 "expected symbolic reference or integer in range [0, 8190]");
2532 case Match_LoadStoreUImm12_4:
2533 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2534 "expected symbolic reference or integer in range [0, 16380]");
2535 case Match_LoadStoreUImm12_8:
2536 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2537 "expected symbolic reference or integer in range [0, 32760]");
2538 case Match_LoadStoreUImm12_16:
2539 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2540 "expected symbolic reference or integer in range [0, 65520]");
2541 case Match_LogicalSecondSource:
2542 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2543 "expected compatible register or logical immediate");
2544 case Match_MOVWUImm16:
2545 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2546 "expected relocated symbol or integer in range [0, 65535]");
// System-register and named-immediate diagnostics (some labels elided).
2548 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2549 "expected readable system register");
2551 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2552 "expected writable system register or pstate");
2553 case Match_NamedImm_at:
2554 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2555 "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
2556 case Match_NamedImm_dbarrier:
2557 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2558 "expected integer in range [0, 15] or symbolic barrier operand");
2559 case Match_NamedImm_dc:
2560 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2561 "expected symbolic 'dc' operand");
2562 case Match_NamedImm_ic:
2563 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2564 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
2565 case Match_NamedImm_isb:
2566 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2567 "expected integer in range [0, 15] or 'sy'");
2568 case Match_NamedImm_prefetch:
2569 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2570 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
2571 case Match_NamedImm_tlbi:
2572 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2573 "expected translation buffer invalidation operand");
// Plain unsigned-immediate range diagnostics (labels elided in excerpt).
2575 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2576 "expected integer in range [0, 65535]");
2578 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2579 "expected integer in range [0, 7]");
2581 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2582 "expected integer in range [0, 15]");
2584 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2585 "expected integer in range [0, 31]");
2587 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2588 "expected integer in range [0, 63]");
2590 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2591 "expected integer in range [0, 127]");
2593 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2594 "expected integer in range [<lsb>, 31]");
2596 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2597 "expected integer in range [<lsb>, 63]");
// SIMD shift-right (1..width) and shift-left (0..width-1) immediates.
2600 "expected integer in range [1, 8]");
2601 case Match_ShrImm16:
2602 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2603 "expected integer in range [1, 16]");
2604 case Match_ShrImm32:
2605 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2606 "expected integer in range [1, 32]");
2607 case Match_ShrImm64:
2608 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2609 "expected integer in range [1, 64]");
2611 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2612 "expected integer in range [0, 7]");
2613 case Match_ShlImm16:
2614 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2615 "expected integer in range [0, 15]");
2616 case Match_ShlImm32:
2617 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2618 "expected integer in range [0, 31]");
2619 case Match_ShlImm64:
2620 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2621 "expected integer in range [0, 63]");
2624 llvm_unreachable("Implement any new match types added!");
// Debug-prints this operand to OS in an angle-bracketed, kind-specific
// format. NOTE(review): the 'switch (Kind)' and its 'case k_*:' labels are
// elided in this excerpt; each OS << statement below belongs to one operand
// kind (condition code, FP immediate, shifted immediate, immediate,
// register, token, shift/extend, system register).
2628 void AArch64Operand::print(raw_ostream &OS) const {
2631 OS << "<CondCode: " << CondCode.Code << ">";
2634 OS << "<fpimm: " << FPImm.Val << ">";
2637 OS << "<immwithlsl: imm=" << ImmWithLSL.Val
2638 << ", shift=" << ImmWithLSL.ShiftAmount << ">";
2641 getImm()->print(OS);
2644 OS << "<register " << getReg() << '>';
2647 OS << '\'' << getToken() << '\'';
2650 OS << "<shift: type=" << ShiftExtend.ShiftType
2651 << ", amount=" << ShiftExtend.Amount << ">";
2654 StringRef Name(SysReg.Data, SysReg.Length);
2655 OS << "<sysreg: " << Name << '>';
// Defensive default for any operand kind not handled above.
2659 llvm_unreachable("No idea how to print this kind of operand");
// Debug helper; the body is elided in this excerpt — presumably forwards to
// print() on the debug stream, per the usual LLVM dump() convention — TODO
// confirm against the full source.
2664 void AArch64Operand::dump() const {
2669 /// Force static initialization.
/// Registers this assembly parser with the TargetRegistry for both the
/// little-endian and big-endian AArch64 targets, so 'llvm-mc' and friends
/// can construct it by target triple.
2670 extern "C" void LLVMInitializeAArch64AsmParser() {
2671 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
2672 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
2675 #define GET_REGISTER_MATCHER
2676 #define GET_MATCHER_IMPLEMENTATION
2677 #include "AArch64GenAsmMatcher.inc"