//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These are the public interface of the MCTargetAsmParser
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over interface: they return
  // an OperandMatchResultTy because it's less ambiguous than true/false or
  // -1/0/1 even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImm0AndImm0Operand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
                      SMLoc &LayoutLoc);

  OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);

  bool validateInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
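  // For example, "x3" yields X3 with an empty layout, "v0.8b" yields D0 with
  // layout ".8b", and "v0.4s" yields Q0 with layout ".4s".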
};

/// Instances of this class represent a parsed AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_VectorList,     // A sequential list of 1 to 4 registers.
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct ImmWithLSLOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
    bool ImplicitAmount;
  };

  struct CondCodeOp {
    A64CC::CondCodes Code;
  };

  struct FPImmOp {
    double Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct RegOp {
    unsigned RegNum;
  };

  struct ShiftExtendOp {
    A64SE::ShiftExtSpecifiers ShiftType;
    unsigned Amount;
    bool ImplicitAmount;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    A64Layout::VectorLayout Layout;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  union {
    struct ImmWithLSLOp ImmWithLSL;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct ImmOp Imm;
    struct RegOp Reg;
    struct ShiftExtendOp ShiftExtend;
    struct VectorListOp VectorList;
    struct SysRegOp SysReg;
    struct TokOp Tok;
  };
public:
  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
  }

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }
  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
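  // An ADD/SUB immediate shifted left by 12 supplies the upper half of a
  // 24-bit offset, e.g. "add x0, x1, #0x123, lsl #12" or the TLS form
  // "add x0, x1, #:tprel_hi12:var, lsl #12".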
  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }
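  // ADRP's operand is a 21-bit immediate scaled by the 4KiB page size,
  // e.g. "adrp x0, var" or "adrp x0, :got:var".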
  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOT
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }
  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr. Users needing that behaviour
    return false;
  }

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }
  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
        && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }
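  // An unsigned 12-bit offset scaled by the access size: in "ldr x0, [x1, #8]"
  // the offset must be a multiple of 8 and no larger than 0xfff * 8.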
  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned
    return Val >= 0 && Val <= 0xfff * MemSize;
  }

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }
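  // The MOVN/MOVZ/MOVK predicates below accept either a plain 16-bit chunk
  // (optionally shifted by "lsl #16/#32/#48") or a move-wide relocation
  // modifier, e.g. "movz x0, #:abs_g1:var" or "movn x0, #:dtprel_g1:var".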
  bool isMOVN32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }
  bool isMoveWideImm(unsigned RegWidth,
                     const AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }
  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either all zero or all one, because signed and unsigned values are
    // both permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }
  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // First check against specific MSR-only (write-only) registers
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
  }
  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;
  }

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
  }

  // if 0 < value <= w, return true
  bool isShrFixedWidth(int w) const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    int64_t Value = CE->getValue();
    return Value > 0 && Value <= w;
  }

  bool isShrImm8() const { return isShrFixedWidth(8); }

  bool isShrImm16() const { return isShrFixedWidth(16); }

  bool isShrImm32() const { return isShrFixedWidth(32); }

  bool isShrImm64() const { return isShrFixedWidth(64); }

  // if 0 <= value < w, return true
  bool isShlFixedWidth(int w) const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    int64_t Value = CE->getValue();
    return Value >= 0 && Value < w;
  }

  bool isShlImm8() const { return isShlFixedWidth(8); }

  bool isShlImm16() const { return isShlFixedWidth(16); }

  bool isShlImm32() const { return isShlFixedWidth(32); }

  bool isShlImm64() const { return isShlFixedWidth(64); }
  bool isNeonMovImmShiftLSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0, 8, 16 and 24.
    return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
  }

  bool isNeonMovImmShiftLSLH() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0 and 8.
    return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
  }
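  // MSL shifts ones in from the right (amounts 8 and 16 only) and is used by
  // MOVI/MVNI, e.g. "movi v0.4s, #0xff, msl #8".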
  bool isNeonMovImmShiftMSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::MSL)
      return false;

    // Valid shift amount is 8 and 16.
    return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
  }

  template <A64Layout::VectorLayout Layout, unsigned Count>
  bool isVectorList() const {
    return Kind == k_VectorList && VectorList.Layout == Layout &&
           VectorList.Count == Count;
  }

  template <int MemSize> bool isSImm7Scaled() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;
  }
  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
        && CE->getValue() < (1LL << (BitWidth - 1));
  }

  template<int bitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
  }

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
  }
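  // For the 64-bit vector form of MOVI every byte of the immediate must be
  // 0x00 or 0xff, e.g. "movi v0.2d, #0xff00ff00ff00ff00".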
  bool isNeonUImm64Mask() const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    uint64_t Value = CE->getValue();

    // i64 value with each byte being either 0x00 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
        return false;

    return true;
  }

  // if value == N, return true
  template<int N>
  bool isExactImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() == N;
  }

  bool isFPZeroIZero() const {
    return isFPZero();
  }
  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;
  }

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
                                          A64Layout::VectorLayout Layout,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.Layout = Layout;
    return Op;
  }

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
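  // BFI/SBFIZ/UBFIZ are aliases of the BFM family: the LSB is encoded as
  // immr = (RegWidth - lsb) % RegWidth, so "bfi w0, w1, #3, #4" encodes
  // immr = 29 on a 32-bit register.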
  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
  }

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addFPZeroIZeroOperands(MCInst &Inst, unsigned N) const {
    addFPZeroOperands(Inst, N);
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }
  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }
  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }
  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5-bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }
  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For Vector Immediates shifted imm operands.
  void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSLH shift amount 0, 8 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode MSL shift amount 8, 16 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8 - 1;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // First bit of Option is set in instruction classes, the high two bits are
    // as follows:
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }
  void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // A bit from each byte in the constant forms the encoded immediate
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }
};

} // end anonymous namespace.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {

  // See if the operand has a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      return MatchOperand_Success;
    }

    // ... or it might be a symbolish thing
  }
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is Current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    Parser.Lex(); // Eat '#'
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly: // SIMD vector list is not parsed here
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().parseExpression(ExprVal)
    ? MatchOperand_ParseFail : MatchOperand_Success;
}
// A lane attached to a NEON register. "[N]", which should yield three tokens:
// '[', N, ']'. A hash is not allowed to precede the immediate here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}
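// Parse a relocation specifier such as ":lo12:" or ":got_lo12:", as written in
// "add x0, x0, #:lo12:var" or "ldr x0, [x0, #:got_lo12:var]".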
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);
  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the trailing ':'

  return MatchOperand_Success;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Parser.Lex(); // Eat 'lsl'

    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat '#'

      if (Parser.getTok().isNot(AsmToken::Integer)) {
        Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
        return MatchOperand_ParseFail;
      }
    }
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}
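// Parse a floating-point immediate operand, e.g. the "#0.5" in
// "fmov d0, #0.5".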
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  SMLoc S = Parser.getTok().getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    if (!Hash)
      return MatchOperand_NoMatch;
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImm0AndImm0Operand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  SMLoc S = Parser.getTok().getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  APFloat RealVal(0.0);
  if (Parser.getTok().is(AsmToken::Real)) {
    if (Parser.getTok().getString() != "0.0") {
      Error(S, "only #0.0 is acceptable as immediate");
      return MatchOperand_ParseFail;
    }
  } else if (Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().getIntVal() != 0) {
      Error(S, "only #0.0 is acceptable as immediate");
      return MatchOperand_ParseFail;
    }
  } else {
    if (!Hash)
      return MatchOperand_NoMatch;
    Error(S, "only #0.0 is acceptable as immediate");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(0.0, S, E));
  return MatchOperand_Success;
}
// Automatically generated
static unsigned MatchRegisterName(StringRef Name);

bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  bool IsVec128 = false;
  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == std::string::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d','.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    StringRef LayoutText = StringRef(LowerReg).substr(DotPos);

    // See if it's a 128-bit layout first.
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".q", ".q").Case(".1q", ".1q")
      .Case(".d", ".d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".16b", ".16b")
      .Default("");
    if (Layout.size() != 0)
      IsVec128 = true;
    else
      Layout = StringSwitch<const char *>(LayoutText)
        .Case(".8b", ".8b")
        .Case(".4h", ".4h")
        .Case(".2s", ".2s")
        .Case(".1d", ".1d")
        .Default("");

    if (Layout.size() == 0) {
      // If we've still not pinned it down the register is malformed.
      return false;
    }
  }
  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
      .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
      .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
      .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
      .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
      .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
      .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
      .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
      .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
      .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
      .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
      .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
      .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
      .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
      .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
      .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
      .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
      .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
      .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
      .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
      .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
      .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
      .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
      .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
      .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
      .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
      .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
      .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
      .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
      .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
      .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
      .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
      .Default(AArch64::NoRegister);

    if (RegNum == AArch64::NoRegister)
      return false;
  }

  return true;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      case 'q': NumLanes = 1; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}
bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex();
  EndLoc = Parser.getTok().getLoc();

  return false;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If relaxing
  // this, beware of unintended consequences: an immediate will be accepted
  // during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "Invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (!Tok.is(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
1892 AArch64AsmParser::OperandMatchResultTy
1893 AArch64AsmParser::ParseLSXAddressOperand(
1894 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1895 SMLoc S = Parser.getTok().getLoc();
1898 SMLoc RegEndLoc, LayoutLoc;
1900 if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
1901 || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
1902 || Layout.size() != 0) {
1903 // Check Layout.size because we don't want to let "x3.4s" or similar
1905 return MatchOperand_NoMatch;
1907 Parser.Lex(); // Eat register
1909 if (Parser.getTok().is(AsmToken::RBrac)) {
1911 SMLoc E = Parser.getTok().getLoc();
1912 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1913 return MatchOperand_Success;
1916 // Otherwise, only ", #0" is valid
1918 if (Parser.getTok().isNot(AsmToken::Comma)) {
1919 Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
1920 return MatchOperand_ParseFail;
1922 Parser.Lex(); // Eat ','
1924 if (Parser.getTok().isNot(AsmToken::Hash)) {
1925 Error(Parser.getTok().getLoc(), "expected '#0'");
1926 return MatchOperand_ParseFail;
1928 Parser.Lex(); // Eat '#'
1930 if (Parser.getTok().isNot(AsmToken::Integer)
1931 || Parser.getTok().getIntVal() != 0) {
1932 Error(Parser.getTok().getLoc(), "expected '#0'");
1933 return MatchOperand_ParseFail;
1935 Parser.Lex(); // Eat '0'
1937 SMLoc E = Parser.getTok().getLoc();
1938 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1939 return MatchOperand_Success;
1942 AArch64AsmParser::OperandMatchResultTy
1943 AArch64AsmParser::ParseShiftExtend(
1944 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
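// Typical spellings handled here (illustrative): "lsl #12", "asr #3", "uxtb",
// "sxtw #2". Shifts (lsl/lsr/asr/ror/msl) require the immediate, while the
// extends may omit it; the optional immediate is parsed below.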
1945 StringRef IDVal = Parser.getTok().getIdentifier();
1946 std::string LowerID = IDVal.lower();
1948 A64SE::ShiftExtSpecifiers Spec =
1949 StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
1950 .Case("lsl", A64SE::LSL)
1951 .Case("msl", A64SE::MSL)
1952 .Case("lsr", A64SE::LSR)
1953 .Case("asr", A64SE::ASR)
1954 .Case("ror", A64SE::ROR)
1955 .Case("uxtb", A64SE::UXTB)
1956 .Case("uxth", A64SE::UXTH)
1957 .Case("uxtw", A64SE::UXTW)
1958 .Case("uxtx", A64SE::UXTX)
1959 .Case("sxtb", A64SE::SXTB)
1960 .Case("sxth", A64SE::SXTH)
1961 .Case("sxtw", A64SE::SXTW)
1962 .Case("sxtx", A64SE::SXTX)
1963 .Default(A64SE::Invalid);
1965 if (Spec == A64SE::Invalid)
1966 return MatchOperand_NoMatch;
1970 S = Parser.getTok().getLoc();
1973 if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
1974 Spec != A64SE::ROR && Spec != A64SE::MSL) {
1975 // The shift amount can be omitted for the extending versions, but not real shifts:
1977 // add x0, x0, x0, uxtb
1978 // is valid, and equivalent to
1979 // add x0, x0, x0, uxtb #0
1981 if (Parser.getTok().is(AsmToken::Comma) ||
1982 Parser.getTok().is(AsmToken::EndOfStatement) ||
1983 Parser.getTok().is(AsmToken::RBrac)) {
1984 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
1986 return MatchOperand_Success;
1990 // Eat # at beginning of immediate
1991 if (!Parser.getTok().is(AsmToken::Hash)) {
1992 Error(Parser.getTok().getLoc(),
1993 "expected #imm after shift specifier");
1994 return MatchOperand_ParseFail;
1998 // Make sure we do actually have a number
1999 if (!Parser.getTok().is(AsmToken::Integer)) {
2000 Error(Parser.getTok().getLoc(),
2001 "expected integer shift amount");
2002 return MatchOperand_ParseFail;
2004 unsigned Amount = Parser.getTok().getIntVal();
2006 E = Parser.getTok().getLoc();
2008 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
2011 return MatchOperand_Success;
2014 /// Try to parse a vector register token. If it is a vector register, the
2015 /// token is eaten and true is returned; otherwise, false is returned.
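/// Illustrative inputs: "v0.8b" or "v31.4s" are accepted; "x3" or a vector
/// register written without a layout suffix is rejected.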
2016 bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
2017 StringRef &Layout, SMLoc &LayoutLoc) {
2018 bool IsVector = true;
2020 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
2022 else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
2023 .contains(RegNum) &&
2024 !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
2027 else if (Layout.size() == 0)
2031 Error(Parser.getTok().getLoc(), "expected vector type register");
2033 Parser.Lex(); // Eat this token.
2038 // A vector list contains 1-4 consecutive registers.
2039 // When the list contains more than one register, there are two accepted forms:
2040 // (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
2041 // (2) {Vn.layout - Vm.layout}
2042 // If the layout is like .b/.h/.s/.d, also parse the lane.
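// Illustrative examples: "{v0.4s, v1.4s}", "{v0.16b - v3.16b}", and with a
// lane index "{v1.s, v2.s}[2]".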
2043 AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
2044 SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
2045 if (Parser.getTok().isNot(AsmToken::LCurly)) {
2046 Error(Parser.getTok().getLoc(), "'{' expected");
2047 return MatchOperand_ParseFail;
2049 SMLoc SLoc = Parser.getTok().getLoc();
2050 Parser.Lex(); // Eat '{' token.
2052 unsigned Reg, Count = 1;
2053 StringRef LayoutStr;
2054 SMLoc RegEndLoc, LayoutLoc;
2055 if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
2056 return MatchOperand_ParseFail;
2058 if (Parser.getTok().is(AsmToken::Minus)) {
2059 Parser.Lex(); // Eat the minus.
2062 StringRef LayoutStr2;
2063 SMLoc RegEndLoc2, LayoutLoc2;
2064 SMLoc RegLoc2 = Parser.getTok().getLoc();
2066 if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
2067 return MatchOperand_ParseFail;
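// Compute how far apart the two registers are, treating the numbering as
// wrapping modulo 32 so that a range ending below its start is still measured
// forwards.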
2068 unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
2070 if (LayoutStr != LayoutStr2) {
2071 Error(LayoutLoc2, "expected the same vector layout");
2072 return MatchOperand_ParseFail;
2074 if (Space == 0 || Space > 3) {
2075 Error(RegLoc2, "invalid number of vectors");
2076 return MatchOperand_ParseFail;
2081 unsigned LastReg = Reg;
2082 while (Parser.getTok().is(AsmToken::Comma)) {
2083 Parser.Lex(); // Eat the comma.
2085 StringRef LayoutStr2;
2086 SMLoc RegEndLoc2, LayoutLoc2;
2087 SMLoc RegLoc2 = Parser.getTok().getLoc();
2089 if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
2090 return MatchOperand_ParseFail;
2091 unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
2092 : (Reg2 + 32 - LastReg);
2095 // Consecutive vectors in the list must be exactly one register apart and
2096 // share the same layout, and the total count must not be greater than 4.
2098 Error(RegLoc2, "invalid space between two vectors");
2099 return MatchOperand_ParseFail;
2101 if (LayoutStr != LayoutStr2) {
2102 Error(LayoutLoc2, "expected the same vector layout");
2103 return MatchOperand_ParseFail;
2106 Error(RegLoc2, "invalid number of vectors");
2107 return MatchOperand_ParseFail;
2114 if (Parser.getTok().isNot(AsmToken::RCurly)) {
2115 Error(Parser.getTok().getLoc(), "'}' expected");
2116 return MatchOperand_ParseFail;
2118 SMLoc ELoc = Parser.getTok().getLoc();
2119 Parser.Lex(); // Eat '}' token.
2121 A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
2122 if (Count > 1) { // If count > 1, create vector list using super register.
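// Lists of 2-4 vectors are modelled as a single D- or Q-tuple super-register
// (pair/triple/quad): pick the register class by count and element width,
// then map the first register to the covering tuple register.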
2123 bool IsVec64 = (Layout < A64Layout::VL_16B);
2124 static unsigned SupRegIDs[3][2] = {
2125 { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
2126 { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
2127 { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
2129 unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
2130 unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
2131 const MCRegisterInfo *MRI = getContext().getRegisterInfo();
2132 Reg = MRI->getMatchingSuperReg(Reg, Sub0,
2133 &AArch64MCRegisterClasses[SupRegID]);
2136 AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
2138 if (Parser.getTok().is(AsmToken::LBrac)) {
2139 uint32_t NumLanes = 0;
2141 case A64Layout::VL_B : NumLanes = 16; break;
2142 case A64Layout::VL_H : NumLanes = 8; break;
2143 case A64Layout::VL_S : NumLanes = 4; break;
2144 case A64Layout::VL_D : NumLanes = 2; break;
2146 SMLoc Loc = getLexer().getLoc();
2147 Error(Loc, "expected comma before next operand");
2148 return MatchOperand_ParseFail;
2150 return ParseNEONLane(Operands, NumLanes);
2152 return MatchOperand_Success;
2156 // FIXME: We would really like to be able to tablegen'erate this.
2157 bool AArch64AsmParser::
2158 validateInstruction(MCInst &Inst,
2159 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2160 switch (Inst.getOpcode()) {
2161 case AArch64::BFIwwii:
2162 case AArch64::BFIxxii:
2163 case AArch64::SBFIZwwii:
2164 case AArch64::SBFIZxxii:
2165 case AArch64::UBFIZwwii:
2166 case AArch64::UBFIZxxii: {
2167 unsigned ImmOps = Inst.getNumOperands() - 2;
2168 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
2169 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
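// For these bitfield-insert aliases the immediates encode
// ImmR = (RegWidth - lsb) % RegWidth and ImmS = width - 1, so when lsb is
// non-zero the requested field only fits if ImmS < ImmR.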
2171 if (ImmR != 0 && ImmS >= ImmR) {
2172 return Error(Operands[4]->getStartLoc(),
2173 "requested insert overflows register");
2177 case AArch64::BFXILwwii:
2178 case AArch64::BFXILxxii:
2179 case AArch64::SBFXwwii:
2180 case AArch64::SBFXxxii:
2181 case AArch64::UBFXwwii:
2182 case AArch64::UBFXxxii: {
2183 unsigned ImmOps = Inst.getNumOperands() - 2;
2184 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
2185 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
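// For these bitfield-extract aliases the immediates encode ImmR = lsb and
// ImmS = lsb + width - 1, so the field is in range only if
// ImmR <= ImmS < RegWidth.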
2186 int64_t RegWidth = 0;
2187 switch (Inst.getOpcode()) {
2188 case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
2191 case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
2196 if (ImmS >= RegWidth || ImmS < ImmR) {
2197 return Error(Operands[4]->getStartLoc(),
2198 "requested extract overflows register");
2202 case AArch64::ICix: {
2203 int64_t ImmVal = Inst.getOperand(0).getImm();
2204 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
2205 if (!A64IC::NeedsRegister(ICOp)) {
2206 return Error(Operands[1]->getStartLoc(),
2207 "specified IC op does not use a register");
2211 case AArch64::ICi: {
2212 int64_t ImmVal = Inst.getOperand(0).getImm();
2213 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
2214 if (A64IC::NeedsRegister(ICOp)) {
2215 return Error(Operands[1]->getStartLoc(),
2216 "specified IC op requires a register");
2220 case AArch64::TLBIix: {
2221 int64_t ImmVal = Inst.getOperand(0).getImm();
2222 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
2223 if (!A64TLBI::NeedsRegister(TLBIOp)) {
2224 return Error(Operands[1]->getStartLoc(),
2225 "specified TLBI op does not use a register");
2229 case AArch64::TLBIi: {
2230 int64_t ImmVal = Inst.getOperand(0).getImm();
2231 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
2232 if (A64TLBI::NeedsRegister(TLBIOp)) {
2233 return Error(Operands[1]->getStartLoc(),
2234 "specified TLBI op requires a register");
2244 // Parses the instruction *together with* all operands, appending each parsed
2245 // operand to the "Operands" list.
2246 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
2247 StringRef Name, SMLoc NameLoc,
2248 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
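// Undotted conditional branches ("beq", "bne", ...) are canonicalised to the
// "b.<cond>" form up front so the rest of the parser and the matcher only
// ever see the dotted spelling (illustrative: "beq lbl" becomes "b.eq lbl").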
2249 StringRef PatchedName = StringSwitch<StringRef>(Name.lower())
2250 .Case("beq", "b.eq")
2251 .Case("bne", "b.ne")
2252 .Case("bhs", "b.hs")
2253 .Case("bcs", "b.cs")
2254 .Case("blo", "b.lo")
2255 .Case("bcc", "b.cc")
2256 .Case("bmi", "b.mi")
2257 .Case("bpl", "b.pl")
2258 .Case("bvs", "b.vs")
2259 .Case("bvc", "b.vc")
2260 .Case("bhi", "b.hi")
2261 .Case("bls", "b.ls")
2262 .Case("bge", "b.ge")
2263 .Case("blt", "b.lt")
2264 .Case("bgt", "b.gt")
2265 .Case("ble", "b.le")
2266 .Case("bal", "b.al")
2267 .Case("bnv", "b.nv")
2270 size_t CondCodePos = PatchedName.find('.');
2272 StringRef Mnemonic = PatchedName.substr(0, CondCodePos);
2273 Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
2275 if (CondCodePos != StringRef::npos) {
2276 // We have a condition code
2277 SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
2278 StringRef CondStr = PatchedName.substr(CondCodePos + 1, StringRef::npos);
2279 A64CC::CondCodes Code;
2281 Code = A64StringToCondCode(CondStr);
2283 if (Code == A64CC::Invalid) {
2284 Error(S, "invalid condition code");
2285 Parser.eatToEndOfStatement();
2289 SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
2291 Operands.push_back(AArch64Operand::CreateToken(".", DotL));
2292 SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
2293 Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
2296 // Now we parse the operands of this instruction
2297 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2298 // Read the first operand.
2299 if (ParseOperand(Operands, Mnemonic)) {
2300 Parser.eatToEndOfStatement();
2304 while (getLexer().is(AsmToken::Comma)) {
2305 Parser.Lex(); // Eat the comma.
2307 // Parse and remember the operand.
2308 if (ParseOperand(Operands, Mnemonic)) {
2309 Parser.eatToEndOfStatement();
2314 // After successfully parsing some operands there are two special cases to
2315 // consider (i.e. notional operands not separated by commas). Both are due
2316 // to memory specifiers:
2317 // + An RBrac will end an address for load/store/prefetch
2318 // + An '!' will indicate a pre-indexed operation.
2320 // It's someone else's responsibility to make sure these tokens are sane
2321 // in the given context!
2322 if (Parser.getTok().is(AsmToken::RBrac)) {
2323 SMLoc Loc = Parser.getTok().getLoc();
2324 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
2328 if (Parser.getTok().is(AsmToken::Exclaim)) {
2329 SMLoc Loc = Parser.getTok().getLoc();
2330 Operands.push_back(AArch64Operand::CreateToken("!", Loc));
2336 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2337 SMLoc Loc = getLexer().getLoc();
2338 Parser.eatToEndOfStatement();
2339 return Error(Loc, "expected comma before next operand");
2342 // Eat the EndOfStatement
2348 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
2349 StringRef IDVal = DirectiveID.getIdentifier();
2350 if (IDVal == ".hword")
2351 return ParseDirectiveWord(2, DirectiveID.getLoc());
2352 else if (IDVal == ".word")
2353 return ParseDirectiveWord(4, DirectiveID.getLoc());
2354 else if (IDVal == ".xword")
2355 return ParseDirectiveWord(8, DirectiveID.getLoc());
2356 else if (IDVal == ".tlsdesccall")
2357 return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
2362 /// ParseDirectiveWord
2363 /// ::= .word [ expression (, expression)* ]
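/// Illustrative uses: ".hword 0x1234", ".word label + 4", ".xword 0xbeef";
/// the size in bytes (2, 4 or 8) comes from the directive (.hword/.word/
/// .xword) dispatched in ParseDirective above.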
2364 bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
2365 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2367 const MCExpr *Value;
2368 if (getParser().parseExpression(Value))
2371 getParser().getStreamer().EmitValue(Value, Size);
2373 if (getLexer().is(AsmToken::EndOfStatement))
2376 // FIXME: Improve diagnostic.
2377 if (getLexer().isNot(AsmToken::Comma)) {
2378 Error(L, "unexpected token in directive");
2389 // ParseDirectiveTLSDescCall:
2390 // ::= .tlsdesccall symbol
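// Illustrative use: ".tlsdesccall var" immediately before the "blr" of a TLS
// descriptor sequence; it emits a TLSDESCCALL pseudo-instruction carrying the
// symbol so a TLS descriptor relocation can be attached to the call.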
2391 bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
2393 if (getParser().parseIdentifier(Name)) {
2394 Error(L, "expected symbol after directive");
2398 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
2399 const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
2402 Inst.setOpcode(AArch64::TLSDESCCALL);
2403 Inst.addOperand(MCOperand::CreateExpr(Expr));
2405 getParser().getStreamer().EmitInstruction(Inst, STI);
2410 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2411 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
2412 MCStreamer &Out, unsigned &ErrorInfo,
2413 bool MatchingInlineAsm) {
2415 unsigned MatchResult;
2416 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
2419 if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
2420 return Error(IDLoc, "too few operands for instruction");
2422 switch (MatchResult) {
2425 if (validateInstruction(Inst, Operands))
2428 Out.EmitInstruction(Inst, STI);
2430 case Match_MissingFeature:
2431 Error(IDLoc, "instruction requires a CPU feature not currently enabled");
2433 case Match_InvalidOperand: {
2434 SMLoc ErrorLoc = IDLoc;
2435 if (ErrorInfo != ~0U) {
2436 ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
2437 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
2440 return Error(ErrorLoc, "invalid operand for instruction");
2442 case Match_MnemonicFail:
2443 return Error(IDLoc, "invalid instruction");
2445 case Match_AddSubRegExtendSmall:
2446 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2447 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
2448 case Match_AddSubRegExtendLarge:
2449 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2450 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
2451 case Match_AddSubRegShift32:
2452 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2453 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
2454 case Match_AddSubRegShift64:
2455 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2456 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
2457 case Match_AddSubSecondSource:
2458 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2459 "expected compatible register, symbol or integer in range [0, 4095]");
2460 case Match_CVTFixedPos32:
2461 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2462 "expected integer in range [1, 32]");
2463 case Match_CVTFixedPos64:
2464 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2465 "expected integer in range [1, 64]");
2466 case Match_CondCode:
2467 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2468 "expected AArch64 condition code");
2470 // Any situation which allows a nontrivial floating-point constant also
2471 // allows a register.
2472 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2473 "expected compatible register or floating-point constant");
2475 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2476 "expected floating-point constant #0.0 or invalid register type");
2478 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2479 "expected label or encodable integer pc offset");
2481 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2482 "expected lane specifier '[1]'");
2483 case Match_LoadStoreExtend32_1:
2484 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2485 "expected 'uxtw' or 'sxtw' with optional shift of #0");
2486 case Match_LoadStoreExtend32_2:
2487 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2488 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
2489 case Match_LoadStoreExtend32_4:
2490 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2491 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
2492 case Match_LoadStoreExtend32_8:
2493 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2494 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
2495 case Match_LoadStoreExtend32_16:
2496 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2497 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
2498 case Match_LoadStoreExtend64_1:
2499 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2500 "expected 'lsl' or 'sxtx' with optional shift of #0");
2501 case Match_LoadStoreExtend64_2:
2502 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2503 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
2504 case Match_LoadStoreExtend64_4:
2505 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2506 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
2507 case Match_LoadStoreExtend64_8:
2508 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2509 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
2510 case Match_LoadStoreExtend64_16:
2511 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2512 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
2513 case Match_LoadStoreSImm7_4:
2514 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2515 "expected integer multiple of 4 in range [-256, 252]");
2516 case Match_LoadStoreSImm7_8:
2517 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2518 "expected integer multiple of 8 in range [-512, 504]");
2519 case Match_LoadStoreSImm7_16:
2520 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2521 "expected integer multiple of 16 in range [-1024, 1008]");
2522 case Match_LoadStoreSImm9:
2523 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2524 "expected integer in range [-256, 255]");
2525 case Match_LoadStoreUImm12_1:
2526 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2527 "expected symbolic reference or integer in range [0, 4095]");
2528 case Match_LoadStoreUImm12_2:
2529 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2530 "expected symbolic reference or integer in range [0, 8190]");
2531 case Match_LoadStoreUImm12_4:
2532 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2533 "expected symbolic reference or integer in range [0, 16380]");
2534 case Match_LoadStoreUImm12_8:
2535 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2536 "expected symbolic reference or integer in range [0, 32760]");
2537 case Match_LoadStoreUImm12_16:
2538 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2539 "expected symbolic reference or integer in range [0, 65520]");
2540 case Match_LogicalSecondSource:
2541 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2542 "expected compatible register or logical immediate");
2543 case Match_MOVWUImm16:
2544 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2545 "expected relocated symbol or integer in range [0, 65535]");
2547 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2548 "expected readable system register");
2550 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2551 "expected writable system register or pstate");
2552 case Match_NamedImm_at:
2553 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2554 "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
2555 case Match_NamedImm_dbarrier:
2556 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2557 "expected integer in range [0, 15] or symbolic barrier operand");
2558 case Match_NamedImm_dc:
2559 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2560 "expected symbolic 'dc' operand");
2561 case Match_NamedImm_ic:
2562 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2563 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
2564 case Match_NamedImm_isb:
2565 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2566 "expected integer in range [0, 15] or 'sy'");
2567 case Match_NamedImm_prefetch:
2568 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2569 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
2570 case Match_NamedImm_tlbi:
2571 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2572 "expected translation buffer invalidation operand");
2574 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2575 "expected integer in range [0, 65535]");
2577 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2578 "expected integer in range [0, 7]");
2580 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2581 "expected integer in range [0, 15]");
2583 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2584 "expected integer in range [0, 31]");
2586 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2587 "expected integer in range [0, 63]");
2589 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2590 "expected integer in range [0, 127]");
2592 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2593 "expected integer in range [<lsb>, 31]");
2595 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2596 "expected integer in range [<lsb>, 63]");
2598 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2599 "expected integer in range [1, 8]");
2600 case Match_ShrImm16:
2601 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2602 "expected integer in range [1, 16]");
2603 case Match_ShrImm32:
2604 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2605 "expected integer in range [1, 32]");
2606 case Match_ShrImm64:
2607 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2608 "expected integer in range [1, 64]");
2610 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2611 "expected integer in range [0, 7]");
2612 case Match_ShlImm16:
2613 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2614 "expected integer in range [0, 15]");
2615 case Match_ShlImm32:
2616 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2617 "expected integer in range [0, 31]");
2618 case Match_ShlImm64:
2619 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2620 "expected integer in range [0, 63]");
2623 llvm_unreachable("Implement any new match types added!");
2627 void AArch64Operand::print(raw_ostream &OS) const {
2630 OS << "<CondCode: " << CondCode.Code << ">";
2633 OS << "<fpimm: " << FPImm.Val << ">";
2636 OS << "<immwithlsl: imm=" << ImmWithLSL.Val
2637 << ", shift=" << ImmWithLSL.ShiftAmount << ">";
2640 getImm()->print(OS);
2643 OS << "<register " << getReg() << '>';
2646 OS << '\'' << getToken() << '\'';
2649 OS << "<shift: type=" << ShiftExtend.ShiftType
2650 << ", amount=" << ShiftExtend.Amount << ">";
2653 StringRef Name(SysReg.Data, SysReg.Length);
2654 OS << "<sysreg: " << Name << '>';
2658 llvm_unreachable("No idea how to print this kind of operand");
2663 void AArch64Operand::dump() const {
2668 /// Force static initialization.
2669 extern "C" void LLVMInitializeAArch64AsmParser() {
2670 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
2671 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
2674 #define GET_REGISTER_MATCHER
2675 #define GET_MATCHER_IMPLEMENTATION
2676 #include "AArch64GenAsmMatcher.inc"