//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//
16 #include "MCTargetDesc/AArch64MCTargetDesc.h"
17 #include "MCTargetDesc/AArch64MCExpr.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/StringSwitch.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCInst.h"
26 #include "llvm/MC/MCParser/MCAsmLexer.h"
27 #include "llvm/MC/MCParser/MCAsmParser.h"
28 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
29 #include "llvm/MC/MCRegisterInfo.h"
30 #include "llvm/MC/MCStreamer.h"
31 #include "llvm/MC/MCSubtargetInfo.h"
32 #include "llvm/MC/MCTargetAsmParser.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These are the public interface of the MCTargetAsmParser
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over interface: they return
  // an OperandMatchResultTy because it's less ambiguous than true/false or
  // -1/0/1, even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImm0AndImm0Operand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
                      SMLoc &LayoutLoc);

  OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);

  bool validateInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
};

/// Instances of this class represent a parsed AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_VectorList,     // A sequential list of 1 to 4 registers.
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
                      // matcher cares about.
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct ImmWithLSLOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
    bool ImplicitAmount;
  };

  struct CondCodeOp {
    A64CC::CondCodes Code;
  };

  struct FPImmOp {
    double Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct RegOp {
    unsigned RegNum;
  };

  struct ShiftExtendOp {
    A64SE::ShiftExtSpecifiers ShiftType;
    unsigned Amount;
    bool ImplicitAmount;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    A64Layout::VectorLayout Layout;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  union {
    struct ImmWithLSLOp ImmWithLSL;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct ImmOp Imm;
    struct RegOp Reg;
    struct ShiftExtendOp ShiftExtend;
    struct VectorListOp VectorList;
    struct SysRegOp SysReg;
    struct TokOp Tok;
  };

public:
  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
  }

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }

  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
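
  // E.g. both "add x0, x1, #123" and "add x0, x1, #:lo12:sym" pass this
  // predicate; the latter resolves to the low 12 bits of sym at link time.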

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }
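
  // E.g. "ldr x0, [x1, w2, sxtw #3]": a 32-bit Rm with an SXTW extend whose
  // amount matches Log2_32(8) for the 8-byte access (or is omitted as 0).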

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOT
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }

  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr. Users needing that behaviour should use an operand
    // kind that accepts one.
    return false;
  }
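
  // E.g. isLoadLitLabel() below uses isLabel<19, 4>: a 19-bit field scaled
  // by 4 covers -1048576 .. +1048572 bytes in 4-byte steps.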

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
        && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }

  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned, scaled by the access size.
    return Val >= 0 && Val <= 0xfff * MemSize;
  }
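
  // E.g. for an 8-byte access the largest encodable offset is
  // 0xfff * 8 = 32760, so "ldr x0, [x1, #32760]" is accepted.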

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }

  bool isMOVN32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMoveWideImm(unsigned RegWidth,
                     const AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }

  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;
      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // Check the name against the registers readable via MRS.
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
  }

  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;
  }

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
  }

  // Returns true if 0 < Value <= w.
  bool isShrFixedWidth(int w) const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= w;
  }

  bool isShrImm8() const { return isShrFixedWidth(8); }

  bool isShrImm16() const { return isShrFixedWidth(16); }

  bool isShrImm32() const { return isShrFixedWidth(32); }

  bool isShrImm64() const { return isShrFixedWidth(64); }

  // Returns true if 0 <= Value < w.
  bool isShlFixedWidth(int w) const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < w;
  }

  bool isShlImm8() const { return isShlFixedWidth(8); }

  bool isShlImm16() const { return isShlFixedWidth(16); }

  bool isShlImm32() const { return isShlFixedWidth(32); }

  bool isShlImm64() const { return isShlFixedWidth(64); }

  bool isNeonMovImmShiftLSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0, 8, 16 and 24.
    return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
  }

  bool isNeonMovImmShiftLSLH() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0 and 8.
    return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
  }

  bool isNeonMovImmShiftMSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::MSL)
      return false;

    // Valid shift amount is 8 and 16.
    return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
  }
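
  // E.g. "movi v0.4s, #0xff, msl #8": MSL shifts ones in from the right,
  // which is why only amounts 8 and 16 exist.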

  template <A64Layout::VectorLayout Layout, unsigned Count>
  bool isVectorList() const {
    return Kind == k_VectorList && VectorList.Layout == Layout &&
           VectorList.Count == Count;
  }

  template <int MemSize> bool isSImm7Scaled() const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;
  }

  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
        && CE->getValue() < (1LL << (BitWidth - 1));
  }

  template<int bitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
  }

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
  }

  bool isNeonUImm64Mask() const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    uint64_t Value = CE->getValue();

    // i64 value with each byte being either 0x00 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
        return false;

    return true;
  }

  // Returns true if Value == N.
  template<int N>
  bool isExactImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() == N;
  }

  bool isFPZeroIZero() const {
    return isFPZero();
  }

  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;
  }

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
    // TokOp and SysRegOp have identical layouts, so this initializes the
    // SysReg member of the union as well.
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
                                          A64Layout::VectorLayout Layout,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.Layout = Layout;
    return Op;
  }

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
  }

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }
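
  // E.g. "sbfx x0, x1, #4, #8" encodes lsb=4 and then width as
  // lsb + width - 1 = 11, matching the underlying SBFM form.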

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addFPZeroIZeroOperands(MCInst &Inst, unsigned N) const {
    addFPZeroOperands(Inst, N);
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }

  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }

  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5-bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For Vector Immediates shifted imm operands.
  void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSLH shift amount 0, 8 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode MSL shift amount 8, 16 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8 - 1;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // First bit of Option is set in instruction classes, the high two bits are
    // encoded here.
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }
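
  // E.g. "ldr x0, [x1, w2, sxtw]" gives OptionHi=3 with S=0, while
  // "ldr x0, [x1, x2, lsl #3]" gives OptionHi=1 with S=1.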

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // A bit from each byte in the constant forms the encoded immediate.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
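
  // E.g. #0x00ff00ff00ff00ff (bytes ff,00,... from the LSB) encodes as
  // 0b01010101 = 0x55.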

  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }
};

} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {
  // See if the operand has a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }
    // ... or it might be a symbolish thing
  }
  // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is Current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex(); // Eat '#'

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly: // SIMD vector list is not parsed here
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().parseExpression(ExprVal)
             ? MatchOperand_ParseFail : MatchOperand_Success;
}
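
// For example, "#:lo12:var" yields an AArch64MCExpr of kind VK_AARCH64_LO12
// wrapping the MCSymbolRefExpr for "var".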

// A lane attached to a NEON register: "[N]", which should yield three tokens
// '[', N, ']'. A hash is not allowed to precede the immediate here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the final ':'

  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  Parser.Lex(); // Eat ','

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Parser.Lex(); // Eat 'lsl'

    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat '#'
    }

    if (Parser.getTok().isNot(AsmToken::Integer)) {
      Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
      return MatchOperand_ParseFail;
    }
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex(); // Eat identifier
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImm0AndImm0Operand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).

  // This function is only used for floating-point compare-with-zero
  // instructions, so that they accept both #0.0 and #0.
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  APFloat RealVal(0.0);
  if (Parser.getTok().is(AsmToken::Real)) {
    if (Parser.getTok().getString() != "0.0") {
      Error(S, "only #0.0 is acceptable as immediate");
      return MatchOperand_ParseFail;
    }
  } else if (Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().getIntVal() != 0) {
      Error(S, "only #0.0 is acceptable as immediate");
      return MatchOperand_ParseFail;
    }
  } else {
    Error(S, "only #0.0 is acceptable as immediate");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat the token
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(0.0, S, E));
  return MatchOperand_Success;
}

// Automatically generated
static unsigned MatchRegisterName(StringRef Name);

bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  bool IsVec128 = false;
  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == std::string::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d', '.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    StringRef LayoutText = StringRef(LowerReg).substr(DotPos);

    // See if it's a 128-bit layout first.
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".q", ".q").Case(".1q", ".1q")
      .Case(".d", ".d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".16b", ".16b")
      .Default("");

    if (Layout.size() != 0)
      IsVec128 = true;
    else
      Layout = StringSwitch<const char *>(LayoutText)
        .Case(".1d", ".1d")
        .Case(".2s", ".2s")
        .Case(".4h", ".4h")
        .Case(".8b", ".8b")
        .Default("");

    if (Layout.size() == 0) {
      // If we've still not pinned it down the register is malformed.
      return false;
    }
  }

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
      .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
      .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
      .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
      .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
      .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
      .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
      .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
      .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
      .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
      .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
      .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
      .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
      .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
      .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
      .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
      .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
      .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
      .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
      .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
      .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
      .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
      .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
      .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
      .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
      .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
      .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
      .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
      .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
      .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
      .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
      .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
      .Default(AArch64::NoRegister);

    if (RegNum == AArch64::NoRegister)
      return false;
  }

  return true;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      case 'q': NumLanes = 1; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex(); // Eat the register token
  return MatchOperand_Success;
}

bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex();
  EndLoc = Parser.getTok().getLoc();

  return false;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If
  // relaxing this, beware of unintended consequences: an immediate will be
  // accepted during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "Invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (!Tok.is(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
      || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
      || Layout.size() != 0) {
    // Check Layout.size because we don't want to let "x3.4s" or similar
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat the register.

  if (Parser.getTok().is(AsmToken::RBrac)) {
    // The address was just "[Xn|SP]"; we're done.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid.
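  // (e.g. "ldxr x0, [x1, #0]" is accepted and means the same as "[x1]").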
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the ','.

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the '#'.

  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the '0'.

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
    StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
      .Case("lsl", A64SE::LSL)
      .Case("msl", A64SE::MSL)
      .Case("lsr", A64SE::LSR)
      .Case("asr", A64SE::ASR)
      .Case("ror", A64SE::ROR)
      .Case("uxtb", A64SE::UXTB)
      .Case("uxth", A64SE::UXTH)
      .Case("uxtw", A64SE::UXTW)
      .Case("uxtx", A64SE::UXTX)
      .Case("sxtb", A64SE::SXTB)
      .Case("sxth", A64SE::SXTH)
      .Case("sxtw", A64SE::SXTW)
      .Case("sxtx", A64SE::SXTX)
      .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Remember where the specifier started, then eat it.
  SMLoc S, E;
  S = Parser.getTok().getLoc();
  Parser.Lex();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
      Spec != A64SE::ROR && Spec != A64SE::MSL) {
    // The shift amount can be omitted for the extending versions, but not
    // real shifts:
    //     add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //     add x0, x0, x0, uxtb #0
    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat the '#' at the beginning of the immediate.
  if (!Parser.getTok().is(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we actually have a number.
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));
  return MatchOperand_Success;
}

/// Try to parse a vector register token. If it is a vector register, the
/// token is eaten and true is returned; otherwise false is returned.
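/// For example, "v0.8b" and "v31.4s" are accepted, while "x0" (not an FPR64
/// or FPR128 register) and a bare "v0" (no layout) are not.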
bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
                                      StringRef &Layout, SMLoc &LayoutLoc) {
  bool IsVector = true;

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    IsVector = false;
  else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
                .contains(RegNum) &&
           !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
                .contains(RegNum))
    IsVector = false;
  else if (Layout.size() == 0)
    IsVector = false;

  if (!IsVector)
    Error(Parser.getTok().getLoc(), "expected vector type register");

  Parser.Lex(); // Eat this token.
  return IsVector;
}

// A vector list contains 1-4 consecutive registers.
// When more than one vector is listed, two forms are accepted:
//   (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
//   (2) {Vn.layout - Vm.layout}
// If the layout is .b/.h/.s/.d, a lane index may also follow.
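// For example, {v0.4s, v1.4s, v2.4s} and {v0.4s - v2.4s} name the same
// triple, and "ld1 {v0.b}[1], [x0]" carries a lane index after the list.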
AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
    SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
  if (Parser.getTok().isNot(AsmToken::LCurly)) {
    Error(Parser.getTok().getLoc(), "'{' expected");
    return MatchOperand_ParseFail;
  }
  SMLoc SLoc = Parser.getTok().getLoc();
  Parser.Lex(); // Eat the '{' token.

  unsigned Reg, Count = 1;
  StringRef LayoutStr;
  SMLoc RegEndLoc, LayoutLoc;
  if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
    return MatchOperand_ParseFail;

  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    unsigned Reg2;
    StringRef LayoutStr2;
    SMLoc RegEndLoc2, LayoutLoc2;
    SMLoc RegLoc2 = Parser.getTok().getLoc();

    if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
      return MatchOperand_ParseFail;
    unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
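
    // Register numbers wrap modulo 32, so e.g. {v30.4s - v1.4s} covers the
    // four registers v30, v31, v0 and v1.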

    if (LayoutStr != LayoutStr2) {
      Error(LayoutLoc2, "expected the same vector layout");
      return MatchOperand_ParseFail;
    }
    if (Space == 0 || Space > 3) {
      Error(RegLoc2, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  } else {
    unsigned LastReg = Reg;
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.
      unsigned Reg2;
      StringRef LayoutStr2;
      SMLoc RegEndLoc2, LayoutLoc2;
      SMLoc RegLoc2 = Parser.getTok().getLoc();

      if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
        return MatchOperand_ParseFail;
      unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
                                        : (Reg2 + 32 - LastReg);
      Count++;

      // Consecutive vectors must be one register apart and share the same
      // layout, and the total count must not be greater than 4.
      if (Space != 1) {
        Error(RegLoc2, "invalid space between two vectors");
        return MatchOperand_ParseFail;
      }
      if (LayoutStr != LayoutStr2) {
        Error(LayoutLoc2, "expected the same vector layout");
        return MatchOperand_ParseFail;
      }
      if (Count > 4) {
        Error(RegLoc2, "invalid number of vectors");
        return MatchOperand_ParseFail;
      }

      LastReg = Reg2;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  SMLoc ELoc = Parser.getTok().getLoc();
  Parser.Lex(); // Eat the '}' token.

  A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
  if (Count > 1) { // Model a list of 2-4 registers as a super register.
    bool IsVec64 = (Layout < A64Layout::VL_16B);
    static unsigned SupRegIDs[3][2] = {
      { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
      { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
      { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
    };
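    // Rows are indexed by list length (2, 3 or 4 registers), columns by
    // element width: e.g. Count == 2 with a 64-bit layout selects
    // AArch64::DPairRegClassID.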
    unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
    unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
    const MCRegisterInfo *MRI = getContext().getRegisterInfo();
    Reg = MRI->getMatchingSuperReg(Reg, Sub0,
                                   &AArch64MCRegisterClasses[SupRegID]);
  }
  Operands.push_back(
      AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));

  if (Parser.getTok().is(AsmToken::LBrac)) {
    uint32_t NumLanes = 0;
    switch (Layout) {
    case A64Layout::VL_B: NumLanes = 16; break;
    case A64Layout::VL_H: NumLanes = 8; break;
    case A64Layout::VL_S: NumLanes = 4; break;
    case A64Layout::VL_D: NumLanes = 2; break;
    default: {
      SMLoc Loc = getLexer().getLoc();
      Error(Loc, "expected comma before next operand");
      return MatchOperand_ParseFail;
    }
    }
    return ParseNEONLane(Operands, NumLanes);
  }
  return MatchOperand_Success;
}

// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
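
    // For these insert aliases ImmR encodes (RegWidth - LSB) % RegWidth and
    // ImmS encodes Width - 1, so ImmR != 0 with ImmS >= ImmR means
    // LSB + Width > RegWidth: e.g. "bfi w0, w1, #8, #28" is rejected.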
    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }
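
    // For the extract aliases ImmR is the LSB and ImmS is LSB + Width - 1,
    // so e.g. "ubfx w0, w1, #28, #8" gives ImmS == 35 and overflows the
    // 32-bit source register.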
    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    break;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    break;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    break;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    break;
  }
  }

  return false;
}

// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef PatchedName = StringSwitch<StringRef>(Name.lower())
    .Case("beq", "b.eq")
    .Case("bne", "b.ne")
    .Case("bhs", "b.hs")
    .Case("bcs", "b.cs")
    .Case("blo", "b.lo")
    .Case("bcc", "b.cc")
    .Case("bmi", "b.mi")
    .Case("bpl", "b.pl")
    .Case("bvs", "b.vs")
    .Case("bvc", "b.vc")
    .Case("bhi", "b.hi")
    .Case("bls", "b.ls")
    .Case("bge", "b.ge")
    .Case("blt", "b.lt")
    .Case("bgt", "b.gt")
    .Case("ble", "b.le")
    .Case("bal", "b.al")
    .Case("bnv", "b.nv")
    .Default(Name);

  size_t CondCodePos = PatchedName.find('.');

  StringRef Mnemonic = PatchedName.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code.
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = PatchedName.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both
      // are due to memory specifiers:
      //   + An RBrac will end an address for load/store/prefetch
      //   + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
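      // For example, in "ldr x0, [x1, #8]!" both the "]" and the "!" arrive
      // here without a preceding comma and still become operand tokens.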
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement.
  Parser.Lex();
  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// ParseDirectiveWord
///  ::= .word [ expression (, expression)* ]
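/// For example, ".hword 0x1234, 0x5678" emits two 2-byte values and
/// ".xword somesym" emits one 8-byte, possibly relocated, value.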
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return false;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma)) {
        Error(L, "unexpected token in directive");
        return false;
      }
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// ParseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
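// For example, ".tlsdesccall var" placed immediately before "blr x1" marks
// that call for TLS descriptor relaxation against "var".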
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name)) {
    Error(L, "expected symbol after directive");
    return false;
  }

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst, STI);
  return false;
}

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                 MCStreamer &Out, unsigned &ErrorInfo,
                 bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);

  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");

  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");

  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0 or invalid register type");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 504]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1008]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  case Match_ShrImm8:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 8]");
  case Match_ShrImm16:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 16]");
  case Match_ShrImm32:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_ShrImm64:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_ShlImm8:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_ShlImm16:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_ShlImm32:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_ShlImm64:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  }

  llvm_unreachable("Implement any new match types added!");
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"