//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;

namespace {

class AArch64AsmParser : public MCTargetAsmParser {
#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These are the public interface of the MCTargetAsmParser
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);
  // The rest of the sub-parsers have more freedom over interface: they return
  // an OperandMatchResultTy because it's less ambiguous than true/false or
  // -1/0/1 even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);
  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);
  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool validateInstruction(MCInst &Inst,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
/// Instances of this class represent a parsed AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
    k_WrappedRegister // Load/store exclusive permit a wrapped register.

  SMLoc StartLoc, EndLoc;

  struct ImmWithLSLOp {
    unsigned ShiftAmount;

    A64CC::CondCodes Code;

  struct ShiftExtendOp {
    A64SE::ShiftExtSpecifiers ShiftType;

  struct ImmWithLSLOp ImmWithLSL;
  struct CondCodeOp CondCode;
  struct FPImmOp FPImm;
  struct ShiftExtendOp ShiftExtend;
  struct SysRegOp SysReg;

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;

  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }
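
  // The next two predicates accept the immediate of an ADD/SUB (immediate)
  // instruction: a 12-bit unsigned value optionally shifted left by 12, e.g.
  // "add x0, x1, #0xfff" or "add x0, x1, #1, lsl #12". ":lo12:"/":hi12:"-style
  // relocation modifiers on a symbol are also allowed in the right positions.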
  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
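
  // For load/store (register offset) forms the index register may be extended
  // and scaled, e.g. "ldr w0, [x1, w2, uxtw #2]"; the shift amount must be
  // either 0 or log2 of the access size in bytes (MemSize below).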
  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOT
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;

    return isLabel<21, 4096>();

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();

  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr; only a plain symbol reference or an in-range constant is
    // accepted here.
    return false;

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;

    return isLabel<19, 4>();
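
  // Logical immediates are the bitmask patterns accepted by AND/ORR/EOR,
  // e.g. "and x0, x1, #0xff00ff00ff00ff00"; A64Imms::isLogicalImm decides
  // whether a value is encodable as such a pattern.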
  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
        && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
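
  // Unsigned-offset loads and stores scale a 12-bit immediate by the access
  // size, so for an 8-byte access "ldr x0, [x1, #32760]" (0xfff * 8) is the
  // largest representable offset and the offset must be a multiple of 8.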
  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned
    return Val >= 0 && Val <= 0xfff * MemSize;

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;

  bool isMOVN32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);

  bool isMOVN64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);

  bool isMOVZ32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);

  bool isMOVZ64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);

  bool isMOVK32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);

  bool isMOVK64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
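
  // Common checks for MOVZ/MOVN/MOVK immediates: a 16-bit value together with
  // an "lsl #0/16/32/48" selecting which halfword of the register it lands in,
  // e.g. "movz x0, #0xbeef, lsl #32", or a ":abs_g1:"-style modifier that
  // picks the corresponding halfword of a symbol's value.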
  bool isMoveWideImm(unsigned RegWidth,
                     const AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;

  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;

    return isValidImm(RegWidth, Value, UImm16, Shift);

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;

  bool isMRS() const {
    if (!isSysReg()) return false;

    // First check against specific MSR-only (write-only) registers
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;

  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;

  // If 0 < value <= w, return true.
  bool isShrFixedWidth(int w) const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Value = CE->getValue();
    return Value > 0 && Value <= w;

  bool isShrImm8() const { return isShrFixedWidth(8); }

  bool isShrImm16() const { return isShrFixedWidth(16); }

  bool isShrImm32() const { return isShrFixedWidth(32); }

  bool isShrImm64() const { return isShrFixedWidth(64); }

  bool isNeonMovImmShiftLSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0, 8, 16 and 24.
    return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;

  bool isNeonMovImmShiftLSLH() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0 and 8.
    return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;

  bool isNeonMovImmShiftMSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::MSL)
      return false;

    // Valid shift amount is 8 and 16.
    return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
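
  // Load/store pair offsets are signed 7-bit immediates scaled by the access
  // size, e.g. "ldp x0, x1, [sp, #-16]" encodes -16 / 8 = -2.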
  template <int MemSize> bool isSImm7Scaled() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;

  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
        && CE->getValue() < (1LL << (BitWidth - 1));

  template<int bitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
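
  // The 64-bit form of vector MOVI takes a mask in which each byte of the
  // 64-bit immediate is either 0x00 or 0xff, e.g.
  // "movi v0.2d, #0xff00ff00ff00ff00"; only the 8 per-byte flags are encoded
  // (see addNeonUImm64MaskOperands below).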
  bool isNeonUImm64Mask() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    uint64_t Value = CE->getValue();

    // i64 value with each byte being either 0x00 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
        return false;
    return true;

  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));

  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));

  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());

  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert (N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));

  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5-bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));

  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));

  // For Vector Immediates shifted imm operands.
  void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));

  void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSLH shift amount 0, 8 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));

  void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode MSL shift amount 8, 16 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8 - 1;
    Inst.addOperand(MCOperand::CreateImm(Imm));

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // First bit of Option is set in instruction classes, the high two bits are
    // handled here.
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));

  void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // A bit from each byte in the constant forms the encoded immediate
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm));
} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {

  // See if the operand has a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);

      return MatchOperand_Success;
    }

    // ... or it might be a symbolish thing
  }
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is Current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly: // Weird SIMD lists
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().parseExpression(ExprVal)
    ? MatchOperand_ParseFail : MatchOperand_Success;
}

// A lane attached to a NEON register. "[N]", which should yield three tokens:
// '[', N, ']'. A hash is not allowed to precede the immediate here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}
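
// Relocation specifiers appear as ":name:" immediately before an expression,
// e.g. "add x0, x0, #:lo12:var" or "movz x0, #:abs_g1:sym". This parses only
// the leading ":name:" part; the expression itself is parsed by the caller.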
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the final ':'

  return MatchOperand_Success;
}
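
// An immediate optionally followed by a shift, e.g. "#0xbeef, lsl #16", as
// used by the move-wide and add/sub (immediate) forms.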
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  Parser.Lex(); // Eat ','

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().lower() == "lsl") {

    if (Parser.getTok().is(AsmToken::Hash)) {

      if (Parser.getTok().isNot(AsmToken::Integer)) {
        Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
        return MatchOperand_ParseFail;
      }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  std::string LowerTok = Parser.getTok().getIdentifier().lower();
  StringRef Tok(LowerTok);
  if (Tok[0] != 'c') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex(); // Eat identifier
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}
// Automatically generated
static unsigned MatchRegisterName(StringRef Name);

bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == StringRef::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d','.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
      .Default("");

    if (Layout.size() == 0) {
      // Malformed register
      return false;
    }
  }

  return true;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  return MatchOperand_Success;
}

bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  EndLoc = Parser.getTok().getLoc();

  return false;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If relaxing
  // this, beware of unintended consequences: an immediate will be accepted
  // during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "Invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (!Tok.is(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
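
// Load/store exclusive instructions take a bare "[xN]" or "[xN, #0]" address,
// e.g. "ldxr w0, [x1]" or "stxr w2, w1, [x3, #0]"; the base register is
// stored as a wrapped register operand (k_WrappedRegister) so it can be
// matched separately from ordinary register operands.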
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
     || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
     || Layout.size() != 0) {
    // Check Layout.size because we don't want to let "x3.4s" or similar
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat register

  if (Parser.getTok().is(AsmToken::RBrac)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat ','

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'

  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0 ) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '0'

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
    StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
      .Case("lsl", A64SE::LSL)
      .Case("msl", A64SE::MSL)
      .Case("lsr", A64SE::LSR)
      .Case("asr", A64SE::ASR)
      .Case("ror", A64SE::ROR)
      .Case("uxtb", A64SE::UXTB)
      .Case("uxth", A64SE::UXTH)
      .Case("uxtw", A64SE::UXTW)
      .Case("uxtx", A64SE::UXTX)
      .Case("sxtb", A64SE::SXTB)
      .Case("sxth", A64SE::SXTH)
      .Case("sxtw", A64SE::SXTW)
      .Case("sxtx", A64SE::SXTX)
      .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Eat the shift
  SMLoc S, E;
  S = Parser.getTok().getLoc();
  Parser.Lex();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
      Spec != A64SE::ROR && Spec != A64SE::MSL) {
    // The shift amount can be omitted for the extending versions, but not real
    // shifts:
    //   add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //   add x0, x0, x0, uxtb #0
    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat # at beginning of immediate
  if (!Parser.getTok().is(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(),
          "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));

  return MatchOperand_Success;
}
// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
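
    // These aliases arrive here already encoded as BFM-style immediates (see
    // addBFILSBOperands/addBFIWidthOperands above): ImmR == (RegWidth - lsb)
    // % RegWidth and ImmS == width - 1, so an insert that would run past the
    // top of the register shows up as ImmS >= ImmR (ImmR == 0 corresponds to
    // lsb == 0, which cannot overflow).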
    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}
// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  size_t CondCodePos = Name.find('.');
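  // A '.' in the name separates a condition-code suffix: "b.eq", for example,
  // is handled as the mnemonic "b" followed by the condition code "eq". When
  // there is no '.', substr(0, npos) below simply keeps the whole name.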
  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
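      //
      // For example, after parsing the "#16" in "ldr x0, [x1, #16]!", both the
      // ']' closing the address and the '!' marking the pre-indexed form are
      // consumed by the two checks below.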
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
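///
/// .hword, .word and .xword all funnel through here with sizes 2, 4 and 8
/// respectively, so e.g. ".word label + 4" emits a 4-byte value for the
/// expression.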
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
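//
// Typical use is ".tlsdesccall var" immediately before the "blr" of a
// TLS-descriptor sequence: the directive emits no bytes of its own, it only
// marks the call site via the TLSDESCCALL pseudo-instruction built below
// (assumption: this mirrors the usual AArch64 general-dynamic TLS lowering).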
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                               MCStreamer &Out,
                                               unsigned &ErrorInfo,
                                               bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);

  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");
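  // When ErrorInfo is meaningful it indexes the operand the matcher objected
  // to; an index beyond the parsed operand list means the user simply wrote
  // too few operands for any form of this mnemonic.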
  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
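  // The remaining results are operand-class diagnostics produced by the
  // generated matcher; each one points at the offending operand and describes
  // what would have been accepted in its place.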
  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0 or invalid register type");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 508]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1016]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  case Match_ShrImm8:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 8]");
  case Match_ShrImm16:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 16]");
  case Match_ShrImm32:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_ShrImm64:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  }

  llvm_unreachable("Implement any new match types added!");
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
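  // Registering against TheAArch64Target is what lets generic MC-based tools
  // (llvm-mc, the integrated assembler) find this parser for AArch64 triples.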
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"