1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the (GNU-style) assembly parser for the AArch64
13 //===----------------------------------------------------------------------===//
16 #include "MCTargetDesc/AArch64MCTargetDesc.h"
17 #include "MCTargetDesc/AArch64MCExpr.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/StringSwitch.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCInst.h"
25 #include "llvm/MC/MCSubtargetInfo.h"
26 #include "llvm/MC/MCTargetAsmParser.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCRegisterInfo.h"
29 #include "llvm/MC/MCStreamer.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Support/TargetRegistry.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
47 #define GET_ASSEMBLER_HEADER
48 #include "AArch64GenAsmMatcher.inc"
51 enum AArch64MatchResultTy {
52 Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
53 #define GET_OPERAND_DIAGNOSTIC_TYPES
54 #include "AArch64GenAsmMatcher.inc"
57 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
58 const MCInstrInfo &MII)
59 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
60 MCAsmParserExtension::Initialize(_Parser);
62 // Initialize the set of available features.
63 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
66 // These are the public interface of the MCTargetAsmParser
67 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
68 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
70 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
72 bool ParseDirective(AsmToken DirectiveID);
73 bool ParseDirectiveTLSDescCall(SMLoc L);
74 bool ParseDirectiveWord(unsigned Size, SMLoc L);
76 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
77 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
78 MCStreamer&Out, unsigned &ErrorInfo,
79 bool MatchingInlineAsm);
81 // The rest of the sub-parsers have more freedom over interface: they return
82 // an OperandMatchResultTy because it's less ambiguous than true/false or
83 // -1/0/1 even if it is more verbose
85 ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
88 OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
90 OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
93 ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
97 ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
101 ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
104 ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
107 ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
110 ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
112 template<typename SomeNamedImmMapper> OperandMatchResultTy
113 ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
114 return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
118 ParseNamedImmOperand(const NamedImmMapper &Mapper,
119 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
122 ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
125 ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
128 ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
130 bool validateInstruction(MCInst &Inst,
131 const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
133 /// Scan the next token (which had better be an identifier) and determine
134 /// whether it represents a general-purpose or vector register. It returns
135 /// true if an identifier was found and populates its reference arguments. It
136 /// does not consume the token.
138 IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
139 SMLoc &LayoutLoc) const;
147 /// Instances of this class represent a parsed AArch64 machine instruction.
148 class AArch64Operand : public MCParsedAsmOperand {
151 k_ImmWithLSL, // #uimm {, LSL #amt }
152 k_CondCode, // eq/ne/...
153 k_FPImmediate, // Limited-precision floating-point imm
154 k_Immediate, // Including expressions referencing symbols
157 k_SysReg, // The register operand of MRS and MSR instructions
158 k_Token, // The mnemonic; other raw tokens the auto-generated
159 k_WrappedRegister // Load/store exclusive permit a wrapped register.
162 SMLoc StartLoc, EndLoc;
164 struct ImmWithLSLOp {
166 unsigned ShiftAmount;
171 A64CC::CondCodes Code;
186 struct ShiftExtendOp {
187 A64SE::ShiftExtSpecifiers ShiftType;
203 struct ImmWithLSLOp ImmWithLSL;
204 struct CondCodeOp CondCode;
205 struct FPImmOp FPImm;
208 struct ShiftExtendOp ShiftExtend;
209 struct SysRegOp SysReg;
213 AArch64Operand(KindTy K, SMLoc S, SMLoc E)
214 : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
217 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
220 SMLoc getStartLoc() const { return StartLoc; }
221 SMLoc getEndLoc() const { return EndLoc; }
222 void print(raw_ostream&) const;
225 StringRef getToken() const {
226 assert(Kind == k_Token && "Invalid access!");
227 return StringRef(Tok.Data, Tok.Length);
230 unsigned getReg() const {
231 assert((Kind == k_Register || Kind == k_WrappedRegister)
232 && "Invalid access!");
236 const MCExpr *getImm() const {
237 assert(Kind == k_Immediate && "Invalid access!");
241 A64CC::CondCodes getCondCode() const {
242 assert(Kind == k_CondCode && "Invalid access!");
243 return CondCode.Code;
246 static bool isNonConstantExpr(const MCExpr *E,
247 AArch64MCExpr::VariantKind &Variant) {
248 if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
249 Variant = A64E->getKind();
251 } else if (!isa<MCConstantExpr>(E)) {
252 Variant = AArch64MCExpr::VK_AARCH64_None;
259 bool isCondCode() const { return Kind == k_CondCode; }
260 bool isToken() const { return Kind == k_Token; }
261 bool isReg() const { return Kind == k_Register; }
262 bool isImm() const { return Kind == k_Immediate; }
263 bool isMem() const { return false; }
264 bool isFPImm() const { return Kind == k_FPImmediate; }
265 bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
266 bool isSysReg() const { return Kind == k_SysReg; }
267 bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
268 bool isWrappedReg() const { return Kind == k_WrappedRegister; }
270 bool isAddSubImmLSL0() const {
271 if (!isImmWithLSL()) return false;
272 if (ImmWithLSL.ShiftAmount != 0) return false;
274 AArch64MCExpr::VariantKind Variant;
275 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
276 return Variant == AArch64MCExpr::VK_AARCH64_LO12
277 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
278 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
279 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
280 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
281 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
284 // Otherwise it should be a real immediate in range:
285 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
286 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
289 bool isAddSubImmLSL12() const {
290 if (!isImmWithLSL()) return false;
291 if (ImmWithLSL.ShiftAmount != 12) return false;
293 AArch64MCExpr::VariantKind Variant;
294 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
295 return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
296 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
299 // Otherwise it should be a real immediate in range:
300 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
301 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
304 template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
305 if (!isShiftOrExtend()) return false;
307 A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
308 if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
311 if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
314 return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
317 bool isAdrpLabel() const {
318 if (!isImm()) return false;
320 AArch64MCExpr::VariantKind Variant;
321 if (isNonConstantExpr(getImm(), Variant)) {
322 return Variant == AArch64MCExpr::VK_AARCH64_None
323 || Variant == AArch64MCExpr::VK_AARCH64_GOT
324 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
325 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
328 return isLabel<21, 4096>();
331 template<unsigned RegWidth> bool isBitfieldWidth() const {
332 if (!isImm()) return false;
334 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
335 if (!CE) return false;
337 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
340 template<int RegWidth>
341 bool isCVTFixedPos() const {
342 if (!isImm()) return false;
344 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
345 if (!CE) return false;
347 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
350 bool isFMOVImm() const {
351 if (!isFPImm()) return false;
353 APFloat RealVal(FPImm.Val);
355 return A64Imms::isFPImm(RealVal, ImmVal);
358 bool isFPZero() const {
359 if (!isFPImm()) return false;
361 APFloat RealVal(FPImm.Val);
362 return RealVal.isPosZero();
365 template<unsigned field_width, unsigned scale>
366 bool isLabel() const {
367 if (!isImm()) return false;
369 if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
371 } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
372 int64_t Val = CE->getValue();
373 int64_t Min = - (scale * (1LL << (field_width - 1)));
374 int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
375 return (Val % scale) == 0 && Val >= Min && Val <= Max;
378 // N.b. this disallows explicit relocation specifications via an
379 // AArch64MCExpr. Users needing that behaviour
383 bool isLane1() const {
384 if (!isImm()) return false;
386 // Because it's come through custom assembly parsing, it must always be a
387 // constant expression.
388 return cast<MCConstantExpr>(getImm())->getValue() == 1;
391 bool isLoadLitLabel() const {
392 if (!isImm()) return false;
394 AArch64MCExpr::VariantKind Variant;
395 if (isNonConstantExpr(getImm(), Variant)) {
396 return Variant == AArch64MCExpr::VK_AARCH64_None
397 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
400 return isLabel<19, 4>();
403 template<unsigned RegWidth> bool isLogicalImm() const {
404 if (!isImm()) return false;
406 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
407 if (!CE) return false;
410 return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
413 template<unsigned RegWidth> bool isLogicalImmMOV() const {
414 if (!isLogicalImm<RegWidth>()) return false;
416 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
418 // The move alias for ORR is only valid if the immediate cannot be
419 // represented with a move (immediate) instruction; they take priority.
421 return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
422 && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
425 template<int MemSize>
426 bool isOffsetUImm12() const {
427 if (!isImm()) return false;
429 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
431 // Assume they know what they're doing for now if they've given us a
432 // non-constant expression. In principle we could check for ridiculous
433 // things that can't possibly work or relocations that would almost
434 // certainly break resulting code.
438 int64_t Val = CE->getValue();
440 // Must be a multiple of the access size in bytes.
441 if ((Val & (MemSize - 1)) != 0) return false;
443 // Must be 12-bit unsigned
444 return Val >= 0 && Val <= 0xfff * MemSize;
447 template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
448 bool isShift() const {
449 if (!isShiftOrExtend()) return false;
451 if (ShiftExtend.ShiftType != SHKind)
454 return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
457 bool isMOVN32Imm() const {
458 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
459 AArch64MCExpr::VK_AARCH64_SABS_G0,
460 AArch64MCExpr::VK_AARCH64_SABS_G1,
461 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
462 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
463 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
464 AArch64MCExpr::VK_AARCH64_TPREL_G1,
465 AArch64MCExpr::VK_AARCH64_TPREL_G0,
467 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
469 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
472 bool isMOVN64Imm() const {
473 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
474 AArch64MCExpr::VK_AARCH64_SABS_G0,
475 AArch64MCExpr::VK_AARCH64_SABS_G1,
476 AArch64MCExpr::VK_AARCH64_SABS_G2,
477 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
478 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
479 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
480 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
481 AArch64MCExpr::VK_AARCH64_TPREL_G2,
482 AArch64MCExpr::VK_AARCH64_TPREL_G1,
483 AArch64MCExpr::VK_AARCH64_TPREL_G0,
485 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
487 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
491 bool isMOVZ32Imm() const {
492 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
493 AArch64MCExpr::VK_AARCH64_ABS_G0,
494 AArch64MCExpr::VK_AARCH64_ABS_G1,
495 AArch64MCExpr::VK_AARCH64_SABS_G0,
496 AArch64MCExpr::VK_AARCH64_SABS_G1,
497 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
498 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
499 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
500 AArch64MCExpr::VK_AARCH64_TPREL_G1,
501 AArch64MCExpr::VK_AARCH64_TPREL_G0,
503 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
505 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
508 bool isMOVZ64Imm() const {
509 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
510 AArch64MCExpr::VK_AARCH64_ABS_G0,
511 AArch64MCExpr::VK_AARCH64_ABS_G1,
512 AArch64MCExpr::VK_AARCH64_ABS_G2,
513 AArch64MCExpr::VK_AARCH64_ABS_G3,
514 AArch64MCExpr::VK_AARCH64_SABS_G0,
515 AArch64MCExpr::VK_AARCH64_SABS_G1,
516 AArch64MCExpr::VK_AARCH64_SABS_G2,
517 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
518 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
519 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
520 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
521 AArch64MCExpr::VK_AARCH64_TPREL_G2,
522 AArch64MCExpr::VK_AARCH64_TPREL_G1,
523 AArch64MCExpr::VK_AARCH64_TPREL_G0,
525 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
527 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
530 bool isMOVK32Imm() const {
531 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
532 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
533 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
534 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
535 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
536 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
537 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
538 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
540 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
542 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
545 bool isMOVK64Imm() const {
546 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
547 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
548 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
549 AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
550 AArch64MCExpr::VK_AARCH64_ABS_G3,
551 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
552 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
553 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
554 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
555 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
557 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
559 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
562 bool isMoveWideImm(unsigned RegWidth,
563 const AArch64MCExpr::VariantKind *PermittedModifiers,
564 unsigned NumModifiers) const {
565 if (!isImmWithLSL()) return false;
567 if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
568 if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
570 AArch64MCExpr::VariantKind Modifier;
571 if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
572 // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
573 if (!ImmWithLSL.ImplicitAmount) return false;
575 for (unsigned i = 0; i < NumModifiers; ++i)
576 if (PermittedModifiers[i] == Modifier) return true;
581 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
582 return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
585 template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
586 bool isMoveWideMovAlias() const {
587 if (!isImm()) return false;
589 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
590 if (!CE) return false;
593 uint64_t Value = CE->getValue();
595 // If this is a 32-bit instruction then all bits above 32 should be the
596 // same: either of these is fine because signed/unsigned values should be
598 if (RegWidth == 32) {
599 if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
602 Value &= 0xffffffffULL;
605 return isValidImm(RegWidth, Value, UImm16, Shift);
608 bool isMSRWithReg() const {
609 if (!isSysReg()) return false;
611 bool IsKnownRegister;
612 StringRef Name(SysReg.Data, SysReg.Length);
613 A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
615 return IsKnownRegister;
618 bool isMSRPState() const {
619 if (!isSysReg()) return false;
621 bool IsKnownRegister;
622 StringRef Name(SysReg.Data, SysReg.Length);
623 A64PState::PStateMapper().fromString(Name, IsKnownRegister);
625 return IsKnownRegister;
629 if (!isSysReg()) return false;
631 // First check against specific MSR-only (write-only) registers
632 bool IsKnownRegister;
633 StringRef Name(SysReg.Data, SysReg.Length);
634 A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
636 return IsKnownRegister;
639 bool isPRFM() const {
640 if (!isImm()) return false;
642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
647 return CE->getValue() >= 0 && CE->getValue() <= 31;
650 template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
651 if (!isShiftOrExtend()) return false;
653 if (ShiftExtend.ShiftType != SHKind)
656 return ShiftExtend.Amount <= 4;
659 bool isRegExtendLSL() const {
660 if (!isShiftOrExtend()) return false;
662 if (ShiftExtend.ShiftType != A64SE::LSL)
665 return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
668 // if 0 < value <= w, return true
669 bool isShrFixedWidth(int w) const {
672 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
675 int64_t Value = CE->getValue();
676 return Value > 0 && Value <= w;
679 bool isShrImm8() const { return isShrFixedWidth(8); }
681 bool isShrImm16() const { return isShrFixedWidth(16); }
683 bool isShrImm32() const { return isShrFixedWidth(32); }
685 bool isShrImm64() const { return isShrFixedWidth(64); }
687 bool isNeonMovImmShiftLSL() const {
688 if (!isShiftOrExtend())
691 if (ShiftExtend.ShiftType != A64SE::LSL)
694 // Valid shift amount is 0, 8, 16 and 24.
695 return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
698 bool isNeonMovImmShiftLSLH() const {
699 if (!isShiftOrExtend())
702 if (ShiftExtend.ShiftType != A64SE::LSL)
705 // Valid shift amount is 0 and 8.
706 return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
709 bool isNeonMovImmShiftMSL() const {
710 if (!isShiftOrExtend())
713 if (ShiftExtend.ShiftType != A64SE::MSL)
716 // Valid shift amount is 8 and 16.
717 return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
720 template <int MemSize> bool isSImm7Scaled() const {
724 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
725 if (!CE) return false;
727 int64_t Val = CE->getValue();
728 if (Val % MemSize != 0) return false;
732 return Val >= -64 && Val < 64;
735 template<int BitWidth>
736 bool isSImm() const {
737 if (!isImm()) return false;
739 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
740 if (!CE) return false;
742 return CE->getValue() >= -(1LL << (BitWidth - 1))
743 && CE->getValue() < (1LL << (BitWidth - 1));
746 template<int bitWidth>
747 bool isUImm() const {
748 if (!isImm()) return false;
750 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751 if (!CE) return false;
753 return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
756 bool isUImm() const {
757 if (!isImm()) return false;
759 return isa<MCConstantExpr>(getImm());
762 bool isNeonUImm64Mask() const {
766 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
770 uint64_t Value = CE->getValue();
772 // i64 value with each byte being either 0x00 or 0xff.
773 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
774 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
779 static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
780 unsigned ShiftAmount,
783 AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
784 Op->ImmWithLSL.Val = Val;
785 Op->ImmWithLSL.ShiftAmount = ShiftAmount;
786 Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
790 static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
792 AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
793 Op->CondCode.Code = Code;
797 static AArch64Operand *CreateFPImm(double Val,
799 AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
804 static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
805 AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
810 static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
811 AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
812 Op->Reg.RegNum = RegNum;
816 static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
817 AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
818 Op->Reg.RegNum = RegNum;
822 static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
826 AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
827 Op->ShiftExtend.ShiftType = ShiftTyp;
828 Op->ShiftExtend.Amount = Amount;
829 Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
833 static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
834 AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
835 Op->Tok.Data = Str.data();
836 Op->Tok.Length = Str.size();
840 static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
841 AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
842 Op->Tok.Data = Str.data();
843 Op->Tok.Length = Str.size();
848 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
849 // Add as immediates when possible.
850 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
851 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
853 Inst.addOperand(MCOperand::CreateExpr(Expr));
856 template<unsigned RegWidth>
857 void addBFILSBOperands(MCInst &Inst, unsigned N) const {
858 assert(N == 1 && "Invalid number of operands!");
859 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
860 unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
861 Inst.addOperand(MCOperand::CreateImm(EncodedVal));
864 void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
865 assert(N == 1 && "Invalid number of operands!");
866 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
867 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
870 void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
871 assert(N == 1 && "Invalid number of operands!");
873 uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
874 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
876 Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
879 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
880 assert(N == 1 && "Invalid number of operands!");
881 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
884 void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
885 assert(N == 1 && "Invalid number of operands!");
887 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
888 Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
891 void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
892 assert(N == 1 && "Invalid number of operands!");
894 APFloat RealVal(FPImm.Val);
896 A64Imms::isFPImm(RealVal, ImmVal);
898 Inst.addOperand(MCOperand::CreateImm(ImmVal));
901 void addFPZeroOperands(MCInst &Inst, unsigned N) const {
902 assert(N == 1 && "Invalid number of operands");
903 Inst.addOperand(MCOperand::CreateImm(0));
906 void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
907 assert(N == 1 && "Invalid number of operands!");
908 unsigned Encoded = A64InvertCondCode(getCondCode());
909 Inst.addOperand(MCOperand::CreateImm(Encoded));
912 void addRegOperands(MCInst &Inst, unsigned N) const {
913 assert(N == 1 && "Invalid number of operands!");
914 Inst.addOperand(MCOperand::CreateReg(getReg()));
917 void addImmOperands(MCInst &Inst, unsigned N) const {
918 assert(N == 1 && "Invalid number of operands!");
919 addExpr(Inst, getImm());
922 template<int MemSize>
923 void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
924 assert(N == 1 && "Invalid number of operands!");
926 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
927 uint64_t Val = CE->getValue() / MemSize;
928 Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
931 template<int BitWidth>
932 void addSImmOperands(MCInst &Inst, unsigned N) const {
933 assert(N == 1 && "Invalid number of operands!");
935 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
936 uint64_t Val = CE->getValue();
937 Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
940 void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
941 assert (N == 1 && "Invalid number of operands!");
943 addExpr(Inst, ImmWithLSL.Val);
946 template<unsigned field_width, unsigned scale>
947 void addLabelOperands(MCInst &Inst, unsigned N) const {
948 assert(N == 1 && "Invalid number of operands!");
950 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
953 addExpr(Inst, Imm.Val);
957 int64_t Val = CE->getValue();
958 assert(Val % scale == 0 && "Unaligned immediate in instruction");
961 Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
964 template<int MemSize>
965 void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
966 assert(N == 1 && "Invalid number of operands!");
968 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
969 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
971 Inst.addOperand(MCOperand::CreateExpr(getImm()));
975 template<unsigned RegWidth>
976 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
977 assert(N == 1 && "Invalid number of operands");
978 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
981 A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
983 Inst.addOperand(MCOperand::CreateImm(Bits));
986 void addMRSOperands(MCInst &Inst, unsigned N) const {
987 assert(N == 1 && "Invalid number of operands!");
990 StringRef Name(SysReg.Data, SysReg.Length);
991 uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
993 Inst.addOperand(MCOperand::CreateImm(Bits));
996 void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
997 assert(N == 1 && "Invalid number of operands!");
1000 StringRef Name(SysReg.Data, SysReg.Length);
1001 uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
1003 Inst.addOperand(MCOperand::CreateImm(Bits));
1006 void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
1007 assert(N == 1 && "Invalid number of operands!");
1010 StringRef Name(SysReg.Data, SysReg.Length);
1011 uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
1013 Inst.addOperand(MCOperand::CreateImm(Bits));
1016 void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
1017 assert(N == 2 && "Invalid number of operands!");
1019 addExpr(Inst, ImmWithLSL.Val);
1021 AArch64MCExpr::VariantKind Variant;
1022 if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
1023 Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
1027 // We know it's relocated
1029 case AArch64MCExpr::VK_AARCH64_ABS_G0:
1030 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
1031 case AArch64MCExpr::VK_AARCH64_SABS_G0:
1032 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
1033 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
1034 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
1035 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
1036 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
1037 Inst.addOperand(MCOperand::CreateImm(0));
1039 case AArch64MCExpr::VK_AARCH64_ABS_G1:
1040 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
1041 case AArch64MCExpr::VK_AARCH64_SABS_G1:
1042 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
1043 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
1044 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
1045 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
1046 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
1047 Inst.addOperand(MCOperand::CreateImm(1));
1049 case AArch64MCExpr::VK_AARCH64_ABS_G2:
1050 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
1051 case AArch64MCExpr::VK_AARCH64_SABS_G2:
1052 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
1053 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
1054 Inst.addOperand(MCOperand::CreateImm(2));
1056 case AArch64MCExpr::VK_AARCH64_ABS_G3:
1057 Inst.addOperand(MCOperand::CreateImm(3));
1059 default: llvm_unreachable("Inappropriate move wide relocation");
1063 template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
1064 void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
1065 assert(N == 2 && "Invalid number of operands!");
1068 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1069 uint64_t Value = CE->getValue();
1071 if (RegWidth == 32) {
1072 Value &= 0xffffffffULL;
1075 bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
1077 assert(Valid && "Invalid immediates should have been weeded out by now");
1079 Inst.addOperand(MCOperand::CreateImm(UImm16));
1080 Inst.addOperand(MCOperand::CreateImm(Shift));
1083 void addPRFMOperands(MCInst &Inst, unsigned N) const {
1084 assert(N == 1 && "Invalid number of operands!");
1086 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1087 assert(CE->getValue() >= 0 && CE->getValue() <= 31
1088 && "PRFM operand should be 5-bits");
1090 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1093 // For Add-sub (extended register) operands.
1094 void addRegExtendOperands(MCInst &Inst, unsigned N) const {
1095 assert(N == 1 && "Invalid number of operands!");
1097 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1100 // For Vector Immediates shifted imm operands.
1101 void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
1102 assert(N == 1 && "Invalid number of operands!");
1104 if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
1105 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1107 // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
1108 int64_t Imm = ShiftExtend.Amount / 8;
1109 Inst.addOperand(MCOperand::CreateImm(Imm));
1112 void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
1113 assert(N == 1 && "Invalid number of operands!");
1115 if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
1116 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1118 // Encode LSLH shift amount 0, 8 as 0, 1.
1119 int64_t Imm = ShiftExtend.Amount / 8;
1120 Inst.addOperand(MCOperand::CreateImm(Imm));
1123 void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
1124 assert(N == 1 && "Invalid number of operands!");
1126 if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
1127 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1129 // Encode MSL shift amount 8, 16 as 0, 1.
1130 int64_t Imm = ShiftExtend.Amount / 8 - 1;
1131 Inst.addOperand(MCOperand::CreateImm(Imm));
1134 // For the extend in load-store (register offset) instructions.
1135 template<unsigned MemSize>
1136 void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
1137 addAddrRegExtendOperands(Inst, N, MemSize);
1140 void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
1141 unsigned MemSize) const {
1142 assert(N == 1 && "Invalid number of operands!");
1144 // First bit of Option is set in instruction classes, the high two bits are
1146 unsigned OptionHi = 0;
1147 switch (ShiftExtend.ShiftType) {
1157 llvm_unreachable("Invalid extend type for register offset");
1161 if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
1163 else if (MemSize != 1 && ShiftExtend.Amount != 0)
1166 Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
1168 void addShiftOperands(MCInst &Inst, unsigned N) const {
1169 assert(N == 1 && "Invalid number of operands!");
1171 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1174 void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1177 // A bit from each byte in the constant forms the encoded immediate
1178 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1179 uint64_t Value = CE->getValue();
1182 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1183 Imm |= (Value & 1) << i;
1185 Inst.addOperand(MCOperand::CreateImm(Imm));
1189 } // end anonymous namespace.
1191 AArch64AsmParser::OperandMatchResultTy
1192 AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1193 StringRef Mnemonic) {
1195 // See if the operand has a custom parser
1196 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1198 // It could either succeed, fail or just not care.
1199 if (ResTy != MatchOperand_NoMatch)
1202 switch (getLexer().getKind()) {
1204 Error(Parser.getTok().getLoc(), "unexpected token in operand");
1205 return MatchOperand_ParseFail;
1206 case AsmToken::Identifier: {
1207 // It might be in the LSL/UXTB family ...
1208 OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
1210 // We can only continue if no tokens were eaten.
1211 if (GotShift != MatchOperand_NoMatch)
1214 // ... or it might be a register ...
1215 uint32_t NumLanes = 0;
1216 OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
1217 assert(GotReg != MatchOperand_ParseFail
1218 && "register parsing shouldn't partially succeed");
1220 if (GotReg == MatchOperand_Success) {
1221 if (Parser.getTok().is(AsmToken::LBrac))
1222 return ParseNEONLane(Operands, NumLanes);
1224 return MatchOperand_Success;
1227 // ... or it might be a symbolish thing
1230 case AsmToken::LParen: // E.g. (strcmp-4)
1231 case AsmToken::Integer: // 1f, 2b labels
1232 case AsmToken::String: // quoted labels
1233 case AsmToken::Dot: // . is Current location
1234 case AsmToken::Dollar: // $ is PC
1235 case AsmToken::Colon: {
1236 SMLoc StartLoc = Parser.getTok().getLoc();
1238 const MCExpr *ImmVal = 0;
1240 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1241 return MatchOperand_ParseFail;
1243 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1244 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1245 return MatchOperand_Success;
1247 case AsmToken::Hash: { // Immediates
1248 SMLoc StartLoc = Parser.getTok().getLoc();
1250 const MCExpr *ImmVal = 0;
1253 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1254 return MatchOperand_ParseFail;
1256 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1257 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1258 return MatchOperand_Success;
1260 case AsmToken::LBrac: {
1261 SMLoc Loc = Parser.getTok().getLoc();
1262 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1263 Parser.Lex(); // Eat '['
1265 // There's no comma after a '[', so we can parse the next operand
1267 return ParseOperand(Operands, Mnemonic);
1269 // The following will likely be useful later, but not in very early cases
1270 case AsmToken::LCurly: // Weird SIMD lists
1271 llvm_unreachable("Don't know how to deal with '{' in operand");
1272 return MatchOperand_ParseFail;
1276 AArch64AsmParser::OperandMatchResultTy
1277 AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
1278 if (getLexer().is(AsmToken::Colon)) {
1279 AArch64MCExpr::VariantKind RefKind;
1281 OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
1282 if (ResTy != MatchOperand_Success)
1285 const MCExpr *SubExprVal;
1286 if (getParser().parseExpression(SubExprVal))
1287 return MatchOperand_ParseFail;
1289 ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
1290 return MatchOperand_Success;
1293 // No weird AArch64MCExpr prefix
1294 return getParser().parseExpression(ExprVal)
1295 ? MatchOperand_ParseFail : MatchOperand_Success;
1298 // A lane attached to a NEON register. "[N]", which should yield three tokens:
1299 // '[', N, ']'. A hash is not allowed to precede the immediate here.
1300 AArch64AsmParser::OperandMatchResultTy
1301 AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1302 uint32_t NumLanes) {
1303 SMLoc Loc = Parser.getTok().getLoc();
1305 assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
1306 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1307 Parser.Lex(); // Eat '['
1309 if (Parser.getTok().isNot(AsmToken::Integer)) {
1310 Error(Parser.getTok().getLoc(), "expected lane number");
1311 return MatchOperand_ParseFail;
1314 if (Parser.getTok().getIntVal() >= NumLanes) {
1315 Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
1316 return MatchOperand_ParseFail;
1319 const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
1321 SMLoc S = Parser.getTok().getLoc();
1322 Parser.Lex(); // Eat actual lane
1323 SMLoc E = Parser.getTok().getLoc();
1324 Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
1327 if (Parser.getTok().isNot(AsmToken::RBrac)) {
1328 Error(Parser.getTok().getLoc(), "expected ']' after lane");
1329 return MatchOperand_ParseFail;
1332 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
1333 Parser.Lex(); // Eat ']'
1335 return MatchOperand_Success;
1338 AArch64AsmParser::OperandMatchResultTy
1339 AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
1340 assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
1343 if (getLexer().isNot(AsmToken::Identifier)) {
1344 Error(Parser.getTok().getLoc(),
1345 "expected relocation specifier in operand after ':'");
1346 return MatchOperand_ParseFail;
1349 std::string LowerCase = Parser.getTok().getIdentifier().lower();
1350 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
1351 .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
1352 .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
1353 .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
1354 .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
1355 .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
1356 .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
1357 .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
1358 .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
1359 .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
1360 .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
1361 .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
1362 .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
1363 .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
1364 .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
1365 .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
1366 .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
1367 .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
1368 .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
1369 .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
1370 .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
1371 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
1372 .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
1373 .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
1374 .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
1375 .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
1376 .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
1377 .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
1378 .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
1379 .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
1380 .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
1381 .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
1382 .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
1383 .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
1384 .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
1385 .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
1386 .Default(AArch64MCExpr::VK_AARCH64_None);
1388 if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
1389 Error(Parser.getTok().getLoc(),
1390 "expected relocation specifier in operand after ':'");
1391 return MatchOperand_ParseFail;
1393 Parser.Lex(); // Eat identifier
1395 if (getLexer().isNot(AsmToken::Colon)) {
1396 Error(Parser.getTok().getLoc(),
1397 "expected ':' after relocation specifier");
1398 return MatchOperand_ParseFail;
1401 return MatchOperand_Success;
1404 AArch64AsmParser::OperandMatchResultTy
1405 AArch64AsmParser::ParseImmWithLSLOperand(
1406 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1407 // FIXME?: I want to live in a world where immediates must start with
1408 // #. Please don't dash my hopes (well, do if you have a good reason).
1409 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1411 SMLoc S = Parser.getTok().getLoc();
1412 Parser.Lex(); // Eat '#'
1415 if (ParseImmediate(Imm) != MatchOperand_Success)
1416 return MatchOperand_ParseFail;
1417 else if (Parser.getTok().isNot(AsmToken::Comma)) {
1418 SMLoc E = Parser.getTok().getLoc();
1419 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
1420 return MatchOperand_Success;
1426 // The optional operand must be "lsl #N" where N is non-negative.
1427 if (Parser.getTok().is(AsmToken::Identifier)
1428 && Parser.getTok().getIdentifier().lower() == "lsl") {
1431 if (Parser.getTok().is(AsmToken::Hash)) {
1434 if (Parser.getTok().isNot(AsmToken::Integer)) {
1435 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
1436 return MatchOperand_ParseFail;
1441 int64_t ShiftAmount = Parser.getTok().getIntVal();
1443 if (ShiftAmount < 0) {
1444 Error(Parser.getTok().getLoc(), "positive shift amount required");
1445 return MatchOperand_ParseFail;
1447 Parser.Lex(); // Eat the number
1449 SMLoc E = Parser.getTok().getLoc();
1450 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
1452 return MatchOperand_Success;
1456 AArch64AsmParser::OperandMatchResultTy
1457 AArch64AsmParser::ParseCondCodeOperand(
1458 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1459 if (Parser.getTok().isNot(AsmToken::Identifier))
1460 return MatchOperand_NoMatch;
1462 StringRef Tok = Parser.getTok().getIdentifier();
1463 A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
1465 if (CondCode == A64CC::Invalid)
1466 return MatchOperand_NoMatch;
1468 SMLoc S = Parser.getTok().getLoc();
1469 Parser.Lex(); // Eat condition code
1470 SMLoc E = Parser.getTok().getLoc();
1472 Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
1473 return MatchOperand_Success;
1476 AArch64AsmParser::OperandMatchResultTy
1477 AArch64AsmParser::ParseCRxOperand(
1478 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1479 SMLoc S = Parser.getTok().getLoc();
1480 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1481 Error(S, "Expected cN operand where 0 <= N <= 15");
1482 return MatchOperand_ParseFail;
1485 std::string LowerTok = Parser.getTok().getIdentifier().lower();
1486 StringRef Tok(LowerTok);
1487 if (Tok[0] != 'c') {
1488 Error(S, "Expected cN operand where 0 <= N <= 15");
1489 return MatchOperand_ParseFail;
1493 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1494 if (BadNum || CRNum > 15) {
1495 Error(S, "Expected cN operand where 0 <= N <= 15");
1496 return MatchOperand_ParseFail;
1499 const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
1502 SMLoc E = Parser.getTok().getLoc();
1504 Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
1505 return MatchOperand_Success;
1508 AArch64AsmParser::OperandMatchResultTy
1509 AArch64AsmParser::ParseFPImmOperand(
1510 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1512 // FIXME?: I want to live in a world where immediates must start with
1513 // #. Please don't dash my hopes (well, do if you have a good reason).
1514 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1516 SMLoc S = Parser.getTok().getLoc();
1517 Parser.Lex(); // Eat '#'
1519 bool Negative = false;
1520 if (Parser.getTok().is(AsmToken::Minus)) {
1522 Parser.Lex(); // Eat '-'
1523 } else if (Parser.getTok().is(AsmToken::Plus)) {
1524 Parser.Lex(); // Eat '+'
1527 if (Parser.getTok().isNot(AsmToken::Real)) {
1528 Error(S, "Expected floating-point immediate");
1529 return MatchOperand_ParseFail;
1532 APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
1533 if (Negative) RealVal.changeSign();
1534 double DblVal = RealVal.convertToDouble();
1536 Parser.Lex(); // Eat real number
1537 SMLoc E = Parser.getTok().getLoc();
1539 Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
1540 return MatchOperand_Success;
1544 // Automatically generated
1545 static unsigned MatchRegisterName(StringRef Name);
1548 AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
1550 SMLoc &LayoutLoc) const {
1551 const AsmToken &Tok = Parser.getTok();
1553 if (Tok.isNot(AsmToken::Identifier))
1556 std::string LowerReg = Tok.getString().lower();
1557 size_t DotPos = LowerReg.find('.');
1559 bool IsVec128 = false;
1560 SMLoc S = Tok.getLoc();
1561 RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
1563 if (DotPos == std::string::npos) {
1564 Layout = StringRef();
1566 // Everything afterwards needs to be a literal token, expected to be
1567 // '.2d','.b' etc for vector registers.
1569 // This StringSwitch validates the input and (perhaps more importantly)
1570 // gives us a permanent string to use in the token (a pointer into LowerReg
1571 // would go out of scope when we return).
1572 LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
1573 std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
1575 // See if it's a 128-bit layout first.
1576 Layout = StringSwitch<const char *>(LayoutText)
1577 .Case(".d", ".d").Case(".2d", ".2d")
1578 .Case(".s", ".s").Case(".4s", ".4s")
1579 .Case(".h", ".h").Case(".8h", ".8h")
1580 .Case(".b", ".b").Case(".16b", ".16b")
1583 if (Layout.size() != 0)
1586 Layout = StringSwitch<const char *>(LayoutText)
1594 if (Layout.size() == 0) {
1595 // If we've still not pinned it down the register is malformed.
1600 RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
1601 if (RegNum == AArch64::NoRegister) {
1602 RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
1603 .Case("ip0", AArch64::X16)
1604 .Case("ip1", AArch64::X17)
1605 .Case("fp", AArch64::X29)
1606 .Case("lr", AArch64::X30)
1607 .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
1608 .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
1609 .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
1610 .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
1611 .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
1612 .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
1613 .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
1614 .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
1615 .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
1616 .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
1617 .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
1618 .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
1619 .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
1620 .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
1621 .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
1622 .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
1623 .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
1624 .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
1625 .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
1626 .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
1627 .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
1628 .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
1629 .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
1630 .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
1631 .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
1632 .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
1633 .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
1634 .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
1635 .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
1636 .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
1637 .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
1638 .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
1639 .Default(AArch64::NoRegister);
1641 if (RegNum == AArch64::NoRegister)
1647 AArch64AsmParser::OperandMatchResultTy
1648 AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1649 uint32_t &NumLanes) {
1652 SMLoc RegEndLoc, LayoutLoc;
1653 SMLoc S = Parser.getTok().getLoc();
1655 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
1656 return MatchOperand_NoMatch;
1658 Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
1660 if (Layout.size() != 0) {
1661 unsigned long long TmpLanes = 0;
1662 llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
1663 if (TmpLanes != 0) {
1664 NumLanes = TmpLanes;
1666 // If the number of lanes isn't specified explicitly, a valid instruction
1667 // will have an element specifier and be capable of acting on the entire
1669 switch (Layout.back()) {
1670 default: llvm_unreachable("Invalid layout specifier");
1671 case 'b': NumLanes = 16; break;
1672 case 'h': NumLanes = 8; break;
1673 case 's': NumLanes = 4; break;
1674 case 'd': NumLanes = 2; break;
1678 Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
1682 return MatchOperand_Success;
1686 AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1688 // This callback is used for things like DWARF frame directives in
1689 // assembly. They don't care about things like NEON layouts or lanes, they
1690 // just want to be able to produce the DWARF register number.
1691 StringRef LayoutSpec;
1692 SMLoc RegEndLoc, LayoutLoc;
1693 StartLoc = Parser.getTok().getLoc();
1695 if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
1699 EndLoc = Parser.getTok().getLoc();
1704 AArch64AsmParser::OperandMatchResultTy
1705 AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
1706 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1707 // Since these operands occur in very limited circumstances, without
1708 // alternatives, we actually signal an error if there is no match. If relaxing
1709 // this, beware of unintended consequences: an immediate will be accepted
1710 // during matching, no matter how it gets into the AArch64Operand.
1711 const AsmToken &Tok = Parser.getTok();
1712 SMLoc S = Tok.getLoc();
1714 if (Tok.is(AsmToken::Identifier)) {
1716 uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
1719 Error(S, "operand specifier not recognised");
1720 return MatchOperand_ParseFail;
1723 Parser.Lex(); // We're done with the identifier. Eat it
1725 SMLoc E = Parser.getTok().getLoc();
1726 const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
1727 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
1728 return MatchOperand_Success;
1729 } else if (Tok.is(AsmToken::Hash)) {
1732 const MCExpr *ImmVal;
1733 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1734 return MatchOperand_ParseFail;
1736 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
1737 if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
1738 Error(S, "Invalid immediate for instruction");
1739 return MatchOperand_ParseFail;
1742 SMLoc E = Parser.getTok().getLoc();
1743 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
1744 return MatchOperand_Success;
1747 Error(S, "unexpected operand for instruction");
1748 return MatchOperand_ParseFail;
1751 AArch64AsmParser::OperandMatchResultTy
1752 AArch64AsmParser::ParseSysRegOperand(
1753 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1754 const AsmToken &Tok = Parser.getTok();
1756 // Any MSR/MRS operand will be an identifier, and we want to store it as some
1757 // kind of string: SPSel is valid for two different forms of MSR with two
1758 // different encodings. There's no collision at the moment, but the potential
1760 if (!Tok.is(AsmToken::Identifier)) {
1761 return MatchOperand_NoMatch;
1764 SMLoc S = Tok.getLoc();
1765 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
1766 Parser.Lex(); // Eat identifier
1768 return MatchOperand_Success;
1771 AArch64AsmParser::OperandMatchResultTy
1772 AArch64AsmParser::ParseLSXAddressOperand(
1773 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1774 SMLoc S = Parser.getTok().getLoc();
1777 SMLoc RegEndLoc, LayoutLoc;
1779 if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
1780 || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
1781 || Layout.size() != 0) {
1782 // Check Layout.size because we don't want to let "x3.4s" or similar
1784 return MatchOperand_NoMatch;
1786 Parser.Lex(); // Eat register
1788 if (Parser.getTok().is(AsmToken::RBrac)) {
1790 SMLoc E = Parser.getTok().getLoc();
1791 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1792 return MatchOperand_Success;
1795 // Otherwise, only ", #0" is valid
1797 if (Parser.getTok().isNot(AsmToken::Comma)) {
1798 Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
1799 return MatchOperand_ParseFail;
1801 Parser.Lex(); // Eat ','
1803 if (Parser.getTok().isNot(AsmToken::Hash)) {
1804 Error(Parser.getTok().getLoc(), "expected '#0'");
1805 return MatchOperand_ParseFail;
1807 Parser.Lex(); // Eat '#'
1809 if (Parser.getTok().isNot(AsmToken::Integer)
1810 || Parser.getTok().getIntVal() != 0 ) {
1811 Error(Parser.getTok().getLoc(), "expected '#0'");
1812 return MatchOperand_ParseFail;
1814 Parser.Lex(); // Eat '0'
1816 SMLoc E = Parser.getTok().getLoc();
1817 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1818 return MatchOperand_Success;
1821 AArch64AsmParser::OperandMatchResultTy
1822 AArch64AsmParser::ParseShiftExtend(
1823 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1824 StringRef IDVal = Parser.getTok().getIdentifier();
1825 std::string LowerID = IDVal.lower();
1827 A64SE::ShiftExtSpecifiers Spec =
1828 StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
1829 .Case("lsl", A64SE::LSL)
1830 .Case("msl", A64SE::MSL)
1831 .Case("lsr", A64SE::LSR)
1832 .Case("asr", A64SE::ASR)
1833 .Case("ror", A64SE::ROR)
1834 .Case("uxtb", A64SE::UXTB)
1835 .Case("uxth", A64SE::UXTH)
1836 .Case("uxtw", A64SE::UXTW)
1837 .Case("uxtx", A64SE::UXTX)
1838 .Case("sxtb", A64SE::SXTB)
1839 .Case("sxth", A64SE::SXTH)
1840 .Case("sxtw", A64SE::SXTW)
1841 .Case("sxtx", A64SE::SXTX)
1842 .Default(A64SE::Invalid);
1844 if (Spec == A64SE::Invalid)
1845 return MatchOperand_NoMatch;
1849 S = Parser.getTok().getLoc();
1852 if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
1853 Spec != A64SE::ROR && Spec != A64SE::MSL) {
1854 // The shift amount can be omitted for the extending versions, but not real
1856 // add x0, x0, x0, uxtb
1857 // is valid, and equivalent to
1858 // add x0, x0, x0, uxtb #0
1860 if (Parser.getTok().is(AsmToken::Comma) ||
1861 Parser.getTok().is(AsmToken::EndOfStatement) ||
1862 Parser.getTok().is(AsmToken::RBrac)) {
1863 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
1865 return MatchOperand_Success;
1869 // Eat # at beginning of immediate
1870 if (!Parser.getTok().is(AsmToken::Hash)) {
1871 Error(Parser.getTok().getLoc(),
1872 "expected #imm after shift specifier");
1873 return MatchOperand_ParseFail;
1877 // Make sure we do actually have a number
1878 if (!Parser.getTok().is(AsmToken::Integer)) {
1879 Error(Parser.getTok().getLoc(),
1880 "expected integer shift amount");
1881 return MatchOperand_ParseFail;
1883 unsigned Amount = Parser.getTok().getIntVal();
1885 E = Parser.getTok().getLoc();
1887 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
1890 return MatchOperand_Success;

// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
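
    // For these insert aliases ImmR holds (-lsb) modulo the register width and
    // ImmS holds (width - 1), so "ImmS >= ImmR" with a non-zero ImmR means
    // lsb + width spills past the top of the register; e.g.
    // "bfi x0, x1, #48, #24" asks for 24 bits starting at bit 48 of a 64-bit
    // register and is rejected below (explanatory note added for illustration).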
    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}

// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list
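// (For example, a conditional branch written "b.eq lbl" is split here into a
// "b" token, a "." token and an "eq" condition-code operand; example added
// for illustration.)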
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  size_t CondCodePos = Name.find('.');

  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both
      // are due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
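      // (For example, "ldr x0, [x1], #16" ends its address with ']' before
      // the post-index immediate, and "str x0, [x1, #16]!" uses '!' to
      // request writeback; examples added for illustration.)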
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
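/// For example, ".word 0, label" emits two 4-byte values, while the same
/// helper emits 2- or 8-byte values when reached via ".hword" or ".xword"
/// (illustrative note).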
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
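// For example, a general-dynamic TLS access sequence marks its descriptor
// call with ".tlsdesccall var" immediately before the "blr", so that the
// relocation needed for linker relaxation lands on the right instruction
// (explanatory note; "var" is a placeholder symbol).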
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                 MCStreamer &Out, unsigned &ErrorInfo,
                                 bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);
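
  // ErrorInfo is the index of the operand the matcher blamed for the failure,
  // or ~0U when no single operand was identified, so an index beyond the end
  // of Operands means the instruction was written with too few operands.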
  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");

  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");

  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0 or invalid register type");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 508]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1016]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  case Match_ShrImm8:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 8]");
  case Match_ShrImm16:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 16]");
  case Match_ShrImm32:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_ShrImm64:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"