1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 #include "MCTargetDesc/AArch64MCTargetDesc.h"
12 #include "MCTargetDesc/AArch64MCExpr.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCInst.h"
20 #include "llvm/MC/MCSubtargetInfo.h"
21 #include "llvm/MC/MCTargetAsmParser.h"
22 #include "llvm/MC/MCExpr.h"
23 #include "llvm/MC/MCRegisterInfo.h"
24 #include "llvm/MC/MCStreamer.h"
25 #include "llvm/MC/MCParser/MCAsmLexer.h"
26 #include "llvm/MC/MCParser/MCAsmParser.h"
27 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include "llvm/Support/TargetRegistry.h"
// AArch64AsmParser - target assembly parser that turns AArch64 assembly
// text into MCInsts via the TableGen-generated matcher.
// NOTE(review): this declaration is visibly truncated in this chunk —
// access specifiers, the STI/Parser member declarations, several closing
// braces and some OperandMatchResultTy return types are missing from view;
// confirm against the full file before relying on the exact member list.
class AArch64AsmParser : public MCTargetAsmParser {

// Declarations of the TableGen-generated matcher entry points
// (MatchInstructionImpl, MatchOperandParserImpl, ...).
#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  // Target-specific match result codes, allocated above the generic ones;
  // the generated operand diagnostic codes are appended by the include.
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"

  // Constructor: records subtarget/parser and computes the feature bits the
  // matcher consults for predicate checks.
  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

  // These are the public interface of the MCTargetAsmParser
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer&Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over interface: they return
  // an OperandMatchResultTy because it's less ambiguous than true/false or
  // -1/0/1 even if it is more verbose
  // NOTE(review): the return-type line for ParseOperand (and for several of
  // the declarations below) was lost in extraction.
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,

  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,

  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  // Convenience wrapper: instantiates the mapper type and defers to the
  // non-template ParseNamedImmOperand overload below.
  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);

  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  // Post-match semantic checks that TableGen constraints cannot express.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
/// Instances of this class represent a parsed AArch64 machine instruction.
// NOTE(review): truncated in this chunk — the KindTy enum header, the
// k_Register/k_ShiftExtend enumerators, the operand payload union/struct
// declarations and several accessor bodies and closing braces are missing
// from view; confirm against the full file.
class AArch64Operand : public MCParsedAsmOperand {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
    k_WrappedRegister // Load/store exclusive permit a wrapped register.

  // Source range of this operand, used for diagnostics.
  SMLoc StartLoc, EndLoc;

    unsigned ShiftAmount;                // LSL amount of an ImmWithLSL payload

    A64CC::CondCodes Code;               // payload of a k_CondCode operand

    A64SE::ShiftExtSpecifiers ShiftType; // payload of a shift/extend operand

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;

  // Payload accessors: each asserts the operand actually holds the
  // requested kind before touching the corresponding union member.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;

  /// If E is not a plain constant, report true and set Variant to its
  /// AArch64 modifier kind (VK_AARCH64_None for generic symbolic
  /// expressions); for MCConstantExprs report false.
  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
244 bool isCondCode() const { return Kind == k_CondCode; }
245 bool isToken() const { return Kind == k_Token; }
246 bool isReg() const { return Kind == k_Register; }
247 bool isImm() const { return Kind == k_Immediate; }
248 bool isMem() const { return false; }
249 bool isFPImm() const { return Kind == k_FPImmediate; }
250 bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
251 bool isSysReg() const { return Kind == k_SysReg; }
252 bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
253 bool isWrappedReg() const { return Kind == k_WrappedRegister; }
255 bool isAddSubImmLSL0() const {
256 if (!isImmWithLSL()) return false;
257 if (ImmWithLSL.ShiftAmount != 0) return false;
259 AArch64MCExpr::VariantKind Variant;
260 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
261 return Variant == AArch64MCExpr::VK_AARCH64_LO12
262 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
263 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
264 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
265 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
266 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
269 // Otherwise it should be a real immediate in range:
270 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
271 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
274 bool isAddSubImmLSL12() const {
275 if (!isImmWithLSL()) return false;
276 if (ImmWithLSL.ShiftAmount != 12) return false;
278 AArch64MCExpr::VariantKind Variant;
279 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
280 return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
281 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
284 // Otherwise it should be a real immediate in range:
285 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
286 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
289 template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
290 if (!isShiftOrExtend()) return false;
292 A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
293 if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
296 if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
299 return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
302 bool isAdrpLabel() const {
303 if (!isImm()) return false;
305 AArch64MCExpr::VariantKind Variant;
306 if (isNonConstantExpr(getImm(), Variant)) {
307 return Variant == AArch64MCExpr::VK_AARCH64_None
308 || Variant == AArch64MCExpr::VK_AARCH64_GOT
309 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
310 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
313 return isLabel<21, 4096>();
316 template<unsigned RegWidth> bool isBitfieldWidth() const {
317 if (!isImm()) return false;
319 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
320 if (!CE) return false;
322 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
325 template<int RegWidth>
326 bool isCVTFixedPos() const {
327 if (!isImm()) return false;
329 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
330 if (!CE) return false;
332 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
335 bool isFMOVImm() const {
336 if (!isFPImm()) return false;
338 APFloat RealVal(FPImm.Val);
340 return A64Imms::isFPImm(RealVal, ImmVal);
343 bool isFPZero() const {
344 if (!isFPImm()) return false;
346 APFloat RealVal(FPImm.Val);
347 return RealVal.isPosZero();
350 template<unsigned field_width, unsigned scale>
351 bool isLabel() const {
352 if (!isImm()) return false;
354 if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
356 } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
357 int64_t Val = CE->getValue();
358 int64_t Min = - (scale * (1LL << (field_width - 1)));
359 int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
360 return (Val % scale) == 0 && Val >= Min && Val <= Max;
363 // N.b. this disallows explicit relocation specifications via an
364 // AArch64MCExpr. Users needing that behaviour
368 bool isLane1() const {
369 if (!isImm()) return false;
371 // Because it's come through custom assembly parsing, it must always be a
372 // constant expression.
373 return cast<MCConstantExpr>(getImm())->getValue() == 1;
376 bool isLoadLitLabel() const {
377 if (!isImm()) return false;
379 AArch64MCExpr::VariantKind Variant;
380 if (isNonConstantExpr(getImm(), Variant)) {
381 return Variant == AArch64MCExpr::VK_AARCH64_None
382 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
385 return isLabel<19, 4>();
388 template<unsigned RegWidth> bool isLogicalImm() const {
389 if (!isImm()) return false;
391 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
392 if (!CE) return false;
395 return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
398 template<unsigned RegWidth> bool isLogicalImmMOV() const {
399 if (!isLogicalImm<RegWidth>()) return false;
401 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
403 // The move alias for ORR is only valid if the immediate cannot be
404 // represented with a move (immediate) instruction; they take priority.
406 return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
407 && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
410 template<int MemSize>
411 bool isOffsetUImm12() const {
412 if (!isImm()) return false;
414 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
416 // Assume they know what they're doing for now if they've given us a
417 // non-constant expression. In principle we could check for ridiculous
418 // things that can't possibly work or relocations that would almost
419 // certainly break resulting code.
423 int64_t Val = CE->getValue();
425 // Must be a multiple of the access size in bytes.
426 if ((Val & (MemSize - 1)) != 0) return false;
428 // Must be 12-bit unsigned
429 return Val >= 0 && Val <= 0xfff * MemSize;
432 template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
433 bool isShift() const {
434 if (!isShiftOrExtend()) return false;
436 if (ShiftExtend.ShiftType != SHKind)
439 return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
442 bool isMOVN32Imm() const {
443 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
444 AArch64MCExpr::VK_AARCH64_SABS_G0,
445 AArch64MCExpr::VK_AARCH64_SABS_G1,
446 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
447 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
448 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
449 AArch64MCExpr::VK_AARCH64_TPREL_G1,
450 AArch64MCExpr::VK_AARCH64_TPREL_G0,
452 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
454 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
457 bool isMOVN64Imm() const {
458 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
459 AArch64MCExpr::VK_AARCH64_SABS_G0,
460 AArch64MCExpr::VK_AARCH64_SABS_G1,
461 AArch64MCExpr::VK_AARCH64_SABS_G2,
462 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
463 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
464 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
465 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
466 AArch64MCExpr::VK_AARCH64_TPREL_G2,
467 AArch64MCExpr::VK_AARCH64_TPREL_G1,
468 AArch64MCExpr::VK_AARCH64_TPREL_G0,
470 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
472 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
476 bool isMOVZ32Imm() const {
477 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
478 AArch64MCExpr::VK_AARCH64_ABS_G0,
479 AArch64MCExpr::VK_AARCH64_ABS_G1,
480 AArch64MCExpr::VK_AARCH64_SABS_G0,
481 AArch64MCExpr::VK_AARCH64_SABS_G1,
482 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
483 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
484 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
485 AArch64MCExpr::VK_AARCH64_TPREL_G1,
486 AArch64MCExpr::VK_AARCH64_TPREL_G0,
488 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
490 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
493 bool isMOVZ64Imm() const {
494 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
495 AArch64MCExpr::VK_AARCH64_ABS_G0,
496 AArch64MCExpr::VK_AARCH64_ABS_G1,
497 AArch64MCExpr::VK_AARCH64_ABS_G2,
498 AArch64MCExpr::VK_AARCH64_ABS_G3,
499 AArch64MCExpr::VK_AARCH64_SABS_G0,
500 AArch64MCExpr::VK_AARCH64_SABS_G1,
501 AArch64MCExpr::VK_AARCH64_SABS_G2,
502 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
503 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
504 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
505 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
506 AArch64MCExpr::VK_AARCH64_TPREL_G2,
507 AArch64MCExpr::VK_AARCH64_TPREL_G1,
508 AArch64MCExpr::VK_AARCH64_TPREL_G0,
510 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
512 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
515 bool isMOVK32Imm() const {
516 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
517 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
518 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
519 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
520 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
521 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
522 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
523 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
525 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
527 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
530 bool isMOVK64Imm() const {
531 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
532 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
533 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
534 AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
535 AArch64MCExpr::VK_AARCH64_ABS_G3,
536 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
537 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
538 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
539 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
540 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
542 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
544 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
547 bool isMoveWideImm(unsigned RegWidth,
548 AArch64MCExpr::VariantKind *PermittedModifiers,
549 unsigned NumModifiers) const {
550 if (!isImmWithLSL()) return false;
552 if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
553 if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
555 AArch64MCExpr::VariantKind Modifier;
556 if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
557 // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
558 if (!ImmWithLSL.ImplicitAmount) return false;
560 for (unsigned i = 0; i < NumModifiers; ++i)
561 if (PermittedModifiers[i] == Modifier) return true;
566 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
567 return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
570 template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
571 bool isMoveWideMovAlias() const {
572 if (!isImm()) return false;
574 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
575 if (!CE) return false;
578 uint64_t Value = CE->getValue();
580 // If this is a 32-bit instruction then all bits above 32 should be the
581 // same: either of these is fine because signed/unsigned values should be
583 if (RegWidth == 32) {
584 if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
587 Value &= 0xffffffffULL;
590 return isValidImm(RegWidth, Value, UImm16, Shift);
593 bool isMSRWithReg() const {
594 if (!isSysReg()) return false;
596 bool IsKnownRegister;
597 StringRef Name(SysReg.Data, SysReg.Length);
598 A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
600 return IsKnownRegister;
603 bool isMSRPState() const {
604 if (!isSysReg()) return false;
606 bool IsKnownRegister;
607 StringRef Name(SysReg.Data, SysReg.Length);
608 A64PState::PStateMapper().fromString(Name, IsKnownRegister);
610 return IsKnownRegister;
614 if (!isSysReg()) return false;
616 // First check against specific MSR-only (write-only) registers
617 bool IsKnownRegister;
618 StringRef Name(SysReg.Data, SysReg.Length);
619 A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
621 return IsKnownRegister;
624 bool isPRFM() const {
625 if (!isImm()) return false;
627 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
632 return CE->getValue() >= 0 && CE->getValue() <= 31;
635 template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
636 if (!isShiftOrExtend()) return false;
638 if (ShiftExtend.ShiftType != SHKind)
641 return ShiftExtend.Amount <= 4;
644 bool isRegExtendLSL() const {
645 if (!isShiftOrExtend()) return false;
647 if (ShiftExtend.ShiftType != A64SE::LSL)
650 return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
653 template<int MemSize> bool isSImm7Scaled() const {
654 if (!isImm()) return false;
656 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
657 if (!CE) return false;
659 int64_t Val = CE->getValue();
660 if (Val % MemSize != 0) return false;
664 return Val >= -64 && Val < 64;
667 template<int BitWidth>
668 bool isSImm() const {
669 if (!isImm()) return false;
671 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672 if (!CE) return false;
674 return CE->getValue() >= -(1LL << (BitWidth - 1))
675 && CE->getValue() < (1LL << (BitWidth - 1));
678 template<int bitWidth>
679 bool isUImm() const {
680 if (!isImm()) return false;
682 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683 if (!CE) return false;
685 return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
688 bool isUImm() const {
689 if (!isImm()) return false;
691 return isa<MCConstantExpr>(getImm());
694 static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
695 unsigned ShiftAmount,
698 AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
699 Op->ImmWithLSL.Val = Val;
700 Op->ImmWithLSL.ShiftAmount = ShiftAmount;
701 Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
705 static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
707 AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
708 Op->CondCode.Code = Code;
712 static AArch64Operand *CreateFPImm(double Val,
714 AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
719 static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
720 AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
725 static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
726 AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
727 Op->Reg.RegNum = RegNum;
731 static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
732 AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
733 Op->Reg.RegNum = RegNum;
737 static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
741 AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
742 Op->ShiftExtend.ShiftType = ShiftTyp;
743 Op->ShiftExtend.Amount = Amount;
744 Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
748 static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
749 AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
750 Op->Tok.Data = Str.data();
751 Op->Tok.Length = Str.size();
755 static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
756 AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
757 Op->Tok.Data = Str.data();
758 Op->Tok.Length = Str.size();
763 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
764 // Add as immediates when possible.
765 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
766 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
768 Inst.addOperand(MCOperand::CreateExpr(Expr));
771 template<unsigned RegWidth>
772 void addBFILSBOperands(MCInst &Inst, unsigned N) const {
773 assert(N == 1 && "Invalid number of operands!");
774 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
775 unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
776 Inst.addOperand(MCOperand::CreateImm(EncodedVal));
779 void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
780 assert(N == 1 && "Invalid number of operands!");
781 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
782 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
785 void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
786 assert(N == 1 && "Invalid number of operands!");
788 uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
789 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
791 Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
794 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
795 assert(N == 1 && "Invalid number of operands!");
796 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
799 void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
800 assert(N == 1 && "Invalid number of operands!");
802 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
803 Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
806 void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
807 assert(N == 1 && "Invalid number of operands!");
809 APFloat RealVal(FPImm.Val);
811 A64Imms::isFPImm(RealVal, ImmVal);
813 Inst.addOperand(MCOperand::CreateImm(ImmVal));
816 void addFPZeroOperands(MCInst &Inst, unsigned N) const {
817 assert(N == 1 && "Invalid number of operands");
818 Inst.addOperand(MCOperand::CreateImm(0));
821 void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
822 assert(N == 1 && "Invalid number of operands!");
823 unsigned Encoded = A64InvertCondCode(getCondCode());
824 Inst.addOperand(MCOperand::CreateImm(Encoded));
827 void addRegOperands(MCInst &Inst, unsigned N) const {
828 assert(N == 1 && "Invalid number of operands!");
829 Inst.addOperand(MCOperand::CreateReg(getReg()));
832 void addImmOperands(MCInst &Inst, unsigned N) const {
833 assert(N == 1 && "Invalid number of operands!");
834 addExpr(Inst, getImm());
837 template<int MemSize>
838 void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
839 assert(N == 1 && "Invalid number of operands!");
841 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
842 uint64_t Val = CE->getValue() / MemSize;
843 Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
846 template<int BitWidth>
847 void addSImmOperands(MCInst &Inst, unsigned N) const {
848 assert(N == 1 && "Invalid number of operands!");
850 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
851 uint64_t Val = CE->getValue();
852 Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
855 void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
856 assert (N == 1 && "Invalid number of operands!");
858 addExpr(Inst, ImmWithLSL.Val);
861 template<unsigned field_width, unsigned scale>
862 void addLabelOperands(MCInst &Inst, unsigned N) const {
863 assert(N == 1 && "Invalid number of operands!");
865 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
868 addExpr(Inst, Imm.Val);
872 int64_t Val = CE->getValue();
873 assert(Val % scale == 0 && "Unaligned immediate in instruction");
876 Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
879 template<int MemSize>
880 void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
881 assert(N == 1 && "Invalid number of operands!");
883 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
884 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
886 Inst.addOperand(MCOperand::CreateExpr(getImm()));
890 template<unsigned RegWidth>
891 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
892 assert(N == 1 && "Invalid number of operands");
893 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
896 A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
898 Inst.addOperand(MCOperand::CreateImm(Bits));
901 void addMRSOperands(MCInst &Inst, unsigned N) const {
902 assert(N == 1 && "Invalid number of operands!");
905 StringRef Name(SysReg.Data, SysReg.Length);
906 uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
908 Inst.addOperand(MCOperand::CreateImm(Bits));
911 void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
912 assert(N == 1 && "Invalid number of operands!");
915 StringRef Name(SysReg.Data, SysReg.Length);
916 uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
918 Inst.addOperand(MCOperand::CreateImm(Bits));
921 void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
922 assert(N == 1 && "Invalid number of operands!");
925 StringRef Name(SysReg.Data, SysReg.Length);
926 uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
928 Inst.addOperand(MCOperand::CreateImm(Bits));
931 void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
932 assert(N == 2 && "Invalid number of operands!");
934 addExpr(Inst, ImmWithLSL.Val);
936 AArch64MCExpr::VariantKind Variant;
937 if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
938 Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
942 // We know it's relocated
944 case AArch64MCExpr::VK_AARCH64_ABS_G0:
945 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
946 case AArch64MCExpr::VK_AARCH64_SABS_G0:
947 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
948 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
949 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
950 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
951 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
952 Inst.addOperand(MCOperand::CreateImm(0));
954 case AArch64MCExpr::VK_AARCH64_ABS_G1:
955 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
956 case AArch64MCExpr::VK_AARCH64_SABS_G1:
957 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
958 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
959 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
960 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
961 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
962 Inst.addOperand(MCOperand::CreateImm(1));
964 case AArch64MCExpr::VK_AARCH64_ABS_G2:
965 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
966 case AArch64MCExpr::VK_AARCH64_SABS_G2:
967 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
968 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
969 Inst.addOperand(MCOperand::CreateImm(2));
971 case AArch64MCExpr::VK_AARCH64_ABS_G3:
972 Inst.addOperand(MCOperand::CreateImm(3));
974 default: llvm_unreachable("Inappropriate move wide relocation");
978 template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
979 void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
980 assert(N == 2 && "Invalid number of operands!");
983 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
984 uint64_t Value = CE->getValue();
986 if (RegWidth == 32) {
987 Value &= 0xffffffffULL;
990 bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
992 assert(Valid && "Invalid immediates should have been weeded out by now");
994 Inst.addOperand(MCOperand::CreateImm(UImm16));
995 Inst.addOperand(MCOperand::CreateImm(Shift));
998 void addPRFMOperands(MCInst &Inst, unsigned N) const {
999 assert(N == 1 && "Invalid number of operands!");
1001 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1002 assert(CE->getValue() >= 0 && CE->getValue() <= 31
1003 && "PRFM operand should be 5-bits");
1005 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1008 // For Add-sub (extended register) operands.
1009 void addRegExtendOperands(MCInst &Inst, unsigned N) const {
1010 assert(N == 1 && "Invalid number of operands!");
1012 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1015 // For the extend in load-store (register offset) instructions.
1016 template<unsigned MemSize>
1017 void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
1018 addAddrRegExtendOperands(Inst, N, MemSize);
1021 void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
1022 unsigned MemSize) const {
1023 assert(N == 1 && "Invalid number of operands!");
1025 // First bit of Option is set in instruction classes, the high two bits are
1027 unsigned OptionHi = 0;
1028 switch (ShiftExtend.ShiftType) {
1038 llvm_unreachable("Invalid extend type for register offset");
1042 if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
1044 else if (MemSize != 1 && ShiftExtend.Amount != 0)
1047 Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
1049 void addShiftOperands(MCInst &Inst, unsigned N) const {
1050 assert(N == 1 && "Invalid number of operands!");
1052 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1056 } // end anonymous namespace.
1058 AArch64AsmParser::OperandMatchResultTy
1059 AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1060 StringRef Mnemonic) {
1062 // See if the operand has a custom parser
1063 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1065 // It could either succeed, fail or just not care.
1066 if (ResTy != MatchOperand_NoMatch)
1069 switch (getLexer().getKind()) {
1071 Error(Parser.getTok().getLoc(), "unexpected token in operand");
1072 return MatchOperand_ParseFail;
1073 case AsmToken::Identifier: {
1074 // It might be in the LSL/UXTB family ...
1075 OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
1077 // We can only continue if no tokens were eaten.
1078 if (GotShift != MatchOperand_NoMatch)
1081 // ... or it might be a register ...
1082 uint32_t NumLanes = 0;
1083 OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
1084 assert(GotReg != MatchOperand_ParseFail
1085 && "register parsing shouldn't partially succeed");
1087 if (GotReg == MatchOperand_Success) {
1088 if (Parser.getTok().is(AsmToken::LBrac))
1089 return ParseNEONLane(Operands, NumLanes);
1091 return MatchOperand_Success;
1094 // ... or it might be a symbolish thing
1097 case AsmToken::LParen: // E.g. (strcmp-4)
1098 case AsmToken::Integer: // 1f, 2b labels
1099 case AsmToken::String: // quoted labels
1100 case AsmToken::Dot: // . is Current location
1101 case AsmToken::Dollar: // $ is PC
1102 case AsmToken::Colon: {
1103 SMLoc StartLoc = Parser.getTok().getLoc();
1105 const MCExpr *ImmVal = 0;
1107 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1108 return MatchOperand_ParseFail;
1110 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1111 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1112 return MatchOperand_Success;
1114 case AsmToken::Hash: { // Immediates
1115 SMLoc StartLoc = Parser.getTok().getLoc();
1117 const MCExpr *ImmVal = 0;
1120 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1121 return MatchOperand_ParseFail;
1123 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1124 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1125 return MatchOperand_Success;
1127 case AsmToken::LBrac: {
1128 SMLoc Loc = Parser.getTok().getLoc();
1129 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1130 Parser.Lex(); // Eat '['
1132 // There's no comma after a '[', so we can parse the next operand
1134 return ParseOperand(Operands, Mnemonic);
1136 // The following will likely be useful later, but not in very early cases
1137 case AsmToken::LCurly: // Weird SIMD lists
1138 llvm_unreachable("Don't know how to deal with '{' in operand");
1139 return MatchOperand_ParseFail;
// ParseImmediate - parse an immediate operand expression into ExprVal.
// Accepts either a plain MC expression, or one introduced by an AArch64
// relocation specifier (":lo12:", ":got:", ...), in which case the
// sub-expression is wrapped in an AArch64MCExpr carrying the VariantKind.
// NOTE(review): this listing is elided (embedded line numbers jump, e.g.
// 1149 -> 1152), so some statements and closing braces are not visible.
1143 AArch64AsmParser::OperandMatchResultTy
1144 AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
1145 if (getLexer().is(AsmToken::Colon)) {
1146 AArch64MCExpr::VariantKind RefKind;
// A leading ':' introduces a relocation specifier; ParseRelocPrefix
// consumes ":spec:" and reports the corresponding VariantKind.
1148 OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
1149 if (ResTy != MatchOperand_Success)
1152 const MCExpr *SubExprVal;
1153 if (getParser().ParseExpression(SubExprVal))
1154 return MatchOperand_ParseFail;
// Wrap the expression so the relocation kind survives to encoding/emission.
1156 ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
1157 return MatchOperand_Success;
1160 // No weird AArch64MCExpr prefix
1161 return getParser().ParseExpression(ExprVal)
1162 ? MatchOperand_ParseFail : MatchOperand_Success;
1165 // A lane attached to a NEON register. "[N]", which should yield three tokens:
1166 // '[', N, ']'. A hash is not allowed to precede the immediate here.
// Emits three operands: a "[" token, the lane index as an immediate, and a
// "]" token. NumLanes (derived from the register layout, e.g. ".4s" -> 4)
// bounds the permitted lane number.
1167 AArch64AsmParser::OperandMatchResultTy
1168 AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1169 uint32_t NumLanes) {
1170 SMLoc Loc = Parser.getTok().getLoc();
1172 assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
1173 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1174 Parser.Lex(); // Eat '['
1176 if (Parser.getTok().isNot(AsmToken::Integer)) {
1177 Error(Parser.getTok().getLoc(), "expected lane number");
1178 return MatchOperand_ParseFail;
// Reject lanes outside the layout, e.g. v0.2d[2].
1181 if (Parser.getTok().getIntVal() >= NumLanes) {
1182 Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
1183 return MatchOperand_ParseFail;
1186 const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
1188 SMLoc S = Parser.getTok().getLoc();
1189 Parser.Lex(); // Eat actual lane
1190 SMLoc E = Parser.getTok().getLoc();
1191 Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
1194 if (Parser.getTok().isNot(AsmToken::RBrac)) {
1195 Error(Parser.getTok().getLoc(), "expected ']' after lane");
1196 return MatchOperand_ParseFail;
// NOTE(review): the "]" token reuses Loc (the location of '[') instead of
// the current token's location - confirm this is intentional.
1199 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
1200 Parser.Lex(); // Eat ']'
1202 return MatchOperand_Success;
// ParseRelocPrefix - parse a ":specifier:" relocation prefix (e.g. ":lo12:",
// ":got:", ":tprel_g1:") and set RefKind to the matching AArch64MCExpr
// VariantKind. On entry the current token must be the opening ':'; on
// success both colons and the identifier have been consumed.
// The table below covers GOT, absolute MOVZ/MOVK pieces (abs_g*), and the
// TLS families (dtprel_*, gottprel*, tprel_*, tlsdesc*); matching is
// case-insensitive via .lower().
// NOTE(review): elided listing - the Lex() that eats the opening ':' and
// some closing braces are on lines not visible here.
1205 AArch64AsmParser::OperandMatchResultTy
1206 AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
1207 assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
1210 if (getLexer().isNot(AsmToken::Identifier)) {
1211 Error(Parser.getTok().getLoc(),
1212 "expected relocation specifier in operand after ':'");
1213 return MatchOperand_ParseFail;
1216 std::string LowerCase = Parser.getTok().getIdentifier().lower();
1217 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
1218 .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
1219 .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
1220 .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
1221 .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
1222 .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
1223 .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
1224 .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
1225 .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
1226 .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
1227 .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
1228 .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
1229 .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
1230 .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
1231 .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
1232 .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
1233 .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
1234 .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
1235 .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
1236 .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
1237 .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
1238 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
1239 .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
1240 .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
1241 .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
1242 .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
1243 .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
1244 .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
1245 .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
1246 .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
1247 .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
1248 .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
1249 .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
1250 .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
1251 .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
1252 .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
1253 .Default(AArch64MCExpr::VK_AARCH64_None)
// VK_AARCH64_None means the identifier was not a known specifier.
1255 if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
1256 Error(Parser.getTok().getLoc(),
1257 "expected relocation specifier in operand after ':'");
1258 return MatchOperand_ParseFail;
1260 Parser.Lex(); // Eat identifier
// The specifier must be terminated by a second ':'.
1262 if (getLexer().isNot(AsmToken::Colon)) {
1263 Error(Parser.getTok().getLoc(),
1264 "expected ':' after relocation specifier");
1265 return MatchOperand_ParseFail;
1268 return MatchOperand_Success;
// ParseImmWithLSLOperand - parse "#imm" optionally followed by ", lsl #N"
// (N non-negative), producing an ImmWithLSL operand. Used for MOVZ/MOVK-style
// shifted immediates. Returns NoMatch if the operand does not start with '#'.
// NOTE(review): elided listing - the declaration of 'Imm' (used at 1282) and
// several Lex()/brace lines are not visible here.
1271 AArch64AsmParser::OperandMatchResultTy
1272 AArch64AsmParser::ParseImmWithLSLOperand(
1273 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1274 // FIXME?: I want to live in a world where immediates must start with
1275 // #. Please don't dash my hopes (well, do if you have a good reason).
1276 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1278 SMLoc S = Parser.getTok().getLoc();
1279 Parser.Lex(); // Eat '#'
1282 if (ParseImmediate(Imm) != MatchOperand_Success)
1283 return MatchOperand_ParseFail;
// No comma after the immediate: implicit "lsl #0" (third arg 'true' marks
// the shift as implicit).
1284 else if (Parser.getTok().isNot(AsmToken::Comma)) {
1285 SMLoc E = Parser.getTok().getLoc();
1286 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
1287 return MatchOperand_Success;
1293 // The optional operand must be "lsl #N" where N is non-negative.
1294 if (Parser.getTok().is(AsmToken::Identifier)
1295 && Parser.getTok().getIdentifier().lower() == "lsl") {
1298 if (Parser.getTok().is(AsmToken::Hash)) {
1301 if (Parser.getTok().isNot(AsmToken::Integer)) {
1302 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate")
1303 return MatchOperand_ParseFail;
1308 int64_t ShiftAmount = Parser.getTok().getIntVal();
1310 if (ShiftAmount < 0) {
1311 Error(Parser.getTok().getLoc(), "positive shift amount required")
1312 return MatchOperand_ParseFail;
1314 Parser.Lex(); // Eat the number
1316 SMLoc E = Parser.getTok().getLoc();
// Explicit shift: 'false' marks the "lsl #N" as spelled out by the user.
1317 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
1319 return MatchOperand_Success;
// ParseCondCodeOperand - parse a bare condition-code identifier ("eq", "ne",
// ...) into a CondCode operand. Returns NoMatch (without consuming anything)
// if the token is not an identifier or not a valid condition code, so other
// operand parsers can try it.
1323 AArch64AsmParser::OperandMatchResultTy
1324 AArch64AsmParser::ParseCondCodeOperand(
1325 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1326 if (Parser.getTok().isNot(AsmToken::Identifier))
1327 return MatchOperand_NoMatch;
1329 StringRef Tok = Parser.getTok().getIdentifier();
1330 A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
1332 if (CondCode == A64CC::Invalid)
1333 return MatchOperand_NoMatch;
1335 SMLoc S = Parser.getTok().getLoc();
1336 Parser.Lex(); // Eat condition code
1337 SMLoc E = Parser.getTok().getLoc();
1339 Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
1340 return MatchOperand_Success;
// ParseCRxOperand - parse a coprocessor register name "cN" (0 <= N <= 15),
// as used by SYS-style instructions, into a constant immediate operand.
// Matching is case-insensitive.
// NOTE(review): elided listing - the declaration of CRNum (used at 1360) and
// the Lex() consuming the identifier are on lines not visible here.
1343 AArch64AsmParser::OperandMatchResultTy
1344 AArch64AsmParser::ParseCRxOperand(
1345 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1346 SMLoc S = Parser.getTok().getLoc();
1347 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1348 Error(S, "Expected cN operand where 0 <= N <= 15");
1349 return MatchOperand_ParseFail;
1352 std::string LowerTok = Parser.getTok().getIdentifier().lower();
1353 StringRef Tok(LowerTok);
1354 if (Tok[0] != 'c') {
1355 Error(S, "Expected cN operand where 0 <= N <= 15");
1356 return MatchOperand_ParseFail;
// Everything after the 'c' must be a decimal number in [0, 15].
1360 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1361 if (BadNum || CRNum > 15) {
1362 Error(S, "Expected cN operand where 0 <= N <= 15");
1363 return MatchOperand_ParseFail;
1366 const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
1369 SMLoc E = Parser.getTok().getLoc();
1371 Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
1372 return MatchOperand_Success;
// ParseFPImmOperand - parse "#[+-]real" into an FPImm operand (e.g. for
// FMOV). The value is parsed as an IEEE double; an optional leading '-'
// negates it via changeSign.
// NOTE(review): elided listing - the statement setting Negative = true inside
// the Minus branch (line 1388) is not visible here.
1375 AArch64AsmParser::OperandMatchResultTy
1376 AArch64AsmParser::ParseFPImmOperand(
1377 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1379 // FIXME?: I want to live in a world where immediates must start with
1380 // #. Please don't dash my hopes (well, do if you have a good reason).
1381 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1383 SMLoc S = Parser.getTok().getLoc();
1384 Parser.Lex(); // Eat '#'
1386 bool Negative = false;
1387 if (Parser.getTok().is(AsmToken::Minus)) {
1389 Parser.Lex(); // Eat '-'
1390 } else if (Parser.getTok().is(AsmToken::Plus)) {
1391 Parser.Lex(); // Eat '+'
1394 if (Parser.getTok().isNot(AsmToken::Real)) {
1395 Error(S, "Expected floating-point immediate");
1396 return MatchOperand_ParseFail;
1399 APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
1400 if (Negative) RealVal.changeSign();
1401 double DblVal = RealVal.convertToDouble();
1403 Parser.Lex(); // Eat real number
1404 SMLoc E = Parser.getTok().getLoc();
1406 Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
1407 return MatchOperand_Success;
1411 // Automatically generated
1412 static unsigned MatchRegisterName(StringRef Name);
// IdentifyRegister - inspect (without consuming) the current token and decide
// whether it names a register, optionally with a trailing NEON layout
// specifier such as "v0.4s". On success RegNum holds the register number,
// RegEndLoc points just past the register name, and Layout/LayoutLoc describe
// the ".2d"/".8b"-style suffix (Layout is empty when there is none).
// Aliases ip0/ip1/fp/lr map to x16/x17/x29/x30.
// NOTE(review): elided listing - the 'bool' return type line, the Layout
// parameter in the signature, early 'return false' statements, and the tail
// of the function (after the "Malformed register" comment) are not visible.
1415 AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
1417 SMLoc &LayoutLoc) const {
1418 const AsmToken &Tok = Parser.getTok();
1420 if (Tok.isNot(AsmToken::Identifier))
1423 std::string LowerReg = Tok.getString().lower();
1424 size_t DotPos = LowerReg.find('.');
// substr(0, npos) is the whole string when there is no '.', so this matches
// both plain and layout-suffixed register names.
1426 RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
1427 if (RegNum == AArch64::NoRegister) {
1428 RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
1429 .Case("ip0", AArch64::X16)
1430 .Case("ip1", AArch64::X17)
1431 .Case("fp", AArch64::X29)
1432 .Case("lr", AArch64::X30)
1433 .Default(AArch64::NoRegister);
1435 if (RegNum == AArch64::NoRegister)
1438 SMLoc S = Tok.getLoc();
1439 RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
1441 if (DotPos == StringRef::npos) {
1442 Layout = StringRef();
1444 // Everything afterwards needs to be a literal token, expected to be
1445 // '.2d','.b' etc for vector registers.
1447 // This StringSwitch validates the input and (perhaps more importantly)
1448 // gives us a permanent string to use in the token (a pointer into LowerReg
1449 // would go out of scope when we return).
1450 LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
1451 std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
1452 Layout = StringSwitch<const char *>(LayoutText)
1453 .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
1454 .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
1455 .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
1456 .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
1459 if (Layout.size() == 0) {
1460 // Malformed register
// ParseRegister (operand form) - parse a register operand, pushing a Reg
// operand and, for vector registers, an additional layout token (".4s" etc.).
// NumLanes is set from the layout: either the explicit count ("4" in ".4s"),
// or, when only an element size is given (".s"), the lane count of a full
// 128-bit vector of that element. NumLanes is used later for lane-index
// range checking in ParseNEONLane.
// NOTE(review): elided listing - the RegNum/Layout local declarations and the
// Lex() consuming the register token are on lines not visible here.
1468 AArch64AsmParser::OperandMatchResultTy
1469 AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1470 uint32_t &NumLanes) {
1473 SMLoc RegEndLoc, LayoutLoc;
1474 SMLoc S = Parser.getTok().getLoc();
1476 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
1477 return MatchOperand_NoMatch;
1479 Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
1481 if (Layout.size() != 0) {
1482 unsigned long long TmpLanes = 0;
1483 llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
1484 if (TmpLanes != 0) {
1485 NumLanes = TmpLanes;
1487 // If the number of lanes isn't specified explicitly, a valid instruction
1488 // will have an element specifier and be capable of acting on the entire
1490 switch (Layout.back()) {
1491 default: llvm_unreachable("Invalid layout specifier");
1492 case 'b': NumLanes = 16; break;
1493 case 'h': NumLanes = 8; break;
1494 case 's': NumLanes = 4; break;
1495 case 'd': NumLanes = 2; break;
// The layout suffix becomes a literal token operand for the matcher.
1499 Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
1503 return MatchOperand_Success;
// ParseRegister (MCTargetAsmParser callback form) - resolve a register name
// to its number for non-instruction uses such as DWARF frame directives.
// Layout suffixes and lanes are identified but ignored.
// NOTE(review): elided listing - the 'bool' return type line, the RegNo-
// validity handling, the Lex() consuming the token, and the final return are
// on lines not visible here.
1507 AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1509 // This callback is used for things like DWARF frame directives in
1510 // assembly. They don't care about things like NEON layouts or lanes, they
1511 // just want to be able to produce the DWARF register number.
1512 StringRef LayoutSpec;
1513 SMLoc RegEndLoc, LayoutLoc;
1514 StartLoc = Parser.getTok().getLoc();
1516 if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
1520 EndLoc = Parser.getTok().getLoc();
// ParseNamedImmOperand - parse an operand that may be written either as a
// symbolic name (looked up via the given NamedImmMapper, e.g. barrier or
// prefetch operands) or as "#imm". Unlike most operand parsers this reports
// a hard error rather than NoMatch on failure - see the comment below.
// NOTE(review): elided listing - the ValidName flag declaration/check around
// line 1538 is not fully visible here.
1525 AArch64AsmParser::OperandMatchResultTy
1526 AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
1527 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1528 // Since these operands occur in very limited circumstances, without
1529 // alternatives, we actually signal an error if there is no match. If relaxing
1530 // this, beware of unintended consequences: an immediate will be accepted
1531 // during matching, no matter how it gets into the AArch64Operand.
1532 const AsmToken &Tok = Parser.getTok();
1533 SMLoc S = Tok.getLoc();
1535 if (Tok.is(AsmToken::Identifier)) {
// Symbolic form: map the (lower-cased) name to its encoding.
1537 uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
1540 Error(S, "operand specifier not recognised");
1541 return MatchOperand_ParseFail;
1544 Parser.Lex(); // We're done with the identifier. Eat it
1546 SMLoc E = Parser.getTok().getLoc();
1547 const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
1548 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
1549 return MatchOperand_Success;
1550 } else if (Tok.is(AsmToken::Hash)) {
// Numeric form: "#imm"; must be a constant the mapper considers valid.
1553 const MCExpr *ImmVal;
1554 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1555 return MatchOperand_ParseFail;
1557 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
1558 if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
1559 Error(S, "Invalid immediate for instruction");
1560 return MatchOperand_ParseFail;
1563 SMLoc E = Parser.getTok().getLoc();
1564 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
1565 return MatchOperand_Success;
1568 Error(S, "unexpected operand for instruction");
1569 return MatchOperand_ParseFail;
// ParseSysRegOperand - parse a system register name for MSR/MRS. The name is
// stored as a raw string operand (not resolved to an encoding here), since
// the same spelling can be valid for more than one MSR form.
1572 AArch64AsmParser::OperandMatchResultTy
1573 AArch64AsmParser::ParseSysRegOperand(
1574 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1575 const AsmToken &Tok = Parser.getTok();
1577 // Any MSR/MRS operand will be an identifier, and we want to store it as some
1578 // kind of string: SPSel is valid for two different forms of MSR with two
1579 // different encodings. There's no collision at the moment, but the potential
1581 if (!Tok.is(AsmToken::Identifier)) {
1582 return MatchOperand_NoMatch;
1585 SMLoc S = Tok.getLoc();
1586 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
1587 Parser.Lex(); // Eat identifier
1589 return MatchOperand_Success;
// ParseLSXAddressOperand - parse the address operand of a load/store
// exclusive: a 64-bit GPR-or-SP base register, optionally followed by
// ", #0" (the only immediate offset these instructions permit). Emits a
// wrapped-register operand. The surrounding '[' / ']' tokens are handled by
// the caller - presumably ParseOperand/ParseInstruction; confirm there.
// NOTE(review): elided listing - the RegNum/Layout declarations around line
// 1596-1599 are not visible here.
1592 AArch64AsmParser::OperandMatchResultTy
1593 AArch64AsmParser::ParseLSXAddressOperand(
1594 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1595 SMLoc S = Parser.getTok().getLoc();
1598 SMLoc RegEndLoc, LayoutLoc;
// Base must be a plain GPR64+SP register with no vector layout suffix.
1600 if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
1601 || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
1602 || Layout.size() != 0) {
1603 // Check Layout.size because we don't want to let "x3.4s" or similar
1605 return MatchOperand_NoMatch;
1607 Parser.Lex(); // Eat register
// "[xN]" form: register alone, closing bracket handled by caller.
1609 if (Parser.getTok().is(AsmToken::RBrac)) {
1611 SMLoc E = Parser.getTok().getLoc();
1612 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1613 return MatchOperand_Success;
1616 // Otherwise, only ", #0" is valid
1618 if (Parser.getTok().isNot(AsmToken::Comma)) {
1619 Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
1620 return MatchOperand_ParseFail;
1622 Parser.Lex(); // Eat ','
1624 if (Parser.getTok().isNot(AsmToken::Hash)) {
1625 Error(Parser.getTok().getLoc(), "expected '#0'");
1626 return MatchOperand_ParseFail;
1628 Parser.Lex(); // Eat '#'
1630 if (Parser.getTok().isNot(AsmToken::Integer)
1631 || Parser.getTok().getIntVal() != 0 ) {
1632 Error(Parser.getTok().getLoc(), "expected '#0'");
1633 return MatchOperand_ParseFail;
1635 Parser.Lex(); // Eat '0'
1637 SMLoc E = Parser.getTok().getLoc();
1638 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1639 return MatchOperand_Success;
// ParseShiftExtend - parse a shift/extend modifier: one of lsl/lsr/asr/ror
// or the [su]xt[bhwx] extenders, optionally followed by "#imm". The amount
// may be omitted only for the extending forms (implicit #0). Produces a
// ShiftExtend operand; the bool argument to CreateShiftExtend records
// whether the amount was implicit.
// NOTE(review): elided listing - the Lex() calls consuming the specifier,
// '#', and amount, plus the S/E declarations, are on lines not visible here.
1642 AArch64AsmParser::OperandMatchResultTy
1643 AArch64AsmParser::ParseShiftExtend(
1644 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1645 StringRef IDVal = Parser.getTok().getIdentifier();
1646 std::string LowerID = IDVal.lower();
1648 A64SE::ShiftExtSpecifiers Spec =
1649 StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
1650 .Case("lsl", A64SE::LSL)
1651 .Case("lsr", A64SE::LSR)
1652 .Case("asr", A64SE::ASR)
1653 .Case("ror", A64SE::ROR)
1654 .Case("uxtb", A64SE::UXTB)
1655 .Case("uxth", A64SE::UXTH)
1656 .Case("uxtw", A64SE::UXTW)
1657 .Case("uxtx", A64SE::UXTX)
1658 .Case("sxtb", A64SE::SXTB)
1659 .Case("sxth", A64SE::SXTH)
1660 .Case("sxtw", A64SE::SXTW)
1661 .Case("sxtx", A64SE::SXTX)
1662 .Default(A64SE::Invalid);
// Not a shift/extend keyword at all: let other parsers try this token.
1664 if (Spec == A64SE::Invalid)
1665 return MatchOperand_NoMatch;
1669 S = Parser.getTok().getLoc();
1672 if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
1673 Spec != A64SE::ASR && Spec != A64SE::ROR) {
1674 // The shift amount can be omitted for the extending versions, but not real
1676 // add x0, x0, x0, uxtb
1677 // is valid, and equivalent to
1678 // add x0, x0, x0, uxtb #0
1680 if (Parser.getTok().is(AsmToken::Comma) ||
1681 Parser.getTok().is(AsmToken::EndOfStatement) ||
1682 Parser.getTok().is(AsmToken::RBrac)) {
// Implicit amount: amount 0, 'true' = implicit.
1683 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
1685 return MatchOperand_Success;
1689 // Eat # at beginning of immediate
1690 if (!Parser.getTok().is(AsmToken::Hash)) {
1691 Error(Parser.getTok().getLoc(),
1692 "expected #imm after shift specifier");
1693 return MatchOperand_ParseFail;
1697 // Make sure we do actually have a number
1698 if (!Parser.getTok().is(AsmToken::Integer)) {
1699 Error(Parser.getTok().getLoc(),
1700 "expected integer shift amount");
1701 return MatchOperand_ParseFail;
1703 unsigned Amount = Parser.getTok().getIntVal();
1705 E = Parser.getTok().getLoc();
1707 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
1710 return MatchOperand_Success;
1713 // FIXME: We would really like to be able to tablegen'erate this.
// validateInstruction - post-match semantic checks that the tablegen'd
// matcher cannot express. Returns true (an error has been emitted) on
// failure, following the usual MCTargetAsmParser convention - confirm
// against the caller in MatchAndEmitInstruction.
// Checks: bitfield insert/extract immediate relationships (ImmR/ImmS are the
// last two MCInst operands), and IC/TLBI ops that require or forbid a
// register argument.
// NOTE(review): elided listing - the transformation branch before the
// 'else if' at 1733, the RegWidth assignments for the xxii/wwii cases, and
// the break/closing-brace lines are not visible here.
1714 bool AArch64AsmParser::
1715 validateInstruction(MCInst &Inst,
1716 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1717 switch (Inst.getOpcode()) {
1718 case AArch64::BFIwwii:
1719 case AArch64::BFIxxii:
1720 case AArch64::SBFIZwwii:
1721 case AArch64::SBFIZxxii:
1722 case AArch64::UBFIZwwii:
1723 case AArch64::UBFIZxxii: {
1724 unsigned ImmOps = Inst.getNumOperands() - 2;
1725 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
1726 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
1729 // Bitfield inserts are preferred disassembly if ImmS < ImmR. However,
1730 // there is this one case where insert is valid syntax but the bfx
1731 // disassembly should be used: e.g. "sbfiz w0, w0, #0, #1".
// Insert width must fit: lsb + width must not run off the register.
1733 } else if (ImmS >= ImmR) {
1734 return Error(Operands[4]->getStartLoc(),
1735 "requested insert overflows register");
1739 case AArch64::BFXILwwii:
1740 case AArch64::BFXILxxii:
1741 case AArch64::SBFXwwii:
1742 case AArch64::SBFXxxii:
1743 case AArch64::UBFXwwii:
1744 case AArch64::UBFXxxii: {
1745 unsigned ImmOps = Inst.getNumOperands() - 2;
1746 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
1747 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
1748 int64_t RegWidth = 0;
// RegWidth is 64 for the xxii forms, 32 for wwii (assignments elided here).
1749 switch (Inst.getOpcode()) {
1750 case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
1753 case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
// Extract range must lie within the register and msb >= lsb.
1758 if (ImmS >= RegWidth || ImmS < ImmR) {
1759 return Error(Operands[4]->getStartLoc(),
1760 "requested extract overflows register");
1764 case AArch64::ICix: {
1765 int64_t ImmVal = Inst.getOperand(0).getImm();
1766 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
1767 if (!A64IC::NeedsRegister(ICOp)) {
1768 return Error(Operands[1]->getStartLoc(),
1769 "specified IC op does not use a register");
1773 case AArch64::ICi: {
1774 int64_t ImmVal = Inst.getOperand(0).getImm();
1775 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
1776 if (A64IC::NeedsRegister(ICOp)) {
1777 return Error(Operands[1]->getStartLoc(),
1778 "specified IC op requires a register");
1782 case AArch64::TLBIix: {
1783 int64_t ImmVal = Inst.getOperand(0).getImm();
1784 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
1785 if (!A64TLBI::NeedsRegister(TLBIOp)) {
1786 return Error(Operands[1]->getStartLoc(),
1787 "specified TLBI op does not use a register");
1791 case AArch64::TLBIi: {
1792 int64_t ImmVal = Inst.getOperand(0).getImm();
1793 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
1794 if (A64TLBI::NeedsRegister(TLBIOp)) {
1795 return Error(Operands[1]->getStartLoc(),
1796 "specified TLBI op requires a register");
1806 // Parses the instruction *together with* all operands, appending each parsed
1807 // operand to the "Operands" list
// The mnemonic may carry a ".cond" suffix (e.g. "b.eq"), which is split off
// and pushed as a "." token plus a CondCode operand. Operands are comma-
// separated, with special handling for ']' and '!' which follow an operand
// without a comma. Returns true on error (after eating to end of statement).
// NOTE(review): elided listing - several return-true statements after
// EatToEndOfStatement, the Lex() calls for ']'/'!'/EndOfStatement, and the
// final return are on lines not visible here.
1808 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
1809 StringRef Name, SMLoc NameLoc,
1810 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1811 size_t CondCodePos = Name.find('.');
1813 StringRef Mnemonic = Name.substr(0, CondCodePos);
1814 Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
1816 if (CondCodePos != StringRef::npos) {
1817 // We have a condition code
1818 SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
1819 StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
1820 A64CC::CondCodes Code;
1822 Code = A64StringToCondCode(CondStr);
1824 if (Code == A64CC::Invalid) {
1825 Error(S, "invalid condition code");
1826 Parser.EatToEndOfStatement();
1830 SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
1832 Operands.push_back(AArch64Operand::CreateToken(".", DotL));
// Condition codes are two characters, so the end is start + 3 from the dot.
1833 SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
1834 Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
1837 // Now we parse the operands of this instruction
1838 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1839 // Read the first operand.
1840 if (ParseOperand(Operands, Mnemonic)) {
1841 Parser.EatToEndOfStatement();
1845 while (getLexer().is(AsmToken::Comma)) {
1846 Parser.Lex(); // Eat the comma.
1848 // Parse and remember the operand.
1849 if (ParseOperand(Operands, Mnemonic)) {
1850 Parser.EatToEndOfStatement();
1855 // After successfully parsing some operands there are two special cases to
1856 // consider (i.e. notional operands not separated by commas). Both are due
1857 // to memory specifiers:
1858 // + An RBrac will end an address for load/store/prefetch
1859 // + An '!' will indicate a pre-indexed operation.
1861 // It's someone else's responsibility to make sure these tokens are sane
1862 // in the given context!
1863 if (Parser.getTok().is(AsmToken::RBrac)) {
1864 SMLoc Loc = Parser.getTok().getLoc();
1865 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
1869 if (Parser.getTok().is(AsmToken::Exclaim)) {
1870 SMLoc Loc = Parser.getTok().getLoc();
1871 Operands.push_back(AArch64Operand::CreateToken("!", Loc));
// Anything left over that isn't a newline is a syntax error.
1877 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1878 SMLoc Loc = getLexer().getLoc();
1879 Parser.EatToEndOfStatement();
1880 return Error(Loc, "expected comma before next operand");
1883 // Eat the EndOfStatement
// ParseDirective - handle target-specific directives: .hword/.word/.xword
// emit 2/4/8-byte data values, and .tlsdesccall emits the TLSDESC call
// marker pseudo-instruction.
// NOTE(review): elided listing - the fall-through return for unrecognized
// directives (after line 1898) is not visible here.
1889 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
1890 StringRef IDVal = DirectiveID.getIdentifier();
1891 if (IDVal == ".hword")
1892 return ParseDirectiveWord(2, DirectiveID.getLoc());
1893 else if (IDVal == ".word")
1894 return ParseDirectiveWord(4, DirectiveID.getLoc());
1895 else if (IDVal == ".xword")
1896 return ParseDirectiveWord(8, DirectiveID.getLoc());
1897 else if (IDVal == ".tlsdesccall")
1898 return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
1903 /// parseDirectiveWord
1904 /// ::= .word [ expression (, expression)* ]
// Emits each comma-separated expression as a Size-byte value via the
// streamer. Returns true on error.
// NOTE(review): elided listing - the loop construct around the expression
// list and the trailing Lex()/return lines are not visible here.
1905 bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
1906 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1908 const MCExpr *Value;
1909 if (getParser().ParseExpression(Value))
1912 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
1914 if (getLexer().is(AsmToken::EndOfStatement))
1917 // FIXME: Improve diagnostic.
1918 if (getLexer().isNot(AsmToken::Comma))
1919 return Error(L, "unexpected token in directive");
1928 // parseDirectiveTLSDescCall:
1929 // ::= .tlsdesccall symbol
// Emits the TLSDESCCALL marker pseudo-instruction referencing the given
// symbol; the marker ties the TLS descriptor relocation sequence to the
// following blr. Returns true on error.
// NOTE(review): elided listing - the 'Name' declaration (used at 1932) and
// the final return are on lines not visible here.
1930 bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
1932 if (getParser().ParseIdentifier(Name))
1933 return Error(L, "expected symbol after directive");
1935 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
1936 const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
1939 Inst.setOpcode(AArch64::TLSDESCCALL);
1940 Inst.addOperand(MCOperand::CreateExpr(Expr));
1942 getParser().getStreamer().EmitInstruction(Inst);
1947 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1948 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1949 MCStreamer &Out, unsigned &ErrorInfo,
1950 bool MatchingInlineAsm) {
1952 unsigned MatchResult;
1953 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
1956 if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
1957 return Error(IDLoc, "too few operands for instruction");
1959 switch (MatchResult) {
1962 if (validateInstruction(Inst, Operands))
1965 Out.EmitInstruction(Inst);
1967 case Match_MissingFeature:
1968 Error(IDLoc, "instruction requires a CPU feature not currently enabled");
1970 case Match_InvalidOperand: {
1971 SMLoc ErrorLoc = IDLoc;
1972 if (ErrorInfo != ~0U) {
1973 ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
1974 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
1977 return Error(ErrorLoc, "invalid operand for instruction");
1979 case Match_MnemonicFail:
1980 return Error(IDLoc, "invalid instruction");
1982 case Match_AddSubRegExtendSmall:
1983 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1984 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
1985 case Match_AddSubRegExtendLarge:
1986 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1987 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
1988 case Match_AddSubRegShift32:
1989 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1990 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
1991 case Match_AddSubRegShift64:
1992 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1993 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
1994 case Match_AddSubSecondSource:
1995 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1996 "expected compatible register, symbol or integer in range [0, 4095]");
1997 case Match_CVTFixedPos32:
1998 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
1999 "expected integer in range [1, 32]");
2000 case Match_CVTFixedPos64:
2001 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2002 "expected integer in range [1, 64]");
2003 case Match_CondCode:
2004 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2005 "expected AArch64 condition code");
2007 // Any situation which allows a nontrivial floating-point constant also
2008 // allows a register.
2009 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2010 "expected compatible register or floating-point constant");
2012 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2013 "expected floating-point constant #0.0");
2015 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2016 "expected label or encodable integer pc offset");
2018 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2019 "expected lane specifier '[1]'");
2020 case Match_LoadStoreExtend32_1:
2021 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2022 "expected 'uxtw' or 'sxtw' with optional shift of #0");
2023 case Match_LoadStoreExtend32_2:
2024 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2025 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
2026 case Match_LoadStoreExtend32_4:
2027 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2028 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
2029 case Match_LoadStoreExtend32_8:
2030 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2031 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
2032 case Match_LoadStoreExtend32_16:
2033 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2034 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
2035 case Match_LoadStoreExtend64_1:
2036 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2037 "expected 'lsl' or 'sxtx' with optional shift of #0");
2038 case Match_LoadStoreExtend64_2:
2039 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2040 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
2041 case Match_LoadStoreExtend64_4:
// Tail of the diagnostic switch in MatchAndEmitInstruction: each custom
// Match_* result code produced by the TableGen-generated matcher is turned
// into a user-facing diagnostic anchored at the offending operand's source
// location (Operands[ErrorInfo]).
// NOTE(review): this listing has elided several lines (visible as gaps in the
// embedded line numbers), including some `case` labels — e.g. before the
// MRS/MSR messages and the plain UImm-range messages below. The code is
// annotated as-is; the missing labels are not reconstructed here.
2042 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2043 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
// Register-offset load/store extends: the only legal shift amount is
// log2(access size in bytes) — 3 for 8-byte, 4 for 16-byte accesses.
2044 case Match_LoadStoreExtend64_8:
2045 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2046 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
2047 case Match_LoadStoreExtend64_16:
2048 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2049 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Load/store-pair 7-bit signed immediates, scaled by the access size
// (4/8/16 bytes), hence the "multiple of N" ranges in the messages.
2050 case Match_LoadStoreSImm7_4:
2051 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2052 "expected integer multiple of 4 in range [-256, 252]");
2053 case Match_LoadStoreSImm7_8:
2054 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2055 "expected integer multiple of 8 in range [-512, 508]");
2056 case Match_LoadStoreSImm7_16:
2057 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2058 "expected integer multiple of 16 in range [-1024, 1016]");
// Unscaled 9-bit signed offset (LDUR/STUR-style addressing).
2059 case Match_LoadStoreSImm9:
2060 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2061 "expected integer in range [-256, 255]");
// Unsigned 12-bit offsets, scaled by access size (1/2/4/8/16 bytes);
// a symbolic reference (e.g. :lo12:) is also accepted.
2062 case Match_LoadStoreUImm12_1:
2063 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2064 "expected symbolic reference or integer in range [0, 4095]");
2065 case Match_LoadStoreUImm12_2:
2066 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2067 "expected symbolic reference or integer in range [0, 8190]");
2068 case Match_LoadStoreUImm12_4:
2069 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2070 "expected symbolic reference or integer in range [0, 16380]");
2071 case Match_LoadStoreUImm12_8:
2072 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2073 "expected symbolic reference or integer in range [0, 32760]");
2074 case Match_LoadStoreUImm12_16:
2075 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2076 "expected symbolic reference or integer in range [0, 65520]");
// Second source of a logical instruction: register, or an immediate
// encodable as an AArch64 bitmask ("logical") immediate.
2077 case Match_LogicalSecondSource:
2078 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2079 "expected compatible register or logical immediate");
// MOVZ/MOVN/MOVK 16-bit immediate, or a movw-class relocation.
2080 case Match_MOVWUImm16:
2081 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2082 "expected relocated symbol or integer in range [0, 65535]");
// NOTE(review): case labels elided here in this listing — from the message
// text these two appear to be the MRS (readable) and MSR (writable) system
// register operands; confirm against the full file.
2084 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2085 "expected readable system register");
2087 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2088 "expected writable system register or pstate");
// Named-immediate operands of the system instructions (AT, DSB/DMB, DC,
// IC, ISB, PRFM, TLBI): each has its own symbolic operand vocabulary.
2089 case Match_NamedImm_at:
2090 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2091 "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
2092 case Match_NamedImm_dbarrier:
2093 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2094 "expected integer in range [0, 15] or symbolic barrier operand");
2095 case Match_NamedImm_dc:
2096 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2097 "expected symbolic 'dc' operand");
2098 case Match_NamedImm_ic:
2099 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2100 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
2101 case Match_NamedImm_isb:
2102 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2103 "expected integer in range [0, 15] or 'sy'");
2104 case Match_NamedImm_prefetch:
2105 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2106 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
2107 case Match_NamedImm_tlbi:
2108 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2109 "expected translation buffer invalidation operand");
// NOTE(review): the case labels for the following plain unsigned-immediate
// ranges (16-bit, 3/4/5/6/7-bit) are elided in this listing; presumably
// Match_UImm16 and the narrower Match_UImmN codes — verify in the full file.
2111 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2112 "expected integer in range [0, 65535]");
2114 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2115 "expected integer in range [0, 7]");
2117 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2118 "expected integer in range [0, 15]");
2120 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2121 "expected integer in range [0, 31]");
2123 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2124 "expected integer in range [0, 63]");
2126 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2127 "expected integer in range [0, 127]");
// Bitfield width operands: the legal upper bound depends on the lsb
// operand already parsed, hence the "<lsb>" placeholder in the message.
2129 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2130 "expected integer in range [<lsb>, 31]");
2132 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2133 "expected integer in range [<lsb>, 63]");
// Reached only if TableGen grows a new diagnostic type that is not yet
// handled above — deliberate hard failure so it gets noticed.
2136 llvm_unreachable("Implement any new match types added!");
// Debug-print this parsed operand to OS in an angle-bracketed, human-readable
// form, one format per operand kind.
// NOTE(review): the `switch (Kind)`, `case k_*:` and `break;` scaffolding is
// elided in this listing (gaps in the embedded line numbers); the remaining
// lines are the per-kind print bodies, annotated below.
2140 void AArch64Operand::print(raw_ostream &OS) const {
// Condition-code operand.
2143 OS << "<CondCode: " << CondCode.Code << ">";
// Floating-point immediate.
2146 OS << "<fpimm: " << FPImm.Val << ">";
// Immediate with an attached logical-shift-left amount.
2149 OS << "<immwithlsl: imm=" << ImmWithLSL.Val
2150 << ", shift=" << ImmWithLSL.ShiftAmount << ">";
// Plain immediate: delegate to MCExpr's own printer.
2153 getImm()->print(OS);
// Register operand (printed as its numeric id).
2156 OS << "<register " << getReg() << '>';
// Raw token, quoted.
2159 OS << '\'' << getToken() << '\'';
// Shift/extend modifier with its amount.
2162 OS << "<shift: type=" << ShiftExtend.ShiftType
2163 << ", amount=" << ShiftExtend.Amount << ">";
// System-register name: SysReg stores a non-owned (Data, Length) pair.
2166 StringRef Name(SysReg.Data, SysReg.Length);
2167 OS << "<sysreg: " << Name << '>';
// Any kind not handled above is a programming error.
2171 llvm_unreachable("No idea how to print this kind of operand");
2176 void AArch64Operand::dump() const {
2181 /// Force static initialization.
// Entry point called during LLVM target initialization: registers
// AArch64AsmParser with the target registry so the AArch64 target can parse
// assembly. `extern "C"` gives it an unmangled name for the init machinery.
// NOTE(review): closing brace elided in this listing.
2182 extern "C" void LLVMInitializeAArch64AsmParser() {
2183 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
2186 #define GET_REGISTER_MATCHER
2187 #define GET_MATCHER_IMPLEMENTATION
2188 #include "AArch64GenAsmMatcher.inc"