//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//
16 #include "MCTargetDesc/AArch64MCTargetDesc.h"
17 #include "MCTargetDesc/AArch64MCExpr.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/StringSwitch.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCInst.h"
25 #include "llvm/MC/MCSubtargetInfo.h"
26 #include "llvm/MC/MCTargetAsmParser.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCRegisterInfo.h"
29 #include "llvm/MC/MCStreamer.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Support/TargetRegistry.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
47 #define GET_ASSEMBLER_HEADER
48 #include "AArch64GenAsmMatcher.inc"
51 enum AArch64MatchResultTy {
52 Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
53 #define GET_OPERAND_DIAGNOSTIC_TYPES
54 #include "AArch64GenAsmMatcher.inc"
57 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
58 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
59 MCAsmParserExtension::Initialize(_Parser);
61 // Initialize the set of available features.
62 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
65 // These are the public interface of the MCTargetAsmParser
66 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
67 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
69 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
71 bool ParseDirective(AsmToken DirectiveID);
72 bool ParseDirectiveTLSDescCall(SMLoc L);
73 bool ParseDirectiveWord(unsigned Size, SMLoc L);
75 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
76 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
78 bool MatchingInlineAsm);
80 // The rest of the sub-parsers have more freedom over interface: they return
81 // an OperandMatchResultTy because it's less ambiguous than true/false or
82 // -1/0/1 even if it is more verbose
84 ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
87 OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
89 OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
92 ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
96 ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
100 ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
103 ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
106 ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
109 ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
111 template<typename SomeNamedImmMapper> OperandMatchResultTy
112 ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
113 return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
117 ParseNamedImmOperand(const NamedImmMapper &Mapper,
118 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
121 ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
124 ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
127 ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
129 bool validateInstruction(MCInst &Inst,
130 const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
132 /// Scan the next token (which had better be an identifier) and determine
133 /// whether it represents a general-purpose or vector register. It returns
134 /// true if an identifier was found and populates its reference arguments. It
135 /// does not consume the token.
137 IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
138 SMLoc &LayoutLoc) const;
146 /// Instances of this class represent a parsed AArch64 machine instruction.
147 class AArch64Operand : public MCParsedAsmOperand {
150 k_ImmWithLSL, // #uimm {, LSL #amt }
151 k_CondCode, // eq/ne/...
152 k_FPImmediate, // Limited-precision floating-point imm
153 k_Immediate, // Including expressions referencing symbols
156 k_SysReg, // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated matcher handles
158 k_WrappedRegister // Load/store exclusive permit a wrapped register.
161 SMLoc StartLoc, EndLoc;
166 unsigned ShiftAmount;
171 A64CC::CondCodes Code;
187 A64SE::ShiftExtSpecifiers ShiftType;
203 AArch64Operand(KindTy K, SMLoc S, SMLoc E)
204 : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
207 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
210 SMLoc getStartLoc() const { return StartLoc; }
211 SMLoc getEndLoc() const { return EndLoc; }
212 void print(raw_ostream&) const;
215 StringRef getToken() const {
216 assert(Kind == k_Token && "Invalid access!");
217 return StringRef(Tok.Data, Tok.Length);
220 unsigned getReg() const {
221 assert((Kind == k_Register || Kind == k_WrappedRegister)
222 && "Invalid access!");
226 const MCExpr *getImm() const {
227 assert(Kind == k_Immediate && "Invalid access!");
231 A64CC::CondCodes getCondCode() const {
232 assert(Kind == k_CondCode && "Invalid access!");
233 return CondCode.Code;
  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }
249 bool isCondCode() const { return Kind == k_CondCode; }
250 bool isToken() const { return Kind == k_Token; }
251 bool isReg() const { return Kind == k_Register; }
252 bool isImm() const { return Kind == k_Immediate; }
253 bool isMem() const { return false; }
254 bool isFPImm() const { return Kind == k_FPImmediate; }
255 bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
256 bool isSysReg() const { return Kind == k_SysReg; }
257 bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
258 bool isWrappedReg() const { return Kind == k_WrappedRegister; }
260 bool isAddSubImmLSL0() const {
261 if (!isImmWithLSL()) return false;
262 if (ImmWithLSL.ShiftAmount != 0) return false;
264 AArch64MCExpr::VariantKind Variant;
265 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
266 return Variant == AArch64MCExpr::VK_AARCH64_LO12
267 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
268 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
269 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
270 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
271 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
274 // Otherwise it should be a real immediate in range:
275 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
276 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
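  // Editorial note (illustrative, not from the original source): operands
  // such as "#0x123" in "add x0, x1, #0x123" or the relocated "#:lo12:sym"
  // form are the kind this predicate is meant to accept; values above 0xfff
  // need the "LSL #12" variant checked by isAddSubImmLSL12 below.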
279 bool isAddSubImmLSL12() const {
280 if (!isImmWithLSL()) return false;
281 if (ImmWithLSL.ShiftAmount != 12) return false;
283 AArch64MCExpr::VariantKind Variant;
284 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
285 return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
286 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
289 // Otherwise it should be a real immediate in range:
290 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
291 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
294 template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
295 if (!isShiftOrExtend()) return false;
297 A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
298 if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
301 if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
304 return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
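  // Editorial note (illustrative): for a 32-bit offset register this matches
  // the extend in forms like "ldr w0, [x1, w2, sxtw #2]", where any explicit
  // shift must equal log2 of the access size (or be omitted/zero).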
307 bool isAdrpLabel() const {
308 if (!isImm()) return false;
310 AArch64MCExpr::VariantKind Variant;
311 if (isNonConstantExpr(getImm(), Variant)) {
312 return Variant == AArch64MCExpr::VK_AARCH64_None
313 || Variant == AArch64MCExpr::VK_AARCH64_GOT
314 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
315 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
318 return isLabel<21, 4096>();
321 template<unsigned RegWidth> bool isBitfieldWidth() const {
322 if (!isImm()) return false;
324 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
325 if (!CE) return false;
327 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
330 template<int RegWidth>
331 bool isCVTFixedPos() const {
332 if (!isImm()) return false;
334 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
335 if (!CE) return false;
337 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
340 bool isFMOVImm() const {
341 if (!isFPImm()) return false;
    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
348 bool isFPZero() const {
349 if (!isFPImm()) return false;
351 APFloat RealVal(FPImm.Val);
352 return RealVal.isPosZero();
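  // Editorial note (illustrative): FMOV immediates are limited to the 8-bit
  // encodable constants, e.g. "fmov d0, #1.0" or "fmov s0, #0.5"; a literal
  // "#0.0" is matched by isFPZero instead.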
  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr; such operands are not accepted as labels here.
    return false;
  }
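  // Editorial worked example: with field_width = 19 and scale = 4 (the
  // load-literal form used by isLoadLitLabel below), constants must be
  // multiples of 4 in the range [-1048576, 1048572].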
373 bool isLane1() const {
374 if (!isImm()) return false;
376 // Because it's come through custom assembly parsing, it must always be a
377 // constant expression.
378 return cast<MCConstantExpr>(getImm())->getValue() == 1;
381 bool isLoadLitLabel() const {
382 if (!isImm()) return false;
384 AArch64MCExpr::VariantKind Variant;
385 if (isNonConstantExpr(getImm(), Variant)) {
386 return Variant == AArch64MCExpr::VK_AARCH64_None
387 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
390 return isLabel<19, 4>();
  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }
  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
        && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }
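  // Editorial note (illustrative): "mov x0, #0xaaaaaaaaaaaaaaaa" can only be
  // encoded as the ORR (logical immediate) alias, so it passes; something
  // like "mov x0, #0x10000" is MOVZ-encodable and is rejected here.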
415 template<int MemSize>
416 bool isOffsetUImm12() const {
417 if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();
430 // Must be a multiple of the access size in bytes.
431 if ((Val & (MemSize - 1)) != 0) return false;
433 // Must be 12-bit unsigned
434 return Val >= 0 && Val <= 0xfff * MemSize;
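  // Editorial worked example: with MemSize = 8 this accepts
  // "ldr x0, [x1, #32760]" (4095 * 8) but rejects "#4", which is not a
  // multiple of the 8-byte access size.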
437 template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
438 bool isShift() const {
439 if (!isShiftOrExtend()) return false;
441 if (ShiftExtend.ShiftType != SHKind)
444 return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
447 bool isMOVN32Imm() const {
448 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
449 AArch64MCExpr::VK_AARCH64_SABS_G0,
450 AArch64MCExpr::VK_AARCH64_SABS_G1,
451 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
452 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
453 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
454 AArch64MCExpr::VK_AARCH64_TPREL_G1,
455 AArch64MCExpr::VK_AARCH64_TPREL_G0,
457 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
459 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
462 bool isMOVN64Imm() const {
463 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
464 AArch64MCExpr::VK_AARCH64_SABS_G0,
465 AArch64MCExpr::VK_AARCH64_SABS_G1,
466 AArch64MCExpr::VK_AARCH64_SABS_G2,
467 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
468 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
469 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
470 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
471 AArch64MCExpr::VK_AARCH64_TPREL_G2,
472 AArch64MCExpr::VK_AARCH64_TPREL_G1,
473 AArch64MCExpr::VK_AARCH64_TPREL_G0,
475 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
477 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
481 bool isMOVZ32Imm() const {
482 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
483 AArch64MCExpr::VK_AARCH64_ABS_G0,
484 AArch64MCExpr::VK_AARCH64_ABS_G1,
485 AArch64MCExpr::VK_AARCH64_SABS_G0,
486 AArch64MCExpr::VK_AARCH64_SABS_G1,
487 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
488 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
489 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
490 AArch64MCExpr::VK_AARCH64_TPREL_G1,
491 AArch64MCExpr::VK_AARCH64_TPREL_G0,
493 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
495 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
498 bool isMOVZ64Imm() const {
499 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
500 AArch64MCExpr::VK_AARCH64_ABS_G0,
501 AArch64MCExpr::VK_AARCH64_ABS_G1,
502 AArch64MCExpr::VK_AARCH64_ABS_G2,
503 AArch64MCExpr::VK_AARCH64_ABS_G3,
504 AArch64MCExpr::VK_AARCH64_SABS_G0,
505 AArch64MCExpr::VK_AARCH64_SABS_G1,
506 AArch64MCExpr::VK_AARCH64_SABS_G2,
507 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
508 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
509 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
510 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
511 AArch64MCExpr::VK_AARCH64_TPREL_G2,
512 AArch64MCExpr::VK_AARCH64_TPREL_G1,
513 AArch64MCExpr::VK_AARCH64_TPREL_G0,
515 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
517 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
520 bool isMOVK32Imm() const {
521 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
522 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
523 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
524 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
525 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
526 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
527 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
528 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
530 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
532 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
535 bool isMOVK64Imm() const {
536 static AArch64MCExpr::VariantKind PermittedModifiers[] = {
537 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
538 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
539 AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
540 AArch64MCExpr::VK_AARCH64_ABS_G3,
541 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
542 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
543 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
544 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
545 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
547 unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
549 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
552 bool isMoveWideImm(unsigned RegWidth,
553 AArch64MCExpr::VariantKind *PermittedModifiers,
554 unsigned NumModifiers) const {
555 if (!isImmWithLSL()) return false;
557 if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
558 if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
560 AArch64MCExpr::VariantKind Modifier;
561 if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
562 // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
563 if (!ImmWithLSL.ImplicitAmount) return false;
      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;
      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }
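  // Editorial note (illustrative): both "movz x0, #0xffff, lsl #32" and the
  // relocated form "movz x0, #:abs_g2:sym" (shift implied by the modifier)
  // are the kinds of operand this check is intended to accept.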
575 template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
576 bool isMoveWideMovAlias() const {
577 if (!isImm()) return false;
579 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
580 if (!CE) return false;
583 uint64_t Value = CE->getValue();
    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;
      Value &= 0xffffffffULL;
    }

    int UImm16, Shift;
    return isValidImm(RegWidth, Value, UImm16, Shift);
  }
598 bool isMSRWithReg() const {
599 if (!isSysReg()) return false;
601 bool IsKnownRegister;
602 StringRef Name(SysReg.Data, SysReg.Length);
603 A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
605 return IsKnownRegister;
608 bool isMSRPState() const {
609 if (!isSysReg()) return false;
611 bool IsKnownRegister;
612 StringRef Name(SysReg.Data, SysReg.Length);
613 A64PState::PStateMapper().fromString(Name, IsKnownRegister);
615 return IsKnownRegister;
  bool isMRS() const {
    if (!isSysReg()) return false;
621 // First check against specific MSR-only (write-only) registers
622 bool IsKnownRegister;
623 StringRef Name(SysReg.Data, SysReg.Length);
624 A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
626 return IsKnownRegister;
629 bool isPRFM() const {
630 if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
640 template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
641 if (!isShiftOrExtend()) return false;
643 if (ShiftExtend.ShiftType != SHKind)
646 return ShiftExtend.Amount <= 4;
649 bool isRegExtendLSL() const {
650 if (!isShiftOrExtend()) return false;
652 if (ShiftExtend.ShiftType != A64SE::LSL)
655 return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
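  // Editorial note (illustrative): in "add x0, x1, w2, uxtb #2" the
  // "uxtb #2" part is matched by these predicates; the extend amount is
  // limited to the range [0, 4].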
658 template<int MemSize> bool isSImm7Scaled() const {
659 if (!isImm()) return false;
661 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
662 if (!CE) return false;
    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;
    return Val >= -64 && Val < 64;
672 template<int BitWidth>
673 bool isSImm() const {
674 if (!isImm()) return false;
676 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
677 if (!CE) return false;
679 return CE->getValue() >= -(1LL << (BitWidth - 1))
680 && CE->getValue() < (1LL << (BitWidth - 1));
683 template<int bitWidth>
684 bool isUImm() const {
685 if (!isImm()) return false;
687 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
688 if (!CE) return false;
690 return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
693 bool isUImm() const {
694 if (!isImm()) return false;
696 return isa<MCConstantExpr>(getImm());
  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }
710 static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
712 AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
713 Op->CondCode.Code = Code;
717 static AArch64Operand *CreateFPImm(double Val,
719 AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
724 static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
725 AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
730 static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
731 AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
732 Op->Reg.RegNum = RegNum;
736 static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
737 AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
738 Op->Reg.RegNum = RegNum;
742 static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
746 AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
747 Op->ShiftExtend.ShiftType = ShiftTyp;
748 Op->ShiftExtend.Amount = Amount;
749 Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
753 static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
754 AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
755 Op->Tok.Data = Str.data();
756 Op->Tok.Length = Str.size();
760 static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
761 AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
762 Op->Tok.Data = Str.data();
763 Op->Tok.Length = Str.size();
768 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
769 // Add as immediates when possible.
770 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
771 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
773 Inst.addOperand(MCOperand::CreateExpr(Expr));
776 template<unsigned RegWidth>
777 void addBFILSBOperands(MCInst &Inst, unsigned N) const {
778 assert(N == 1 && "Invalid number of operands!");
779 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
780 unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
781 Inst.addOperand(MCOperand::CreateImm(EncodedVal));
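  // Editorial worked example: for "bfi w0, w1, #3, #5" the lsb operand 3 is
  // encoded as immr = (32 - 3) % 32 = 29, matching the computation above.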
784 void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
785 assert(N == 1 && "Invalid number of operands!");
786 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
787 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
790 void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
791 assert(N == 1 && "Invalid number of operands!");
793 uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
794 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
796 Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
799 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
800 assert(N == 1 && "Invalid number of operands!");
801 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
804 void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
805 assert(N == 1 && "Invalid number of operands!");
807 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
808 Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
811 void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
812 assert(N == 1 && "Invalid number of operands!");
    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
821 void addFPZeroOperands(MCInst &Inst, unsigned N) const {
822 assert(N == 1 && "Invalid number of operands");
823 Inst.addOperand(MCOperand::CreateImm(0));
826 void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
827 assert(N == 1 && "Invalid number of operands!");
828 unsigned Encoded = A64InvertCondCode(getCondCode());
829 Inst.addOperand(MCOperand::CreateImm(Encoded));
832 void addRegOperands(MCInst &Inst, unsigned N) const {
833 assert(N == 1 && "Invalid number of operands!");
834 Inst.addOperand(MCOperand::CreateReg(getReg()));
837 void addImmOperands(MCInst &Inst, unsigned N) const {
838 assert(N == 1 && "Invalid number of operands!");
839 addExpr(Inst, getImm());
842 template<int MemSize>
843 void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
844 assert(N == 1 && "Invalid number of operands!");
846 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
847 uint64_t Val = CE->getValue() / MemSize;
848 Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
851 template<int BitWidth>
852 void addSImmOperands(MCInst &Inst, unsigned N) const {
853 assert(N == 1 && "Invalid number of operands!");
855 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
856 uint64_t Val = CE->getValue();
857 Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
860 void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
861 assert (N == 1 && "Invalid number of operands!");
863 addExpr(Inst, ImmWithLSL.Val);
866 template<unsigned field_width, unsigned scale>
867 void addLabelOperands(MCInst &Inst, unsigned N) const {
868 assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }
    int64_t Val = CE->getValue();
878 assert(Val % scale == 0 && "Unaligned immediate in instruction");
881 Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
884 template<int MemSize>
885 void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
886 assert(N == 1 && "Invalid number of operands!");
888 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
889 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
891 Inst.addOperand(MCOperand::CreateExpr(getImm()));
895 template<unsigned RegWidth>
896 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
897 assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
903 Inst.addOperand(MCOperand::CreateImm(Bits));
906 void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
913 Inst.addOperand(MCOperand::CreateImm(Bits));
916 void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
923 Inst.addOperand(MCOperand::CreateImm(Bits));
926 void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
933 Inst.addOperand(MCOperand::CreateImm(Bits));
936 void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
937 assert(N == 2 && "Invalid number of operands!");
939 addExpr(Inst, ImmWithLSL.Val);
    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
950 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
951 case AArch64MCExpr::VK_AARCH64_SABS_G0:
952 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
953 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
954 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
955 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
956 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
959 case AArch64MCExpr::VK_AARCH64_ABS_G1:
960 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
961 case AArch64MCExpr::VK_AARCH64_SABS_G1:
962 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
963 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
964 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
965 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
966 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
969 case AArch64MCExpr::VK_AARCH64_ABS_G2:
970 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
971 case AArch64MCExpr::VK_AARCH64_SABS_G2:
972 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
973 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
976 case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }
983 template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
984 void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
997 assert(Valid && "Invalid immediates should have been weeded out by now");
999 Inst.addOperand(MCOperand::CreateImm(UImm16));
1000 Inst.addOperand(MCOperand::CreateImm(Shift));
1003 void addPRFMOperands(MCInst &Inst, unsigned N) const {
1004 assert(N == 1 && "Invalid number of operands!");
1006 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1007 assert(CE->getValue() >= 0 && CE->getValue() <= 31
1008 && "PRFM operand should be 5-bits");
1010 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1013 // For Add-sub (extended register) operands.
1014 void addRegExtendOperands(MCInst &Inst, unsigned N) const {
1015 assert(N == 1 && "Invalid number of operands!");
1017 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1020 // For the extend in load-store (register offset) instructions.
1021 template<unsigned MemSize>
1022 void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
1023 addAddrRegExtendOperands(Inst, N, MemSize);
1026 void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
1027 unsigned MemSize) const {
1028 assert(N == 1 && "Invalid number of operands!");
    // First bit of Option is set in instruction classes, the high two bits
    // are encoded here.
    unsigned OptionHi = 0;
1033 switch (ShiftExtend.ShiftType) {
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
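  // Editorial note (illustrative): the value emitted here is the high part of
  // the "option" field together with the shift bit S; e.g. in
  // "ldr x0, [x1, w2, sxtw #3]" S is 1 because an explicit non-zero shift was
  // written.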
1054 void addShiftOperands(MCInst &Inst, unsigned N) const {
1055 assert(N == 1 && "Invalid number of operands!");
1057 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1061 } // end anonymous namespace.
1063 AArch64AsmParser::OperandMatchResultTy
1064 AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1065 StringRef Mnemonic) {
1067 // See if the operand has a custom parser
1068 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1070 // It could either succeed, fail or just not care.
  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
1077 return MatchOperand_ParseFail;
1078 case AsmToken::Identifier: {
1079 // It might be in the LSL/UXTB family ...
1080 OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
1082 // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
1087 uint32_t NumLanes = 0;
1088 OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
1089 assert(GotReg != MatchOperand_ParseFail
1090 && "register parsing shouldn't partially succeed");
1092 if (GotReg == MatchOperand_Success) {
1093 if (Parser.getTok().is(AsmToken::LBrac))
1094 return ParseNEONLane(Operands, NumLanes);
1096 return MatchOperand_Success;
1099 // ... or it might be a symbolish thing
1102 case AsmToken::LParen: // E.g. (strcmp-4)
1103 case AsmToken::Integer: // 1f, 2b labels
1104 case AsmToken::String: // quoted labels
1105 case AsmToken::Dot: // . is Current location
1106 case AsmToken::Dollar: // $ is PC
1107 case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;

    const MCExpr *ImmVal = 0;
1112 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1113 return MatchOperand_ParseFail;
1115 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1116 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1117 return MatchOperand_Success;
1119 case AsmToken::Hash: { // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    Parser.Lex(); // Eat the '#'

    const MCExpr *ImmVal = 0;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
1126 return MatchOperand_ParseFail;
1128 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1129 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1130 return MatchOperand_Success;
1132 case AsmToken::LBrac: {
1133 SMLoc Loc = Parser.getTok().getLoc();
1134 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1135 Parser.Lex(); // Eat '['
    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
1141 // The following will likely be useful later, but not in very early cases
1142 case AsmToken::LCurly: // Weird SIMD lists
1143 llvm_unreachable("Don't know how to deal with '{' in operand");
1144 return MatchOperand_ParseFail;
1148 AArch64AsmParser::OperandMatchResultTy
1149 AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
1150 if (getLexer().is(AsmToken::Colon)) {
1151 AArch64MCExpr::VariantKind RefKind;
    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
1158 if (getParser().ParseExpression(SubExprVal))
1159 return MatchOperand_ParseFail;
1161 ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
1162 return MatchOperand_Success;
1165 // No weird AArch64MCExpr prefix
1166 return getParser().ParseExpression(ExprVal)
1167 ? MatchOperand_ParseFail : MatchOperand_Success;
1170 // A lane attached to a NEON register. "[N]", which should yield three tokens:
1171 // '[', N, ']'. A hash is not allowed to precede the immediate here.
1172 AArch64AsmParser::OperandMatchResultTy
1173 AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1174 uint32_t NumLanes) {
1175 SMLoc Loc = Parser.getTok().getLoc();
1177 assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
1178 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1179 Parser.Lex(); // Eat '['
1181 if (Parser.getTok().isNot(AsmToken::Integer)) {
1182 Error(Parser.getTok().getLoc(), "expected lane number");
1183 return MatchOperand_ParseFail;
1186 if (Parser.getTok().getIntVal() >= NumLanes) {
1187 Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
1188 return MatchOperand_ParseFail;
  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
1194 Parser.Lex(); // Eat actual lane
1195 SMLoc E = Parser.getTok().getLoc();
1196 Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
1199 if (Parser.getTok().isNot(AsmToken::RBrac)) {
1200 Error(Parser.getTok().getLoc(), "expected ']' after lane");
1201 return MatchOperand_ParseFail;
1204 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
1205 Parser.Lex(); // Eat ']'
1207 return MatchOperand_Success;
1210 AArch64AsmParser::OperandMatchResultTy
1211 AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
1216 Error(Parser.getTok().getLoc(),
1217 "expected relocation specifier in operand after ':'");
1218 return MatchOperand_ParseFail;
1221 std::string LowerCase = Parser.getTok().getIdentifier().lower();
1222 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
1223 .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
1224 .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
1225 .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
1226 .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
1227 .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
1228 .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
1229 .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
1230 .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
1231 .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
1232 .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
1233 .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
1234 .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
1235 .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
1236 .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
1237 .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
1238 .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
1239 .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
1240 .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
1241 .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
1242 .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
1243 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
1244 .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
1245 .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
1246 .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
1247 .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
1248 .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
1249 .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
1250 .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
1251 .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
1252 .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
1253 .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
1254 .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
1255 .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
1256 .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
1257 .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
1258 .Default(AArch64MCExpr::VK_AARCH64_None);
1260 if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
1261 Error(Parser.getTok().getLoc(),
1262 "expected relocation specifier in operand after ':'");
1263 return MatchOperand_ParseFail;
1265 Parser.Lex(); // Eat identifier
1267 if (getLexer().isNot(AsmToken::Colon)) {
1268 Error(Parser.getTok().getLoc(),
1269 "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the trailing ':'

  return MatchOperand_Success;
}
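// Editorial note (illustrative): operands such as "add x0, x0, #:lo12:var" or
// "movz x0, #:abs_g1:var" reach this function with the lexer positioned on
// the leading ':'.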
1276 AArch64AsmParser::OperandMatchResultTy
1277 AArch64AsmParser::ParseImmWithLSLOperand(
1278 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1279 // FIXME?: I want to live in a world where immediates must start with
1280 // #. Please don't dash my hopes (well, do if you have a good reason).
1281 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1283 SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
1288 return MatchOperand_ParseFail;
1289 else if (Parser.getTok().isNot(AsmToken::Comma)) {
1290 SMLoc E = Parser.getTok().getLoc();
1291 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  Parser.Lex(); // Eat ','

  // The optional operand must be "lsl #N" where N is non-negative.
1299 if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().lower() == "lsl") {
    Parser.Lex(); // Eat 'lsl'
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat '#'
      if (Parser.getTok().isNot(AsmToken::Integer)) {
1307 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
1308 return MatchOperand_ParseFail;
1313 int64_t ShiftAmount = Parser.getTok().getIntVal();
1315 if (ShiftAmount < 0) {
1316 Error(Parser.getTok().getLoc(), "positive shift amount required");
1317 return MatchOperand_ParseFail;
1319 Parser.Lex(); // Eat the number
1321 SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}
1328 AArch64AsmParser::OperandMatchResultTy
1329 AArch64AsmParser::ParseCondCodeOperand(
1330 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1331 if (Parser.getTok().isNot(AsmToken::Identifier))
1332 return MatchOperand_NoMatch;
1334 StringRef Tok = Parser.getTok().getIdentifier();
1335 A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
1337 if (CondCode == A64CC::Invalid)
1338 return MatchOperand_NoMatch;
1340 SMLoc S = Parser.getTok().getLoc();
1341 Parser.Lex(); // Eat condition code
1342 SMLoc E = Parser.getTok().getLoc();
1344 Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
1345 return MatchOperand_Success;
1348 AArch64AsmParser::OperandMatchResultTy
1349 AArch64AsmParser::ParseCRxOperand(
1350 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1351 SMLoc S = Parser.getTok().getLoc();
1352 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1353 Error(S, "Expected cN operand where 0 <= N <= 15");
1354 return MatchOperand_ParseFail;
1357 std::string LowerTok = Parser.getTok().getIdentifier().lower();
1358 StringRef Tok(LowerTok);
1359 if (Tok[0] != 'c') {
1360 Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1366 if (BadNum || CRNum > 15) {
1367 Error(S, "Expected cN operand where 0 <= N <= 15");
1368 return MatchOperand_ParseFail;
  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex(); // Eat the cN identifier
  SMLoc E = Parser.getTok().getLoc();
1376 Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
1377 return MatchOperand_Success;
1380 AArch64AsmParser::OperandMatchResultTy
1381 AArch64AsmParser::ParseFPImmOperand(
1382 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1384 // FIXME?: I want to live in a world where immediates must start with
1385 // #. Please don't dash my hopes (well, do if you have a good reason).
1386 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1388 SMLoc S = Parser.getTok().getLoc();
1389 Parser.Lex(); // Eat '#'
1391 bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
1395 } else if (Parser.getTok().is(AsmToken::Plus)) {
1396 Parser.Lex(); // Eat '+'
1399 if (Parser.getTok().isNot(AsmToken::Real)) {
1400 Error(S, "Expected floating-point immediate");
1401 return MatchOperand_ParseFail;
1404 APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
1405 if (Negative) RealVal.changeSign();
1406 double DblVal = RealVal.convertToDouble();
1408 Parser.Lex(); // Eat real number
1409 SMLoc E = Parser.getTok().getLoc();
1411 Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
1412 return MatchOperand_Success;
1416 // Automatically generated
1417 static unsigned MatchRegisterName(StringRef Name);
bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout, SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
1429 size_t DotPos = LowerReg.find('.');
1431 RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
1432 if (RegNum == AArch64::NoRegister) {
1433 RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
1434 .Case("ip0", AArch64::X16)
1435 .Case("ip1", AArch64::X17)
1436 .Case("fp", AArch64::X29)
1437 .Case("lr", AArch64::X30)
1438 .Default(AArch64::NoRegister);
    if (RegNum == AArch64::NoRegister)
      return false;
  }

  SMLoc S = Tok.getLoc();
1444 RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
1446 if (DotPos == StringRef::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
1450 // '.2d','.b' etc for vector registers.
1452 // This StringSwitch validates the input and (perhaps more importantly)
1453 // gives us a permanent string to use in the token (a pointer into LowerReg
1454 // would go out of scope when we return).
1455 LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
1456 std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
1457 Layout = StringSwitch<const char *>(LayoutText)
1458 .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
1459 .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
1460 .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
1461 .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
1464 if (Layout.size() == 0) {
      // Malformed register
      return false;
    }
  }

  return true;
}
1473 AArch64AsmParser::OperandMatchResultTy
1474 AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1475 uint32_t &NumLanes) {
1478 SMLoc RegEndLoc, LayoutLoc;
1479 SMLoc S = Parser.getTok().getLoc();
1481 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
1482 return MatchOperand_NoMatch;
1484 Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
1486 if (Layout.size() != 0) {
1487 unsigned long long TmpLanes = 0;
1488 llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
1489 if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
1496 default: llvm_unreachable("Invalid layout specifier");
1497 case 'b': NumLanes = 16; break;
1498 case 'h': NumLanes = 8; break;
1499 case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      }
    }
    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex(); // Eat the register token
  return MatchOperand_Success;
}
bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
1514 // This callback is used for things like DWARF frame directives in
1515 // assembly. They don't care about things like NEON layouts or lanes, they
1516 // just want to be able to produce the DWARF register number.
1517 StringRef LayoutSpec;
1518 SMLoc RegEndLoc, LayoutLoc;
1519 StartLoc = Parser.getTok().getLoc();
  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex(); // Eat the register token
  EndLoc = Parser.getTok().getLoc();
  return false;
}
1530 AArch64AsmParser::OperandMatchResultTy
1531 AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
1532 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1533 // Since these operands occur in very limited circumstances, without
1534 // alternatives, we actually signal an error if there is no match. If relaxing
1535 // this, beware of unintended consequences: an immediate will be accepted
1536 // during matching, no matter how it gets into the AArch64Operand.
1537 const AsmToken &Tok = Parser.getTok();
1538 SMLoc S = Tok.getLoc();
  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
1546 return MatchOperand_ParseFail;
1549 Parser.Lex(); // We're done with the identifier. Eat it
1551 SMLoc E = Parser.getTok().getLoc();
1552 const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
1553 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
1554 return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'

    const MCExpr *ImmVal;
1559 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1560 return MatchOperand_ParseFail;
1562 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
1563 if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
1564 Error(S, "Invalid immediate for instruction");
1565 return MatchOperand_ParseFail;
1568 SMLoc E = Parser.getTok().getLoc();
1569 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
1570 return MatchOperand_Success;
1573 Error(S, "unexpected operand for instruction");
1574 return MatchOperand_ParseFail;
1577 AArch64AsmParser::OperandMatchResultTy
1578 AArch64AsmParser::ParseSysRegOperand(
1579 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1580 const AsmToken &Tok = Parser.getTok();
1582 // Any MSR/MRS operand will be an identifier, and we want to store it as some
1583 // kind of string: SPSel is valid for two different forms of MSR with two
1584 // different encodings. There's no collision at the moment, but the potential
1586 if (!Tok.is(AsmToken::Identifier)) {
1587 return MatchOperand_NoMatch;
1590 SMLoc S = Tok.getLoc();
1591 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
1592 Parser.Lex(); // Eat identifier
1594 return MatchOperand_Success;
1597 AArch64AsmParser::OperandMatchResultTy
1598 AArch64AsmParser::ParseLSXAddressOperand(
1599 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  SMLoc RegEndLoc, LayoutLoc;
  StringRef Layout;
  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
1606 || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
      || Layout.size() != 0) {
    // Check Layout.size because we don't want to let "x3.4s" or similar
    // slip through as a base register.
    return MatchOperand_NoMatch;
  }
1612 Parser.Lex(); // Eat register
1614 if (Parser.getTok().is(AsmToken::RBrac)) {
1616 SMLoc E = Parser.getTok().getLoc();
1617 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1618 return MatchOperand_Success;
1621 // Otherwise, only ", #0" is valid
1623 if (Parser.getTok().isNot(AsmToken::Comma)) {
1624 Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
1625 return MatchOperand_ParseFail;
1627 Parser.Lex(); // Eat ','
1629 if (Parser.getTok().isNot(AsmToken::Hash)) {
1630 Error(Parser.getTok().getLoc(), "expected '#0'");
1631 return MatchOperand_ParseFail;
1633 Parser.Lex(); // Eat '#'
1635 if (Parser.getTok().isNot(AsmToken::Integer)
1636 || Parser.getTok().getIntVal() != 0 ) {
1637 Error(Parser.getTok().getLoc(), "expected '#0'");
1638 return MatchOperand_ParseFail;
1640 Parser.Lex(); // Eat '0'
1642 SMLoc E = Parser.getTok().getLoc();
1643 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1644 return MatchOperand_Success;
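// Editorial note (illustrative): this parses the address part of load/store
// exclusive forms such as "ldxr x0, [x1]" or "stxr w2, x0, [x1, #0]",
// wrapping the base register so the matcher can distinguish it from a plain
// register operand.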
1647 AArch64AsmParser::OperandMatchResultTy
1648 AArch64AsmParser::ParseShiftExtend(
1649 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1650 StringRef IDVal = Parser.getTok().getIdentifier();
1651 std::string LowerID = IDVal.lower();
1653 A64SE::ShiftExtSpecifiers Spec =
1654 StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
1655 .Case("lsl", A64SE::LSL)
1656 .Case("lsr", A64SE::LSR)
1657 .Case("asr", A64SE::ASR)
1658 .Case("ror", A64SE::ROR)
1659 .Case("uxtb", A64SE::UXTB)
1660 .Case("uxth", A64SE::UXTH)
1661 .Case("uxtw", A64SE::UXTW)
1662 .Case("uxtx", A64SE::UXTX)
1663 .Case("sxtb", A64SE::SXTB)
1664 .Case("sxth", A64SE::SXTH)
1665 .Case("sxtw", A64SE::SXTW)
1666 .Case("sxtx", A64SE::SXTX)
1667 .Default(A64SE::Invalid);
1669 if (Spec == A64SE::Invalid)
1670 return MatchOperand_NoMatch;
  SMLoc S, E;
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat the shift/extend specifier

  if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
1678 Spec != A64SE::ASR && Spec != A64SE::ROR) {
1679 // The shift amount can be omitted for the extending versions, but not real
1681 // add x0, x0, x0, uxtb
1682 // is valid, and equivalent to
1683 // add x0, x0, x0, uxtb #0
1685 if (Parser.getTok().is(AsmToken::Comma) ||
1686 Parser.getTok().is(AsmToken::EndOfStatement) ||
1687 Parser.getTok().is(AsmToken::RBrac)) {
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
1694 // Eat # at beginning of immediate
1695 if (!Parser.getTok().is(AsmToken::Hash)) {
1696 Error(Parser.getTok().getLoc(),
1697 "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'

  // Make sure we do actually have a number
1703 if (!Parser.getTok().is(AsmToken::Integer)) {
1704 Error(Parser.getTok().getLoc(),
1705 "expected integer shift amount");
1706 return MatchOperand_ParseFail;
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex(); // Eat the number

  E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));
  return MatchOperand_Success;
}
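// Editorial note (illustrative): this accepts trailing operands such as
// "lsl #3", "asr #31", or a bare "sxtw", the last of which gets an implicit
// amount of 0 as handled above.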
1718 // FIXME: We would really like to be able to tablegen'erate this.
1719 bool AArch64AsmParser::
1720 validateInstruction(MCInst &Inst,
1721 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1722 switch (Inst.getOpcode()) {
1723 case AArch64::BFIwwii:
1724 case AArch64::BFIxxii:
1725 case AArch64::SBFIZwwii:
1726 case AArch64::SBFIZxxii:
1727 case AArch64::UBFIZwwii:
1728 case AArch64::UBFIZxxii: {
1729 unsigned ImmOps = Inst.getNumOperands() - 2;
1730 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
1731 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
1733 if (ImmR != 0 && ImmS >= ImmR) {
1734 return Error(Operands[4]->getStartLoc(),
1735 "requested insert overflows register");
1739 case AArch64::BFXILwwii:
1740 case AArch64::BFXILxxii:
1741 case AArch64::SBFXwwii:
1742 case AArch64::SBFXxxii:
1743 case AArch64::UBFXwwii:
1744 case AArch64::UBFXxxii: {
1745 unsigned ImmOps = Inst.getNumOperands() - 2;
1746 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
1747 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
1748 int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64; break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32; break;
    }

    if (ImmS >= RegWidth || ImmS < ImmR) {
1759 return Error(Operands[4]->getStartLoc(),
1760 "requested extract overflows register");
1764 case AArch64::ICix: {
1765 int64_t ImmVal = Inst.getOperand(0).getImm();
1766 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
1767 if (!A64IC::NeedsRegister(ICOp)) {
1768 return Error(Operands[1]->getStartLoc(),
1769 "specified IC op does not use a register");
1773 case AArch64::ICi: {
1774 int64_t ImmVal = Inst.getOperand(0).getImm();
1775 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
1776 if (A64IC::NeedsRegister(ICOp)) {
1777 return Error(Operands[1]->getStartLoc(),
1778 "specified IC op requires a register");
1782 case AArch64::TLBIix: {
1783 int64_t ImmVal = Inst.getOperand(0).getImm();
1784 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
1785 if (!A64TLBI::NeedsRegister(TLBIOp)) {
1786 return Error(Operands[1]->getStartLoc(),
1787 "specified TLBI op does not use a register");
1791 case AArch64::TLBIi: {
1792 int64_t ImmVal = Inst.getOperand(0).getImm();
1793 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
1794 if (A64TLBI::NeedsRegister(TLBIOp)) {
1795 return Error(Operands[1]->getStartLoc(),
1796 "specified TLBI op requires a register");
1806 // Parses the instruction *together with* all operands, appending each parsed
1807 // operand to the "Operands" list
1808 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
1809 StringRef Name, SMLoc NameLoc,
1810 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1811 size_t CondCodePos = Name.find('.');
1813 StringRef Mnemonic = Name.substr(0, CondCodePos);
1814 Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
1816 if (CondCodePos != StringRef::npos) {
1817 // We have a condition code
1818 SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
1819 StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
1820 A64CC::CondCodes Code;
1822 Code = A64StringToCondCode(CondStr);
1824 if (Code == A64CC::Invalid) {
1825 Error(S, "invalid condition code");
      Error(S, "invalid condition code");
      Parser.EatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
1832 Operands.push_back(AArch64Operand::CreateToken(".", DotL));
1833 SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
1834 Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
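    // Editorial note (illustrative): "b.eq target" is split here into the
    // mnemonic token "b", a "." token and an "eq" condition-code operand
    // before the remaining operands are parsed.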
1837 // Now we parse the operands of this instruction
1838 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1839 // Read the first operand.
1840 if (ParseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
1846 Parser.Lex(); // Eat the comma.
1848 // Parse and remember the operand.
1849 if (ParseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
1856 // consider (i.e. notional operands not separated by commas). Both are due
1857 // to memory specifiers:
1858 // + An RBrac will end an address for load/store/prefetch
1859 // + An '!' will indicate a pre-indexed operation.
1861 // It's someone else's responsibility to make sure these tokens are sane
1862 // in the given context!
1863 if (Parser.getTok().is(AsmToken::RBrac)) {
1864 SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex(); // Eat ']'
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
1870 SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex(); // Eat '!'
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
1878 SMLoc Loc = getLexer().getLoc();
1879 Parser.EatToEndOfStatement();
1880 return Error(Loc, "expected comma before next operand");
  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
1890 StringRef IDVal = DirectiveID.getIdentifier();
1891 if (IDVal == ".hword")
1892 return ParseDirectiveWord(2, DirectiveID.getLoc());
1893 else if (IDVal == ".word")
1894 return ParseDirectiveWord(4, DirectiveID.getLoc());
1895 else if (IDVal == ".xword")
1896 return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// parseDirectiveWord
1904 /// ::= .word [ expression (, expression)* ]
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;
      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
      if (getLexer().is(AsmToken::EndOfStatement))
        break;
      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }
  Parser.Lex();
  return false;
}
// ParseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
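//
// For example, the directive normally appears inside the usual TLS
// descriptor call sequence (illustrative):
//   adrp  x0, :tlsdesc:var
//   ldr   x1, [x0, #:tlsdesc_lo12:var]
//   add   x0, x0, #:tlsdesc_lo12:var
//   .tlsdesccall var
//   blr   x1
// The directive itself emits no bytes; it only marks the call site.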
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().ParseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
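  // TLSDESCCALL is a zero-size pseudo-instruction: emitting it attaches the
  // TLS-descriptor relocation information to this point in the stream
  // without producing any encoding of its own.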
  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                               MCStreamer &Out,
                                               unsigned &ErrorInfo,
                                               bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);
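  // On failure, ErrorInfo is either ~0U (nothing specific to blame) or the
  // index of the offending operand in Operands; the diagnostics below use it
  // to point at the right source location.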
  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");
  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
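    // For example, "fmov d0, #0.5" and "fmov d0, d1" are both accepted, so
    // the diagnostic suggests both forms.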
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 508]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1016]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
               "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
             "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  }

  llvm_unreachable("Implement any new match types added!");
}
void AArch64Operand::print(raw_ostream &OS) const {
    OS << "<CondCode: " << CondCode.Code << ">";
    OS << "<fpimm: " << FPImm.Val << ">";
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    getImm()->print(OS);
    OS << "<register " << getReg() << '>';
    OS << '\'' << getToken() << '\'';
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    llvm_unreachable("No idea how to print this kind of operand");
}

void AArch64Operand::dump() const {
/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"