1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
// AArch64AsmParser - target assembly parser for AArch64. Parses mnemonics and
// operands into MCInsts via the TableGen-generated matcher, and handles the
// AArch64-specific directives (.word, .tlsdesccall, .loh, .ltorg).
// NOTE(review): this excerpt is line-sampled; blank lines, access specifiers,
// and several closing braces are not visible here.
40 class AArch64AsmParser : public MCTargetAsmParser {
42 StringRef Mnemonic; ///< Instruction mnemonic.
// Convenience accessor; assumes the streamer's target streamer is an
// AArch64TargetStreamer (guaranteed by the constructor below).
46 AArch64TargetStreamer &getTargetStreamer() {
47 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
48 return static_cast<AArch64TargetStreamer &>(TS);
51 MCAsmParser &getParser() const { return Parser; }
52 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
// Location of the token currently being examined by the lexer.
54 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// Hand-written parsers for constructs the generated matcher cannot handle.
56 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
57 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
58 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
59 int tryParseRegister();
60 int tryMatchVectorRegister(StringRef &Kind, bool expected);
61 bool parseRegister(OperandVector &Operands);
62 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
63 bool parseVectorList(OperandVector &Operands);
64 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostic helpers forwarding to the generic MCAsmParser.
67 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
68 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
69 bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Directive handlers, dispatched from ParseDirective().
71 bool parseDirectiveWord(unsigned Size, SMLoc L);
72 bool parseDirectiveTLSDescCall(SMLoc L);
74 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
75 bool parseDirectiveLtorg(SMLoc L);
77 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79 OperandVector &Operands, MCStreamer &Out,
81 bool MatchingInlineAsm) override;
82 /// @name Auto-generated Match Functions
85 #define GET_ASSEMBLER_HEADER
86 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers invoked by the generated matcher.
90 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
91 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
92 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
93 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
94 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
95 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
96 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
97 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
98 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
99 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
100 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
101 bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match diagnostics, generated alongside the matcher.
104 enum AArch64MatchResultTy {
105 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
106 #define GET_OPERAND_DIAGNOSTIC_TYPES
107 #include "AArch64GenAsmMatcher.inc"
109 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
110 const MCInstrInfo &MII,
111 const MCTargetOptions &Options)
112 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
113 MCAsmParserExtension::Initialize(_Parser);
// Ensure a target streamer exists so getTargetStreamer() is always safe;
// the streamer takes ownership of the allocation.
114 if (Parser.getStreamer().getTargetStreamer() == nullptr)
115 new AArch64TargetStreamer(Parser.getStreamer());
117 // Initialize the set of available features.
118 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface.
121 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
122 SMLoc NameLoc, OperandVector &Operands) override;
123 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
124 bool ParseDirective(AsmToken DirectiveID) override;
125 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
126 unsigned Kind) override;
// Decompose a symbolic expression into its ELF/Darwin modifier and addend;
// used by the operand predicates below.
128 static bool classifySymbolRef(const MCExpr *Expr,
129 AArch64MCExpr::VariantKind &ELFRefKind,
130 MCSymbolRefExpr::VariantKind &DarwinRefKind,
133 } // end anonymous namespace
137 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
// instruction operand. Exactly one of the per-kind structs below is active,
// discriminated by Kind (a tagged union; the union declaration itself falls
// in a sampled-out gap of this excerpt).
139 class AArch64Operand : public MCParsedAsmOperand {
157 SMLoc StartLoc, EndLoc;
162 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Register list such as "{ v0.8b, v1.8b }": first register, count, and the
// element type/count encoded from the vector-kind suffix.
170 struct VectorListOp {
173 unsigned NumElements;
174 unsigned ElementKind;
177 struct VectorIndexOp {
// Immediate with an explicit shift, e.g. "#1, lsl #12" on ADD/SUB.
185 struct ShiftedImmOp {
187 unsigned ShiftAmount;
191 AArch64CC::CondCode Code;
195 unsigned Val; // Encoded 8-bit representation.
199 unsigned Val; // Not the enum since not all values have names.
205 uint64_t FeatureBits; // We need to pass through information about which
206 // core we are compiling for so that the SysReg
207 // Mappers can appropriately conditionalize.
218 struct ShiftExtendOp {
219 AArch64_AM::ShiftExtendType Type;
221 bool HasExplicitAmount;
231 struct VectorListOp VectorList;
232 struct VectorIndexOp VectorIndex;
234 struct ShiftedImmOp ShiftedImm;
235 struct CondCodeOp CondCode;
236 struct FPImmOp FPImm;
237 struct BarrierOp Barrier;
238 struct SysRegOp SysReg;
239 struct SysCRImmOp SysCRImm;
240 struct PrefetchOp Prefetch;
241 struct ShiftExtendOp ShiftExtend;
244 // Keep the MCContext around as the MCExprs may need manipulated during
245 // the add<>Operands() calls.
249 AArch64Operand(KindTy K, MCContext &_Ctx)
250 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor: only the member matching o's Kind is meaningful, but the
// copies are cheap POD assignments (dispatch on Kind is in sampled-out lines).
252 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
254 StartLoc = o.StartLoc;
264 ShiftedImm = o.ShiftedImm;
267 CondCode = o.CondCode;
279 VectorList = o.VectorList;
282 VectorIndex = o.VectorIndex;
288 SysCRImm = o.SysCRImm;
291 Prefetch = o.Prefetch;
294 ShiftExtend = o.ShiftExtend;
// Kind-checked accessors. Each asserts the operand holds the requested kind
// before reading the corresponding union member.
299 /// getStartLoc - Get the location of the first token of this operand.
300 SMLoc getStartLoc() const override { return StartLoc; }
301 /// getEndLoc - Get the location of the last token of this operand.
302 SMLoc getEndLoc() const override { return EndLoc; }
304 StringRef getToken() const {
305 assert(Kind == k_Token && "Invalid access!");
306 return StringRef(Tok.Data, Tok.Length);
309 bool isTokenSuffix() const {
310 assert(Kind == k_Token && "Invalid access!");
314 const MCExpr *getImm() const {
315 assert(Kind == k_Immediate && "Invalid access!");
319 const MCExpr *getShiftedImmVal() const {
320 assert(Kind == k_ShiftedImm && "Invalid access!");
321 return ShiftedImm.Val;
324 unsigned getShiftedImmShift() const {
325 assert(Kind == k_ShiftedImm && "Invalid access!");
326 return ShiftedImm.ShiftAmount;
329 AArch64CC::CondCode getCondCode() const {
330 assert(Kind == k_CondCode && "Invalid access!");
331 return CondCode.Code;
334 unsigned getFPImm() const {
335 assert(Kind == k_FPImm && "Invalid access!");
339 unsigned getBarrier() const {
340 assert(Kind == k_Barrier && "Invalid access!");
344 unsigned getReg() const override {
345 assert(Kind == k_Register && "Invalid access!");
349 unsigned getVectorListStart() const {
350 assert(Kind == k_VectorList && "Invalid access!");
351 return VectorList.RegNum;
354 unsigned getVectorListCount() const {
355 assert(Kind == k_VectorList && "Invalid access!");
356 return VectorList.Count;
359 unsigned getVectorIndex() const {
360 assert(Kind == k_VectorIndex && "Invalid access!");
361 return VectorIndex.Val;
364 StringRef getSysReg() const {
365 assert(Kind == k_SysReg && "Invalid access!");
366 return StringRef(SysReg.Data, SysReg.Length);
369 uint64_t getSysRegFeatureBits() const {
370 assert(Kind == k_SysReg && "Invalid access!");
371 return SysReg.FeatureBits;
374 unsigned getSysCR() const {
375 assert(Kind == k_SysCR && "Invalid access!");
379 unsigned getPrefetch() const {
380 assert(Kind == k_Prefetch && "Invalid access!");
384 AArch64_AM::ShiftExtendType getShiftExtendType() const {
385 assert(Kind == k_ShiftExtend && "Invalid access!");
386 return ShiftExtend.Type;
389 unsigned getShiftExtendAmount() const {
390 assert(Kind == k_ShiftExtend && "Invalid access!");
391 return ShiftExtend.Amount;
394 bool hasShiftExtendAmount() const {
395 assert(Kind == k_ShiftExtend && "Invalid access!");
396 return ShiftExtend.HasExplicitAmount;
// Immediate-class predicates used by the generated matcher. Each accepts only
// a constant expression (non-constants fail via the dyn_cast, in lines the
// sampling dropped) and range-checks the value per the instruction encoding.
399 bool isImm() const override { return Kind == k_Immediate; }
400 bool isMem() const override { return false; }
// Signed 9-bit immediate for LDUR/STUR-style addressing: [-256, 255].
401 bool isSImm9() const {
404 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
407 int64_t Val = MCE->getValue();
408 return (Val >= -256 && Val < 256);
// Signed 7-bit immediate scaled by 4/8/16 for LDP/STP offsets.
410 bool isSImm7s4() const {
413 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
416 int64_t Val = MCE->getValue();
417 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
419 bool isSImm7s8() const {
422 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
425 int64_t Val = MCE->getValue();
426 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
428 bool isSImm7s16() const {
431 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
434 int64_t Val = MCE->getValue();
435 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Is a symbolic expression usable as a scaled unsigned 12-bit load/store
// offset (e.g. ":lo12:sym")? Only page-offset-style modifiers qualify.
438 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
439 AArch64MCExpr::VariantKind ELFRefKind;
440 MCSymbolRefExpr::VariantKind DarwinRefKind;
442 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
444 // If we don't understand the expression, assume the best and
445 // let the fixup and relocation code deal with it.
449 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
450 ELFRefKind == AArch64MCExpr::VK_LO12 ||
451 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
452 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
453 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
454 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
455 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
456 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
457 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
458 // Note that we don't range-check the addend. It's adjusted modulo page
459 // size when converted, so there is no "out of range" condition when using
461 return Addend >= 0 && (Addend % Scale) == 0;
462 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
463 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
464 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset scaled by the access size: constant must be a
// non-negative multiple of Scale below Scale*4096; symbolic expressions
// defer to isSymbolicUImm12Offset above.
471 template <int Scale> bool isUImm12Offset() const {
475 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
477 return isSymbolicUImm12Offset(getImm(), Scale);
479 int64_t Val = MCE->getValue();
480 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Plain inclusive/exclusive integer ranges, named isImm<lo>_<hi>.
483 bool isImm0_7() const {
486 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
489 int64_t Val = MCE->getValue();
490 return (Val >= 0 && Val < 8);
492 bool isImm1_8() const {
495 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
498 int64_t Val = MCE->getValue();
499 return (Val > 0 && Val < 9);
501 bool isImm0_15() const {
504 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
507 int64_t Val = MCE->getValue();
508 return (Val >= 0 && Val < 16);
510 bool isImm1_16() const {
513 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
516 int64_t Val = MCE->getValue();
517 return (Val > 0 && Val < 17);
519 bool isImm0_31() const {
522 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
525 int64_t Val = MCE->getValue();
526 return (Val >= 0 && Val < 32);
528 bool isImm1_31() const {
531 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
534 int64_t Val = MCE->getValue();
535 return (Val >= 1 && Val < 32);
537 bool isImm1_32() const {
540 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
543 int64_t Val = MCE->getValue();
544 return (Val >= 1 && Val < 33);
546 bool isImm0_63() const {
549 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
552 int64_t Val = MCE->getValue();
553 return (Val >= 0 && Val < 64);
555 bool isImm1_63() const {
558 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
561 int64_t Val = MCE->getValue();
562 return (Val >= 1 && Val < 64);
564 bool isImm1_64() const {
567 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
570 int64_t Val = MCE->getValue();
571 return (Val >= 1 && Val < 65);
573 bool isImm0_127() const {
576 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
579 int64_t Val = MCE->getValue();
580 return (Val >= 0 && Val < 128);
582 bool isImm0_255() const {
585 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
588 int64_t Val = MCE->getValue();
589 return (Val >= 0 && Val < 256);
591 bool isImm0_65535() const {
594 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
597 int64_t Val = MCE->getValue();
598 return (Val >= 0 && Val < 65536);
600 bool isImm32_63() const {
603 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
606 int64_t Val = MCE->getValue();
607 return (Val >= 32 && Val < 64);
// Bitmask immediates encodable by the 32-/64-bit logical instructions.
609 bool isLogicalImm32() const {
612 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
615 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 32);
617 bool isLogicalImm64() const {
620 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
623 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
625 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// Valid immediate for ADD/SUB: either a plain 12-bit unsigned constant, a
// shifted immediate with LSL #0 or LSL #12, or a page-offset-style symbolic
// modifier that the fixup machinery can resolve.
626 bool isAddSubImm() const {
627 if (!isShiftedImm() && !isImm())
632 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
633 if (isShiftedImm()) {
634 unsigned Shift = ShiftedImm.ShiftAmount;
635 Expr = ShiftedImm.Val;
636 if (Shift != 0 && Shift != 12)
642 AArch64MCExpr::VariantKind ELFRefKind;
643 MCSymbolRefExpr::VariantKind DarwinRefKind;
645 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
646 DarwinRefKind, Addend)) {
647 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
648 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
649 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
650 || ELFRefKind == AArch64MCExpr::VK_LO12
651 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
652 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
653 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
654 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
655 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
656 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
657 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
660 // Otherwise it should be a real immediate in range:
661 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
662 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
664 bool isCondCode() const { return Kind == k_CondCode; }
// 64-bit FMOV/MOVI modified-immediate (abcdefgh pattern, type 10).
665 bool isSIMDImmType10() const {
668 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
671 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: the byte displacement must fit in the
// instruction's signed field after the implicit >>2 word scaling
// (alignment of the low bits is checked in sampled-out lines).
673 bool isBranchTarget26() const {
676 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
679 int64_t Val = MCE->getValue();
682 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
684 bool isPCRelLabel19() const {
687 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
690 int64_t Val = MCE->getValue();
693 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
695 bool isBranchTarget14() const {
698 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
701 int64_t Val = MCE->getValue();
704 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// Shared helper for the MOVZ/MOVK symbol-modifier predicates: the immediate
// must be a symbolic expression whose ELF modifier is in AllowedModifiers
// (Darwin modifiers are never accepted here).
708 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
712 AArch64MCExpr::VariantKind ELFRefKind;
713 MCSymbolRefExpr::VariantKind DarwinRefKind;
715 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
716 DarwinRefKind, Addend)) {
719 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
722 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
723 if (ELFRefKind == AllowedModifiers[i])
// Per-halfword MOVZ modifiers (:abs_g3:, :abs_g2:, TLS variants, ...).
730 bool isMovZSymbolG3() const {
731 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
732 return isMovWSymbol(Variants);
735 bool isMovZSymbolG2() const {
736 static AArch64MCExpr::VariantKind Variants[] = {
737 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
738 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
739 return isMovWSymbol(Variants);
742 bool isMovZSymbolG1() const {
743 static AArch64MCExpr::VariantKind Variants[] = {
744 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
745 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
746 AArch64MCExpr::VK_DTPREL_G1,
748 return isMovWSymbol(Variants);
751 bool isMovZSymbolG0() const {
752 static AArch64MCExpr::VariantKind Variants[] = {
753 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
754 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
755 return isMovWSymbol(Variants);
// Per-halfword MOVK modifiers (the _NC, non-checking variants).
758 bool isMovKSymbolG3() const {
759 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
760 return isMovWSymbol(Variants);
763 bool isMovKSymbolG2() const {
764 static AArch64MCExpr::VariantKind Variants[] = {
765 AArch64MCExpr::VK_ABS_G2_NC};
766 return isMovWSymbol(Variants);
769 bool isMovKSymbolG1() const {
770 static AArch64MCExpr::VariantKind Variants[] = {
771 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
772 AArch64MCExpr::VK_DTPREL_G1_NC
774 return isMovWSymbol(Variants);
777 bool isMovKSymbolG0() const {
778 static AArch64MCExpr::VariantKind Variants[] = {
779 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
780 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
782 return isMovWSymbol(Variants);
// Can a plain constant be encoded as "MOVZ Rd, #imm16, lsl #Shift"?
// i.e. only the 16-bit field at Shift is populated.
785 template<int RegWidth, int Shift>
786 bool isMOVZMovAlias() const {
787 if (!isImm()) return false;
789 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
790 if (!CE) return false;
791 uint64_t Value = CE->getValue();
// For 32-bit registers only the low 32 bits participate.
794 Value &= 0xffffffffULL;
796 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
797 if (Value == 0 && Shift != 0)
800 return (Value & ~(0xffffULL << Shift)) == 0;
// Can the constant be encoded as a MOVN (i.e. its complement fits a single
// 16-bit field), given that no MOVZ encoding exists for it?
803 template<int RegWidth, int Shift>
804 bool isMOVNMovAlias() const {
805 if (!isImm()) return false;
807 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
808 if (!CE) return false;
809 uint64_t Value = CE->getValue();
811 // MOVZ takes precedence over MOVN.
812 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
813 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
// The bitwise NOT of Value happens in lines this excerpt omits.
818 Value &= 0xffffffffULL;
820 return (Value & ~(0xffffULL << Shift)) == 0;
823 bool isFPImm() const { return Kind == k_FPImm; }
824 bool isBarrier() const { return Kind == k_Barrier; }
825 bool isSysReg() const { return Kind == k_SysReg; }
// System-register name predicates: accept only names the MRS/MSR mappers
// recognize for the current core (via the stored feature bits).
826 bool isMRSSystemRegister() const {
827 if (!isSysReg()) return false;
829 bool IsKnownRegister;
830 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
831 Mapper.fromString(getSysReg(), IsKnownRegister);
833 return IsKnownRegister;
835 bool isMSRSystemRegister() const {
836 if (!isSysReg()) return false;
838 bool IsKnownRegister;
839 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
840 Mapper.fromString(getSysReg(), IsKnownRegister);
842 return IsKnownRegister;
844 bool isSystemPStateField() const {
845 if (!isSysReg()) return false;
847 bool IsKnownRegister;
848 AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
850 return IsKnownRegister;
// Register predicates: scalar vs vector is a flag on the register operand;
// the *Lo / GPR variants additionally constrain the register class.
852 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
853 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
854 bool isVectorRegLo() const {
855 return Kind == k_Register && Reg.isVector &&
856 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
// A 64-bit GPR written where a 32-bit GPR is expected (diagnosed later).
859 bool isGPR32as64() const {
860 return Kind == k_Register && !Reg.isVector &&
861 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
864 bool isGPR64sp0() const {
865 return Kind == k_Register && !Reg.isVector &&
866 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
869 /// Is this a vector list with the type implicit (presumably attached to the
870 /// instruction itself)?
871 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
872 return Kind == k_VectorList && VectorList.Count == NumRegs &&
873 !VectorList.ElementKind;
// Vector list with an explicit type suffix, e.g. "{ v0.4s, v1.4s }".
876 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
877 bool isTypedVectorList() const {
878 if (Kind != k_VectorList)
880 if (VectorList.Count != NumRegs)
882 if (VectorList.ElementKind != ElementKind)
884 return VectorList.NumElements == NumElements;
// Lane-index predicates: upper bound is the lane count for that element size.
887 bool isVectorIndex1() const {
888 return Kind == k_VectorIndex && VectorIndex.Val == 1;
890 bool isVectorIndexB() const {
891 return Kind == k_VectorIndex && VectorIndex.Val < 16;
893 bool isVectorIndexH() const {
894 return Kind == k_VectorIndex && VectorIndex.Val < 8;
896 bool isVectorIndexS() const {
897 return Kind == k_VectorIndex && VectorIndex.Val < 4;
899 bool isVectorIndexD() const {
900 return Kind == k_VectorIndex && VectorIndex.Val < 2;
902 bool isToken() const override { return Kind == k_Token; }
903 bool isTokenEqual(StringRef Str) const {
904 return Kind == k_Token && getToken() == Str;
906 bool isSysCR() const { return Kind == k_SysCR; }
907 bool isPrefetch() const { return Kind == k_Prefetch; }
908 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// Shift/extend predicates: each narrows the ShiftExtend operand to the
// shift types and amounts a particular instruction form accepts.
909 bool isShifter() const {
910 if (!isShiftExtend())
913 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
914 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
915 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
916 ST == AArch64_AM::MSL);
918 bool isExtend() const {
919 if (!isShiftExtend())
922 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
923 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
924 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
925 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
926 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
927 ET == AArch64_AM::LSL) &&
928 getShiftExtendAmount() <= 4;
931 bool isExtend64() const {
934 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
935 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
936 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
938 bool isExtendLSL64() const {
941 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
942 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
943 ET == AArch64_AM::LSL) &&
944 getShiftExtendAmount() <= 4;
// Register-offset memory extends: amount must be 0 or log2 of the access
// size in bytes (Width is in bits).
947 template<int Width> bool isMemXExtend() const {
950 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
951 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
952 (getShiftExtendAmount() == Log2_32(Width / 8) ||
953 getShiftExtendAmount() == 0);
956 template<int Width> bool isMemWExtend() const {
959 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
960 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
961 (getShiftExtendAmount() == Log2_32(Width / 8) ||
962 getShiftExtendAmount() == 0);
965 template <unsigned width>
966 bool isArithmeticShifter() const {
970 // An arithmetic shifter is LSL, LSR, or ASR.
971 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
972 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
973 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
976 template <unsigned width>
977 bool isLogicalShifter() const {
981 // A logical shifter is LSL, LSR, ASR or ROR.
982 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
983 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
984 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
985 getShiftExtendAmount() < width;
988 bool isMovImm32Shifter() const {
992 // A 32-bit MOVi shifter is LSL of 0 or 16.
993 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
994 if (ST != AArch64_AM::LSL)
996 uint64_t Val = getShiftExtendAmount();
997 return (Val == 0 || Val == 16);
1000 bool isMovImm64Shifter() const {
1004 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1005 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1006 if (ST != AArch64_AM::LSL)
1008 uint64_t Val = getShiftExtendAmount();
1009 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1012 bool isLogicalVecShifter() const {
1016 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1017 unsigned Shift = getShiftExtendAmount();
1018 return getShiftExtendType() == AArch64_AM::LSL &&
1019 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1022 bool isLogicalVecHalfWordShifter() const {
1023 if (!isLogicalVecShifter())
1026 // A logical vector shifter is a left shift by 0 or 8.
1027 unsigned Shift = getShiftExtendAmount();
1028 return getShiftExtendType() == AArch64_AM::LSL &&
1029 (Shift == 0 || Shift == 8);
1032 bool isMoveVecShifter() const {
1033 if (!isShiftExtend())
1036 // A logical vector shifter is a left shift by 8 or 16.
1037 unsigned Shift = getShiftExtendAmount();
1038 return getShiftExtendType() == AArch64_AM::MSL &&
1039 (Shift == 8 || Shift == 16);
1042 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1043 // to LDUR/STUR when the offset is not legal for the former but is for
1044 // the latter. As such, in addition to checking for being a legal unscaled
1045 // address, also check that it is not a legal scaled address. This avoids
1046 // ambiguity in the matcher.
1048 bool isSImm9OffsetFB() const {
1049 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: a page-aligned displacement within +/-4GB (21-bit page count).
1052 bool isAdrpLabel() const {
1053 // Validation was handled during parsing, so we just sanity check that
1054 // something didn't go haywire.
1058 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1059 int64_t Val = CE->getValue();
1060 int64_t Min = - (4096 * (1LL << (21 - 1)));
1061 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1062 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: a signed 21-bit byte displacement.
1068 bool isAdrLabel() const {
1069 // Validation was handled during parsing, so we just sanity check that
1070 // something didn't go haywire.
1074 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1075 int64_t Val = CE->getValue();
1076 int64_t Min = - (1LL << (21 - 1));
1077 int64_t Max = ((1LL << (21 - 1)) - 1);
1078 return Val >= Min && Val <= Max;
// add<Class>Operands emitters: convert this parsed operand into MCOperands
// appended to Inst. N is the operand count the matcher expects for the class.
1084 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1085 // Add as immediates when possible. Null MCExpr = 0.
1087 Inst.addOperand(MCOperand::CreateImm(0));
1088 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1089 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1091 Inst.addOperand(MCOperand::CreateExpr(Expr));
1094 void addRegOperands(MCInst &Inst, unsigned N) const {
1095 assert(N == 1 && "Invalid number of operands!");
1096 Inst.addOperand(MCOperand::CreateReg(getReg()));
// Translate a (wrongly written) 64-bit GPR to the 32-bit GPR with the same
// encoding, so aliases like "mov w0, x1" can still be matched.
1099 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1100 assert(N == 1 && "Invalid number of operands!");
1102 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1104 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1105 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1106 RI->getEncodingValue(getReg()));
1108 Inst.addOperand(MCOperand::CreateReg(Reg));
// Vector registers are parsed as Q registers; remap Q->D for 64-bit forms
// by exploiting the consecutive numbering of the D and Q register ranges.
1111 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1112 assert(N == 1 && "Invalid number of operands!");
1114 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1115 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1118 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1119 assert(N == 1 && "Invalid number of operands!");
1121 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1122 Inst.addOperand(MCOperand::CreateReg(getReg()));
1125 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1127 Inst.addOperand(MCOperand::CreateReg(getReg()));
// Map the list's first Q register onto the D-tuple / Q-tuple pseudo register
// of the right arity (FirstRegs is indexed by list length - 1).
1130 template <unsigned NumRegs>
1131 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1132 assert(N == 1 && "Invalid number of operands!");
1133 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1134 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1135 unsigned FirstReg = FirstRegs[NumRegs - 1];
1138 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1141 template <unsigned NumRegs>
1142 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1143 assert(N == 1 && "Invalid number of operands!");
1144 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1145 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1146 unsigned FirstReg = FirstRegs[NumRegs - 1];
1149 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
// Lane indices are emitted as plain immediates.
1152 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1153 assert(N == 1 && "Invalid number of operands!");
1154 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1157 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1158 assert(N == 1 && "Invalid number of operands!");
1159 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1162 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1163 assert(N == 1 && "Invalid number of operands!");
1164 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1167 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1169 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1172 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1173 assert(N == 1 && "Invalid number of operands!");
1174 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1177 void addImmOperands(MCInst &Inst, unsigned N) const {
1178 assert(N == 1 && "Invalid number of operands!");
1179 // If this is a pageoff symrefexpr with an addend, adjust the addend
1180 // to be only the page-offset portion. Otherwise, just add the expr
1182 addExpr(Inst, getImm());
// ADD/SUB immediates are two MCOperands: the value and the LSL amount.
1185 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 2 && "Invalid number of operands!");
1187 if (isShiftedImm()) {
1188 addExpr(Inst, getShiftedImmVal());
1189 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1191 addExpr(Inst, getImm());
1192 Inst.addOperand(MCOperand::CreateImm(0));
1196 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1197 assert(N == 1 && "Invalid number of operands!");
1198 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
// Constant ADRP targets are emitted in pages (>>12); symbolic targets are
// left as expressions for the fixup to resolve.
1201 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1202 assert(N == 1 && "Invalid number of operands!");
1203 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1205 addExpr(Inst, getImm());
1207 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1210 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1211 addImmOperands(Inst, N);
// Scaled unsigned offsets are emitted pre-divided by the access size.
1215 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1216 assert(N == 1 && "Invalid number of operands!");
1217 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1220 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1223 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1226 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1227 assert(N == 1 && "Invalid number of operands!");
1228 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1229 assert(MCE && "Invalid constant immediate operand!");
1230 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Scaled signed 7-bit offsets (LDP/STP): emit value / scale.
1233 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1234 assert(N == 1 && "Invalid number of operands!");
1235 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1236 assert(MCE && "Invalid constant immediate operand!");
1237 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1240 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1243 assert(MCE && "Invalid constant immediate operand!");
1244 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1247 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1248 assert(N == 1 && "Invalid number of operands!");
1249 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1250 assert(MCE && "Invalid constant immediate operand!");
1251 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1254 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1255 assert(N == 1 && "Invalid number of operands!");
1256 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1257 assert(MCE && "Invalid constant immediate operand!");
1258 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1261 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1262 assert(N == 1 && "Invalid number of operands!");
1263 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1264 assert(MCE && "Invalid constant immediate operand!");
1265 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1268 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1269 assert(N == 1 && "Invalid number of operands!");
1270 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1271 assert(MCE && "Invalid constant immediate operand!");
1272 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1275 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1276 assert(N == 1 && "Invalid number of operands!");
1277 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1278 assert(MCE && "Invalid constant immediate operand!");
1279 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1282 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1285 assert(MCE && "Invalid constant immediate operand!");
1286 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1292 assert(MCE && "Invalid constant immediate operand!");
1293 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1296 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1299 assert(MCE && "Invalid constant immediate operand!");
1300 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1303 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1306 assert(MCE && "Invalid constant immediate operand!");
1307 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1310 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1313 assert(MCE && "Invalid constant immediate operand!");
1314 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1317 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1318 assert(N == 1 && "Invalid number of operands!");
1319 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1320 assert(MCE && "Invalid constant immediate operand!");
1321 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1324 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1325 assert(N == 1 && "Invalid number of operands!");
1326 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1327 assert(MCE && "Invalid constant immediate operand!");
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1331 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1334 assert(MCE && "Invalid constant immediate operand!");
1335 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1338 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1340 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1341 assert(MCE && "Invalid constant immediate operand!");
1342 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1345 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1348 assert(MCE && "Invalid constant immediate operand!");
1349 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1352 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1355 assert(MCE && "Invalid logical immediate operand!");
1356 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1357 Inst.addOperand(MCOperand::CreateImm(encoding));
1360 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1361 assert(N == 1 && "Invalid number of operands!");
1362 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1363 assert(MCE && "Invalid logical immediate operand!");
1364 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1365 Inst.addOperand(MCOperand::CreateImm(encoding));
1368 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1369 assert(N == 1 && "Invalid number of operands!");
1370 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1371 assert(MCE && "Invalid immediate operand!");
1372 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1373 Inst.addOperand(MCOperand::CreateImm(encoding));
1376 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1377 // Branch operands don't encode the low bits, so shift them off
1378 // here. If it's a label, however, just put it on directly as there's
1379 // not enough information now to do anything.
1380 assert(N == 1 && "Invalid number of operands!");
1381 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1383 addExpr(Inst, getImm());
1386 assert(MCE && "Invalid constant immediate operand!");
1387 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1390 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1391 // Branch operands don't encode the low bits, so shift them off
1392 // here. If it's a label, however, just put it on directly as there's
1393 // not enough information now to do anything.
1394 assert(N == 1 && "Invalid number of operands!");
1395 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1397 addExpr(Inst, getImm());
1400 assert(MCE && "Invalid constant immediate operand!");
1401 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1404 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1405 // Branch operands don't encode the low bits, so shift them off
1406 // here. If it's a label, however, just put it on directly as there's
1407 // not enough information now to do anything.
1408 assert(N == 1 && "Invalid number of operands!");
1409 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1411 addExpr(Inst, getImm());
1414 assert(MCE && "Invalid constant immediate operand!");
1415 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1418 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1419 assert(N == 1 && "Invalid number of operands!");
1420 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1423 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1424 assert(N == 1 && "Invalid number of operands!");
1425 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1428 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1429 assert(N == 1 && "Invalid number of operands!");
1432 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1433 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1435 Inst.addOperand(MCOperand::CreateImm(Bits));
1438 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1439 assert(N == 1 && "Invalid number of operands!");
1442 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1443 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1445 Inst.addOperand(MCOperand::CreateImm(Bits));
1448 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1449 assert(N == 1 && "Invalid number of operands!");
1453 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1455 Inst.addOperand(MCOperand::CreateImm(Bits));
1458 void addSysCROperands(MCInst &Inst, unsigned N) const {
1459 assert(N == 1 && "Invalid number of operands!");
1460 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1463 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1464 assert(N == 1 && "Invalid number of operands!");
1465 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1468 void addShifterOperands(MCInst &Inst, unsigned N) const {
1469 assert(N == 1 && "Invalid number of operands!");
1471 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1472 Inst.addOperand(MCOperand::CreateImm(Imm));
1475 void addExtendOperands(MCInst &Inst, unsigned N) const {
1476 assert(N == 1 && "Invalid number of operands!");
1477 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1478 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1479 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1480 Inst.addOperand(MCOperand::CreateImm(Imm));
1483 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1484 assert(N == 1 && "Invalid number of operands!");
1485 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1486 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1487 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1488 Inst.addOperand(MCOperand::CreateImm(Imm));
1491 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1492 assert(N == 2 && "Invalid number of operands!");
1493 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1494 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1495 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1496 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1499 // For 8-bit load/store instructions with a register offset, both the
1500 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1501 // they're disambiguated by whether the shift was explicit or implicit rather
1503 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1504 assert(N == 2 && "Invalid number of operands!");
1505 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1506 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1507 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1508 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1512 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1513 assert(N == 1 && "Invalid number of operands!");
1515 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1516 uint64_t Value = CE->getValue();
1517 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1521 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1522 assert(N == 1 && "Invalid number of operands!");
1524 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1525 uint64_t Value = CE->getValue();
1526 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1529 void print(raw_ostream &OS) const override;
1531 static std::unique_ptr<AArch64Operand>
1532 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1533 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1534 Op->Tok.Data = Str.data();
1535 Op->Tok.Length = Str.size();
1536 Op->Tok.IsSuffix = IsSuffix;
1542 static std::unique_ptr<AArch64Operand>
1543 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1544 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1545 Op->Reg.RegNum = RegNum;
1546 Op->Reg.isVector = isVector;
1552 static std::unique_ptr<AArch64Operand>
1553 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1554 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1555 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1556 Op->VectorList.RegNum = RegNum;
1557 Op->VectorList.Count = Count;
1558 Op->VectorList.NumElements = NumElements;
1559 Op->VectorList.ElementKind = ElementKind;
1565 static std::unique_ptr<AArch64Operand>
1566 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1567 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1568 Op->VectorIndex.Val = Idx;
1574 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1575 SMLoc E, MCContext &Ctx) {
1576 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1583 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1584 unsigned ShiftAmount,
1587 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1588 Op->ShiftedImm .Val = Val;
1589 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1595 static std::unique_ptr<AArch64Operand>
1596 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1597 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1598 Op->CondCode.Code = Code;
1604 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1606 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1607 Op->FPImm.Val = Val;
1613 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1615 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1616 Op->Barrier.Val = Val;
1622 static std::unique_ptr<AArch64Operand>
1623 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1624 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1625 Op->SysReg.Data = Str.data();
1626 Op->SysReg.Length = Str.size();
1627 Op->SysReg.FeatureBits = FeatureBits;
1633 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1634 SMLoc E, MCContext &Ctx) {
1635 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1636 Op->SysCRImm.Val = Val;
1642 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1644 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1645 Op->Prefetch.Val = Val;
1651 static std::unique_ptr<AArch64Operand>
1652 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1653 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1654 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1655 Op->ShiftExtend.Type = ShOp;
1656 Op->ShiftExtend.Amount = Val;
1657 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1664 } // end anonymous namespace.
1666 void AArch64Operand::print(raw_ostream &OS) const {
1669 OS << "<fpimm " << getFPImm() << "("
1670 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1674 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1676 OS << "<barrier " << Name << ">";
1678 OS << "<barrier invalid #" << getBarrier() << ">";
1682 getImm()->print(OS);
1684 case k_ShiftedImm: {
1685 unsigned Shift = getShiftedImmShift();
1686 OS << "<shiftedimm ";
1687 getShiftedImmVal()->print(OS);
1688 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1692 OS << "<condcode " << getCondCode() << ">";
1695 OS << "<register " << getReg() << ">";
1697 case k_VectorList: {
1698 OS << "<vectorlist ";
1699 unsigned Reg = getVectorListStart();
1700 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1701 OS << Reg + i << " ";
1706 OS << "<vectorindex " << getVectorIndex() << ">";
1709 OS << "<sysreg: " << getSysReg() << '>';
1712 OS << "'" << getToken() << "'";
1715 OS << "c" << getSysCR();
1719 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1721 OS << "<prfop " << Name << ">";
1723 OS << "<prfop invalid #" << getPrefetch() << ">";
1726 case k_ShiftExtend: {
1727 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1728 << getShiftExtendAmount();
1729 if (!hasShiftExtendAmount())
1737 /// @name Auto-generated Match Functions
1740 static unsigned MatchRegisterName(StringRef Name);
1744 static unsigned matchVectorRegName(StringRef Name) {
1745 return StringSwitch<unsigned>(Name)
1746 .Case("v0", AArch64::Q0)
1747 .Case("v1", AArch64::Q1)
1748 .Case("v2", AArch64::Q2)
1749 .Case("v3", AArch64::Q3)
1750 .Case("v4", AArch64::Q4)
1751 .Case("v5", AArch64::Q5)
1752 .Case("v6", AArch64::Q6)
1753 .Case("v7", AArch64::Q7)
1754 .Case("v8", AArch64::Q8)
1755 .Case("v9", AArch64::Q9)
1756 .Case("v10", AArch64::Q10)
1757 .Case("v11", AArch64::Q11)
1758 .Case("v12", AArch64::Q12)
1759 .Case("v13", AArch64::Q13)
1760 .Case("v14", AArch64::Q14)
1761 .Case("v15", AArch64::Q15)
1762 .Case("v16", AArch64::Q16)
1763 .Case("v17", AArch64::Q17)
1764 .Case("v18", AArch64::Q18)
1765 .Case("v19", AArch64::Q19)
1766 .Case("v20", AArch64::Q20)
1767 .Case("v21", AArch64::Q21)
1768 .Case("v22", AArch64::Q22)
1769 .Case("v23", AArch64::Q23)
1770 .Case("v24", AArch64::Q24)
1771 .Case("v25", AArch64::Q25)
1772 .Case("v26", AArch64::Q26)
1773 .Case("v27", AArch64::Q27)
1774 .Case("v28", AArch64::Q28)
1775 .Case("v29", AArch64::Q29)
1776 .Case("v30", AArch64::Q30)
1777 .Case("v31", AArch64::Q31)
1781 static bool isValidVectorKind(StringRef Name) {
1782 return StringSwitch<bool>(Name.lower())
1792 // Accept the width neutral ones, too, for verbose syntax. If those
1793 // aren't used in the right places, the token operand won't match so
1794 // all will work out.
1802 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1803 char &ElementKind) {
1804 assert(isValidVectorKind(Name));
1806 ElementKind = Name.lower()[Name.size() - 1];
1809 if (Name.size() == 2)
1812 // Parse the lane count
1813 Name = Name.drop_front();
1814 while (isdigit(Name.front())) {
1815 NumElements = 10 * NumElements + (Name.front() - '0');
1816 Name = Name.drop_front();
1820 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1822 StartLoc = getLoc();
1823 RegNo = tryParseRegister();
1824 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1825 return (RegNo == (unsigned)-1);
1828 /// tryParseRegister - Try to parse a register name. The token must be an
1829 /// Identifier when called, and if it is a register name the token is eaten and
1830 /// the register is added to the operand list.
1831 int AArch64AsmParser::tryParseRegister() {
1832 const AsmToken &Tok = Parser.getTok();
1833 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1835 std::string lowerCase = Tok.getString().lower();
1836 unsigned RegNum = MatchRegisterName(lowerCase);
1837 // Also handle a few aliases of registers.
1839 RegNum = StringSwitch<unsigned>(lowerCase)
1840 .Case("fp", AArch64::FP)
1841 .Case("lr", AArch64::LR)
1842 .Case("x31", AArch64::XZR)
1843 .Case("w31", AArch64::WZR)
1849 Parser.Lex(); // Eat identifier token.
1853 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1854 /// kind specifier. If it is a register specifier, eat the token and return it.
1855 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1856 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1857 TokError("vector register expected");
1861 StringRef Name = Parser.getTok().getString();
1862 // If there is a kind specifier, it's separated from the register name by
1864 size_t Start = 0, Next = Name.find('.');
1865 StringRef Head = Name.slice(Start, Next);
1866 unsigned RegNum = matchVectorRegName(Head);
1868 if (Next != StringRef::npos) {
1869 Kind = Name.slice(Next, StringRef::npos);
1870 if (!isValidVectorKind(Kind)) {
1871 TokError("invalid vector kind qualifier");
1875 Parser.Lex(); // Eat the register token.
1880 TokError("vector register expected");
1884 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1885 AArch64AsmParser::OperandMatchResultTy
1886 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1889 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1890 Error(S, "Expected cN operand where 0 <= N <= 15");
1891 return MatchOperand_ParseFail;
1894 StringRef Tok = Parser.getTok().getIdentifier();
1895 if (Tok[0] != 'c' && Tok[0] != 'C') {
1896 Error(S, "Expected cN operand where 0 <= N <= 15");
1897 return MatchOperand_ParseFail;
1901 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1902 if (BadNum || CRNum > 15) {
1903 Error(S, "Expected cN operand where 0 <= N <= 15");
1904 return MatchOperand_ParseFail;
1907 Parser.Lex(); // Eat identifier token.
1909 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1910 return MatchOperand_Success;
1913 /// tryParsePrefetch - Try to parse a prefetch operand.
1914 AArch64AsmParser::OperandMatchResultTy
1915 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1917 const AsmToken &Tok = Parser.getTok();
1918 // Either an identifier for named values or a 5-bit immediate.
1919 bool Hash = Tok.is(AsmToken::Hash);
1920 if (Hash || Tok.is(AsmToken::Integer)) {
1922 Parser.Lex(); // Eat hash token.
1923 const MCExpr *ImmVal;
1924 if (getParser().parseExpression(ImmVal))
1925 return MatchOperand_ParseFail;
1927 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1929 TokError("immediate value expected for prefetch operand");
1930 return MatchOperand_ParseFail;
1932 unsigned prfop = MCE->getValue();
1934 TokError("prefetch operand out of range, [0,31] expected");
1935 return MatchOperand_ParseFail;
1938 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1939 return MatchOperand_Success;
1942 if (Tok.isNot(AsmToken::Identifier)) {
1943 TokError("pre-fetch hint expected");
1944 return MatchOperand_ParseFail;
1948 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1950 TokError("pre-fetch hint expected");
1951 return MatchOperand_ParseFail;
1954 Parser.Lex(); // Eat identifier token.
1955 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1956 return MatchOperand_Success;
1959 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
1961 AArch64AsmParser::OperandMatchResultTy
1962 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
1966 if (Parser.getTok().is(AsmToken::Hash)) {
1967 Parser.Lex(); // Eat hash token.
1970 if (parseSymbolicImmVal(Expr))
1971 return MatchOperand_ParseFail;
1973 AArch64MCExpr::VariantKind ELFRefKind;
1974 MCSymbolRefExpr::VariantKind DarwinRefKind;
1976 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
1977 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
1978 ELFRefKind == AArch64MCExpr::VK_INVALID) {
1979 // No modifier was specified at all; this is the syntax for an ELF basic
1980 // ADRP relocation (unfortunately).
1982 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
1983 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
1984 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
1986 Error(S, "gotpage label reference not allowed an addend");
1987 return MatchOperand_ParseFail;
1988 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
1989 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
1990 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
1991 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
1992 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
1993 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
1994 // The operand must be an @page or @gotpage qualified symbolref.
1995 Error(S, "page or gotpage label reference expected");
1996 return MatchOperand_ParseFail;
2000 // We have either a label reference possibly with addend or an immediate. The
2001 // addend is a raw value here. The linker will adjust it to only reference the
2003 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2004 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2006 return MatchOperand_Success;
2009 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2011 AArch64AsmParser::OperandMatchResultTy
2012 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2016 if (Parser.getTok().is(AsmToken::Hash)) {
2017 Parser.Lex(); // Eat hash token.
2020 if (getParser().parseExpression(Expr))
2021 return MatchOperand_ParseFail;
2023 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2024 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2026 return MatchOperand_Success;
2029 /// tryParseFPImm - A floating point immediate expression operand.
2030 AArch64AsmParser::OperandMatchResultTy
2031 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2035 if (Parser.getTok().is(AsmToken::Hash)) {
2036 Parser.Lex(); // Eat '#'
2040 // Handle negation, as that still comes through as a separate token.
2041 bool isNegative = false;
2042 if (Parser.getTok().is(AsmToken::Minus)) {
2046 const AsmToken &Tok = Parser.getTok();
2047 if (Tok.is(AsmToken::Real)) {
2048 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2049 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2050 // If we had a '-' in front, toggle the sign bit.
2051 IntVal ^= (uint64_t)isNegative << 63;
2052 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2053 Parser.Lex(); // Eat the token.
2054 // Check for out of range values. As an exception, we let Zero through,
2055 // as we handle that special case in post-processing before matching in
2056 // order to use the zero register for it.
2057 if (Val == -1 && !RealVal.isZero()) {
2058 TokError("expected compatible register or floating-point constant");
2059 return MatchOperand_ParseFail;
2061 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2062 return MatchOperand_Success;
2064 if (Tok.is(AsmToken::Integer)) {
2066 if (!isNegative && Tok.getString().startswith("0x")) {
2067 Val = Tok.getIntVal();
2068 if (Val > 255 || Val < 0) {
2069 TokError("encoded floating point value out of range");
2070 return MatchOperand_ParseFail;
2073 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2074 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2075 // If we had a '-' in front, toggle the sign bit.
2076 IntVal ^= (uint64_t)isNegative << 63;
2077 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2079 Parser.Lex(); // Eat the token.
2080 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2081 return MatchOperand_Success;
2085 return MatchOperand_NoMatch;
2087 TokError("invalid floating point immediate");
2088 return MatchOperand_ParseFail;
2091 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2092 AArch64AsmParser::OperandMatchResultTy
2093 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2096 if (Parser.getTok().is(AsmToken::Hash))
2097 Parser.Lex(); // Eat '#'
2098 else if (Parser.getTok().isNot(AsmToken::Integer))
2099 // Operand should start from # or should be integer, emit error otherwise.
2100 return MatchOperand_NoMatch;
2103 if (parseSymbolicImmVal(Imm))
2104 return MatchOperand_ParseFail;
2105 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2106 uint64_t ShiftAmount = 0;
2107 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2109 int64_t Val = MCE->getValue();
2110 if (Val > 0xfff && (Val & 0xfff) == 0) {
2111 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2115 SMLoc E = Parser.getTok().getLoc();
2116 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2118 return MatchOperand_Success;
2124 // The optional operand must be "lsl #N" where N is non-negative.
2125 if (!Parser.getTok().is(AsmToken::Identifier) ||
2126 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2127 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2128 return MatchOperand_ParseFail;
2134 if (Parser.getTok().is(AsmToken::Hash)) {
2138 if (Parser.getTok().isNot(AsmToken::Integer)) {
2139 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2140 return MatchOperand_ParseFail;
2143 int64_t ShiftAmount = Parser.getTok().getIntVal();
2145 if (ShiftAmount < 0) {
2146 Error(Parser.getTok().getLoc(), "positive shift amount required");
2147 return MatchOperand_ParseFail;
2149 Parser.Lex(); // Eat the number
2151 SMLoc E = Parser.getTok().getLoc();
2152 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2153 S, E, getContext()));
2154 return MatchOperand_Success;
2157 /// parseCondCodeString - Parse a Condition Code string.
2158 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2159 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2160 .Case("eq", AArch64CC::EQ)
2161 .Case("ne", AArch64CC::NE)
2162 .Case("cs", AArch64CC::HS)
2163 .Case("hs", AArch64CC::HS)
2164 .Case("cc", AArch64CC::LO)
2165 .Case("lo", AArch64CC::LO)
2166 .Case("mi", AArch64CC::MI)
2167 .Case("pl", AArch64CC::PL)
2168 .Case("vs", AArch64CC::VS)
2169 .Case("vc", AArch64CC::VC)
2170 .Case("hi", AArch64CC::HI)
2171 .Case("ls", AArch64CC::LS)
2172 .Case("ge", AArch64CC::GE)
2173 .Case("lt", AArch64CC::LT)
2174 .Case("gt", AArch64CC::GT)
2175 .Case("le", AArch64CC::LE)
2176 .Case("al", AArch64CC::AL)
2177 .Case("nv", AArch64CC::NV)
2178 .Default(AArch64CC::Invalid);
2182 /// parseCondCode - Parse a Condition Code operand.
2183 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2184 bool invertCondCode) {
2186 const AsmToken &Tok = Parser.getTok();
2187 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2189 StringRef Cond = Tok.getString();
2190 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2191 if (CC == AArch64CC::Invalid)
2192 return TokError("invalid condition code");
2193 Parser.Lex(); // Eat identifier token.
2195 if (invertCondCode) {
2196 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2197 return TokError("condition codes AL and NV are invalid for this instruction");
2198 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2202 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument. Parse them if present. Returns MatchOperand_NoMatch (eating no
/// tokens) when the current token is not a shift/extend specifier, so callers
/// may try other interpretations.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();
  // Specifiers are matched case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  // Not a shift/extend specifier at all: report "no match" without consuming
  // anything.
  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Shift forms (lsl/lsr/asr/ror/msl) require an explicit amount.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;

  Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must resolve to a compile-time constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    TokError("expected #imm after shift specifier");
    return MatchOperand_ParseFail;

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// On success the Operands vector holds the "sys" mnemonic token plus the
/// #op1, Cn, Cm, #op2 operands (and an optional trailing register).
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  // None of these aliases take a '.'-suffixed form.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

// Emits the four SYS operands (#op1, Cn, Cm, #op2) for one alias encoding.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
  Expr = MCConstantExpr::Create(op1, getContext());                            \
  Operands.push_back(                                                          \
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));             \
  Operands.push_back(                                                          \
      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));             \
  Operands.push_back(                                                          \
      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));             \
  Expr = MCConstantExpr::Create(op2, getContext());                            \
  Operands.push_back(                                                          \
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));

  // Operation names are matched case-insensitively against the sets defined
  // for each alias mnemonic by the ARMv8 ARM.
  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
      return TokError("invalid operand for IC instruction");
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
      return TokError("invalid operand for DC instruction");
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
      return TokError("invalid operand for AT instruction");
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
      return TokError("invalid operand for TLBI instruction");

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" act on everything and take no register;
  // all others require one (e.g. the address for "dc cvac, x0").
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");

  // Diagnose a register that is present/absent contrary to the op's needs.
  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");

  Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (dmb/dsb/isb): either a #imm in the range 0-15 or a named barrier option.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    // The operand must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    // The barrier option is a 4-bit field, so only 0-15 are encodable.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
    return MatchOperand_Success;

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;

  unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;

  AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand (an identifier). Name
/// validation is deferred to operand matching; the current feature bits are
/// recorded so availability can be checked there.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
                     STI.getFeatureBits(), getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
/// tryParseVectorRegister - Parse a vector register operand, including any
/// arrangement qualifier (emitted as a literal token) and an optional
/// "[imm]" lane index. Returns true on failure.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))

  // Check for a vector register specifier first.
  int64_t Reg = tryMatchVectorRegister(Kind, false);
    AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand so the matcher can check the arrangement.
    AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
    // Lane indices must be compile-time constants.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
      TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// parseRegister - Parse a non-vector register operand (trying the vector
/// form first). Returns true on failure.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
    AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
      if (getLexer().getKind() == AsmToken::RBrac) {
        SMLoc RBracS = getLoc();
        // Push "[", "1", "]" as separate literal tokens to match the
        // instruction string exactly.
        AArch64Operand::CreateToken("[", false, LBracS, getContext()));
        AArch64Operand::CreateToken("1", false, IntS, getContext()));
        AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":specifier:" ELF relocation modifier (e.g. ":lo12:sym"). The
/// parsed expression is returned in ImmVal, wrapped in an AArch64MCExpr when
/// a modifier was present. Returns true on failure.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat ':"
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier)) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");

    // Modifier names are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");

    Parser.Lex(); // Eat ':'

  if (getParser().parseExpression(ImmVal))

  // Wrap the expression so the modifier survives to relocation emission.
  ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions,
/// e.g. "{ v0.8b - v3.8b }" or "{ v0.4s, v1.4s }", with an optional trailing
/// lane index "[imm]". Returns true on failure.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  Parser.Lex(); // Eat left bracket token.
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  int64_t PrevReg = FirstReg;

  // Range form: "{ vN.T - vM.T }".
  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Register numbers wrap modulo 32 (v31 -> v0).
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");

  // Comma-separated form: "{ vN.T, vN+1.T, ... }".
  while (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma token.

    SMLoc Loc = getLoc();
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Registers must be incremental (with wraparound at 31)
    if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
        (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
      return Error(Loc, "registers must be sequential");

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
    // Lane indices must be compile-time constants.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
      TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register that may carry a
/// trailing ", #0" (e.g. the "xN, #0" form accepted by some exclusive-load
/// instructions). The index, if present, must be exactly #0.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  unsigned RegNum = MatchRegisterName(Tok.getString().lower());

  MCContext &Ctx = getContext();
  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  // Only 64-bit GPRs (including SP) qualify for this operand.
  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat register

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
    return MatchOperand_Success;
  Parser.Lex(); // Eat comma.

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat hash

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;

  // Any index other than a literal constant 0 is rejected.
  const MCExpr *ImmVal;
  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;

  AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
  return MatchOperand_Success;
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic. isCondCode requests that the
/// operand be parsed as a condition code; invertCondCode additionally inverts
/// it (used for cset/cinc-style aliases). Returns true on failure.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)

  // Nothing custom, so do general case parsing.
  switch (getLexer().getKind()) {
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    return parseOperand(Operands, false, false);
  case AsmToken::LCurly:
    return parseVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    if (getParser().parseExpression(IdVal))
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    if (getLexer().is(AsmToken::Hash))

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // Only the floating-point compare mnemonics may take a real literal,
      // and only the value 0.0.
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      AArch64Operand::CreateToken("#0", false, S, getContext()));
      AArch64Operand::CreateToken(".0", false, S, getContext()));

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
  case AsmToken::Equal: {
    SMLoc Loc = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(Loc, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal) && Operands.size() >= 2 &&
        static_cast<AArch64Operand &>(*Operands[1]).isReg()) {
      bool IsXReg = AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
          Operands[1]->getReg());
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Shift out trailing 16-bit-aligned zero chunks so the value fits a
      // single movz with an LSL shift (up to #48 for X regs, #16 for W).
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
        Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
            ShiftAmt, true, S, E, Ctx));
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the legacy one-word "b<cond>" spellings to the
  // architectural "b.<cond>" form before splitting on '.'.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();

  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand. The operand position (N) decides
      // whether it should be treated as a condition code for the aliases
      // identified above.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");

  Parser.Lex(); // Consume the EndOfStatement
3199 // FIXME: This entire function is a giant hack to provide us with decent
3200 // operand range validation/diagnostics until TableGen/MC can be extended
3201 // to support autogeneration of this kind of validation.
3202 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3203 SmallVectorImpl<SMLoc> &Loc) {
3204 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3205 // Check for indexed addressing modes w/ the base register being the
3206 // same as a destination/source register or pair load where
3207 // the Rt == Rt2. All of those are undefined behaviour.
3208 switch (Inst.getOpcode()) {
3209 case AArch64::LDPSWpre:
3210 case AArch64::LDPWpost:
3211 case AArch64::LDPWpre:
3212 case AArch64::LDPXpost:
3213 case AArch64::LDPXpre: {
3214 unsigned Rt = Inst.getOperand(1).getReg();
3215 unsigned Rt2 = Inst.getOperand(2).getReg();
3216 unsigned Rn = Inst.getOperand(3).getReg();
3217 if (RI->isSubRegisterEq(Rn, Rt))
3218 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3219 "is also a destination");
3220 if (RI->isSubRegisterEq(Rn, Rt2))
3221 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3222 "is also a destination");
3225 case AArch64::LDPDi:
3226 case AArch64::LDPQi:
3227 case AArch64::LDPSi:
3228 case AArch64::LDPSWi:
3229 case AArch64::LDPWi:
3230 case AArch64::LDPXi: {
3231 unsigned Rt = Inst.getOperand(0).getReg();
3232 unsigned Rt2 = Inst.getOperand(1).getReg();
3234 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3237 case AArch64::LDPDpost:
3238 case AArch64::LDPDpre:
3239 case AArch64::LDPQpost:
3240 case AArch64::LDPQpre:
3241 case AArch64::LDPSpost:
3242 case AArch64::LDPSpre:
3243 case AArch64::LDPSWpost: {
3244 unsigned Rt = Inst.getOperand(1).getReg();
3245 unsigned Rt2 = Inst.getOperand(2).getReg();
3247 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3250 case AArch64::STPDpost:
3251 case AArch64::STPDpre:
3252 case AArch64::STPQpost:
3253 case AArch64::STPQpre:
3254 case AArch64::STPSpost:
3255 case AArch64::STPSpre:
3256 case AArch64::STPWpost:
3257 case AArch64::STPWpre:
3258 case AArch64::STPXpost:
3259 case AArch64::STPXpre: {
3260 unsigned Rt = Inst.getOperand(1).getReg();
3261 unsigned Rt2 = Inst.getOperand(2).getReg();
3262 unsigned Rn = Inst.getOperand(3).getReg();
3263 if (RI->isSubRegisterEq(Rn, Rt))
3264 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3265 "is also a source");
3266 if (RI->isSubRegisterEq(Rn, Rt2))
3267 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3268 "is also a source");
3271 case AArch64::LDRBBpre:
3272 case AArch64::LDRBpre:
3273 case AArch64::LDRHHpre:
3274 case AArch64::LDRHpre:
3275 case AArch64::LDRSBWpre:
3276 case AArch64::LDRSBXpre:
3277 case AArch64::LDRSHWpre:
3278 case AArch64::LDRSHXpre:
3279 case AArch64::LDRSWpre:
3280 case AArch64::LDRWpre:
3281 case AArch64::LDRXpre:
3282 case AArch64::LDRBBpost:
3283 case AArch64::LDRBpost:
3284 case AArch64::LDRHHpost:
3285 case AArch64::LDRHpost:
3286 case AArch64::LDRSBWpost:
3287 case AArch64::LDRSBXpost:
3288 case AArch64::LDRSHWpost:
3289 case AArch64::LDRSHXpost:
3290 case AArch64::LDRSWpost:
3291 case AArch64::LDRWpost:
3292 case AArch64::LDRXpost: {
3293 unsigned Rt = Inst.getOperand(1).getReg();
3294 unsigned Rn = Inst.getOperand(2).getReg();
3295 if (RI->isSubRegisterEq(Rn, Rt))
3296 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3297 "is also a source");
3300 case AArch64::STRBBpost:
3301 case AArch64::STRBpost:
3302 case AArch64::STRHHpost:
3303 case AArch64::STRHpost:
3304 case AArch64::STRWpost:
3305 case AArch64::STRXpost:
3306 case AArch64::STRBBpre:
3307 case AArch64::STRBpre:
3308 case AArch64::STRHHpre:
3309 case AArch64::STRHpre:
3310 case AArch64::STRWpre:
3311 case AArch64::STRXpre: {
3312 unsigned Rt = Inst.getOperand(1).getReg();
3313 unsigned Rn = Inst.getOperand(2).getReg();
3314 if (RI->isSubRegisterEq(Rn, Rt))
3315 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3316 "is also a source");
3321 // Now check immediate ranges. Separate from the above as there is overlap
3322 // in the instructions being checked and this keeps the nested conditionals
3324 switch (Inst.getOpcode()) {
3325 case AArch64::ADDSWri:
3326 case AArch64::ADDSXri:
3327 case AArch64::ADDWri:
3328 case AArch64::ADDXri:
3329 case AArch64::SUBSWri:
3330 case AArch64::SUBSXri:
3331 case AArch64::SUBWri:
3332 case AArch64::SUBXri: {
3333 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3334 // some slight duplication here.
3335 if (Inst.getOperand(2).isExpr()) {
3336 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3337 AArch64MCExpr::VariantKind ELFRefKind;
3338 MCSymbolRefExpr::VariantKind DarwinRefKind;
3340 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3341 return Error(Loc[2], "invalid immediate expression");
3344 // Only allow these with ADDXri.
3345 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3346 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3347 Inst.getOpcode() == AArch64::ADDXri)
3350 // Only allow these with ADDXri/ADDWri
3351 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3352 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3353 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3354 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3355 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3356 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3357 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3358 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3359 (Inst.getOpcode() == AArch64::ADDXri ||
3360 Inst.getOpcode() == AArch64::ADDWri))
3363 // Don't allow expressions in the immediate field otherwise
3364 return Error(Loc[2], "invalid immediate expression");
3373 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3375 case Match_MissingFeature:
3377 "instruction requires a CPU feature not currently enabled");
3378 case Match_InvalidOperand:
3379 return Error(Loc, "invalid operand for instruction");
3380 case Match_InvalidSuffix:
3381 return Error(Loc, "invalid type suffix for instruction");
3382 case Match_InvalidCondCode:
3383 return Error(Loc, "expected AArch64 condition code");
3384 case Match_AddSubRegExtendSmall:
3386 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3387 case Match_AddSubRegExtendLarge:
3389 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3390 case Match_AddSubSecondSource:
3392 "expected compatible register, symbol or integer in range [0, 4095]");
3393 case Match_LogicalSecondSource:
3394 return Error(Loc, "expected compatible register or logical immediate");
3395 case Match_InvalidMovImm32Shift:
3396 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3397 case Match_InvalidMovImm64Shift:
3398 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3399 case Match_AddSubRegShift32:
3401 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3402 case Match_AddSubRegShift64:
3404 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3405 case Match_InvalidFPImm:
3407 "expected compatible register or floating-point constant");
3408 case Match_InvalidMemoryIndexedSImm9:
3409 return Error(Loc, "index must be an integer in range [-256, 255].");
3410 case Match_InvalidMemoryIndexed4SImm7:
3411 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3412 case Match_InvalidMemoryIndexed8SImm7:
3413 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3414 case Match_InvalidMemoryIndexed16SImm7:
3415 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3416 case Match_InvalidMemoryWExtend8:
3418 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3419 case Match_InvalidMemoryWExtend16:
3421 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3422 case Match_InvalidMemoryWExtend32:
3424 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3425 case Match_InvalidMemoryWExtend64:
3427 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3428 case Match_InvalidMemoryWExtend128:
3430 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3431 case Match_InvalidMemoryXExtend8:
3433 "expected 'lsl' or 'sxtx' with optional shift of #0");
3434 case Match_InvalidMemoryXExtend16:
3436 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3437 case Match_InvalidMemoryXExtend32:
3439 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3440 case Match_InvalidMemoryXExtend64:
3442 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3443 case Match_InvalidMemoryXExtend128:
3445 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3446 case Match_InvalidMemoryIndexed1:
3447 return Error(Loc, "index must be an integer in range [0, 4095].");
3448 case Match_InvalidMemoryIndexed2:
3449 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3450 case Match_InvalidMemoryIndexed4:
3451 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3452 case Match_InvalidMemoryIndexed8:
3453 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3454 case Match_InvalidMemoryIndexed16:
3455 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3456 case Match_InvalidImm0_7:
3457 return Error(Loc, "immediate must be an integer in range [0, 7].");
3458 case Match_InvalidImm0_15:
3459 return Error(Loc, "immediate must be an integer in range [0, 15].");
3460 case Match_InvalidImm0_31:
3461 return Error(Loc, "immediate must be an integer in range [0, 31].");
3462 case Match_InvalidImm0_63:
3463 return Error(Loc, "immediate must be an integer in range [0, 63].");
3464 case Match_InvalidImm0_127:
3465 return Error(Loc, "immediate must be an integer in range [0, 127].");
3466 case Match_InvalidImm0_65535:
3467 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3468 case Match_InvalidImm1_8:
3469 return Error(Loc, "immediate must be an integer in range [1, 8].");
3470 case Match_InvalidImm1_16:
3471 return Error(Loc, "immediate must be an integer in range [1, 16].");
3472 case Match_InvalidImm1_32:
3473 return Error(Loc, "immediate must be an integer in range [1, 32].");
3474 case Match_InvalidImm1_64:
3475 return Error(Loc, "immediate must be an integer in range [1, 64].");
3476 case Match_InvalidIndex1:
3477 return Error(Loc, "expected lane specifier '[1]'");
3478 case Match_InvalidIndexB:
3479 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3480 case Match_InvalidIndexH:
3481 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3482 case Match_InvalidIndexS:
3483 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3484 case Match_InvalidIndexD:
3485 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3486 case Match_InvalidLabel:
3487 return Error(Loc, "expected label or encodable integer pc offset");
3489 return Error(Loc, "expected readable system register");
3491 return Error(Loc, "expected writable system register or pstate");
3492 case Match_MnemonicFail:
3493 return Error(Loc, "unrecognized instruction mnemonic");
3495 llvm_unreachable("unexpected error code!");
3499 static const char *getSubtargetFeatureName(unsigned Val);
// Top-level match-and-emit entry point. First applies a number of alias
// rewrites that the auto-generated matcher cannot express (lsl->ubfm,
// bfi/sbfiz/ubfiz->*bfm, bfxil/sbfx/ubfx->*bfm, W/X register fixups for
// the sxt*/uxt* aliases, and fmov with #0.0), then runs the generated
// matcher (short-form NEON table first, long-form second), validates the
// result and either emits the MCInst or reports a targeted diagnostic.
// NOTE(review): this chunk appears to have lost several structural lines
// (guards, braces, a parameter line) during extraction; the spots are
// flagged below — confirm against the full file before relying on it.
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               unsigned &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");

  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();

  // "lsl Rd, Rn, #imm" is rewritten to the equivalent ubfm:
  // immr = (W - imm) mod W, imms = W - 1 - imm, where W is the register
  // width (32 or 64, chosen by the register class of the source operand).
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
      // NOTE(review): Op3CE comes from dyn_cast and is dereferenced here
      // with no visible null-check — confirm the guard wasn't dropped.
      uint64_t Op3Val = Op3CE->getValue();
      uint64_t NewOp3Val = 0;
      uint64_t NewOp4Val = 0;
      if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
        NewOp3Val = (32 - Op3Val) & 0x1f;
        NewOp4Val = 31 - Op3Val;
        NewOp3Val = (64 - Op3Val) & 0x3f;
        NewOp4Val = 63 - Op3Val;

      const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
      const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());

      // Replace the mnemonic, rewrite the shift amount as immr and append
      // imms as a new fourth operand.
      Operands[0] = AArch64Operand::CreateToken(
          "ubfm", false, Op.getStartLoc(), getContext());
      Operands.push_back(AArch64Operand::CreateImm(
          NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
      Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                              Op3.getEndLoc(), getContext());
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          // Register width (32/64) bounds the lsb/width immediates below.
          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          // immr = (W - lsb) mod W; imms = width - 1.
          uint64_t NewOp3Val = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
            NewOp3Val = (32 - Op3Val) & 0x1f;
            NewOp3Val = (64 - Op3Val) & 0x3f;

          uint64_t NewOp4Val = Op4Val - 1;

          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");

          const MCExpr *NewOp3 =
              MCConstantExpr::Create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
            // Pick the *bfm mnemonic matching the alias that was written.
            // NOTE(review): the leading "if (Tok == ...)" of this chain is
            // not visible in this chunk — confirm it wasn't dropped.
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
            llvm_unreachable("No valid mnemonic for alias?");

    // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
    // UBFX -> UBFM aliases.
  } else if (NumOperands == 5 &&
             (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

    if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
      const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

      if (Op3CE && Op4CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t Op4Val = Op4CE->getValue();

        uint64_t RegWidth = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(

        if (Op3Val >= RegWidth)
          return Error(Op3.getStartLoc(),
                       "expected integer in range [0, 31]");
        if (Op4Val < 1 || Op4Val > RegWidth)
          return Error(Op4.getStartLoc(),
                       "expected integer in range [1, 32]");

        // For the extract forms imms = lsb + width - 1.
        uint64_t NewOp4Val = Op3Val + Op4Val - 1;

        if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
          return Error(Op4.getStartLoc(),
                       "requested extract overflows register");

        const MCExpr *NewOp4 =
            MCConstantExpr::Create(NewOp4Val, getContext());
        Operands[4] = AArch64Operand::CreateImm(
            NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          // NOTE(review): leading "if (Tok == ...)" of this chain is not
          // visible in this chunk — confirm it wasn't dropped.
          Operands[0] = AArch64Operand::CreateToken(
              "bfm", false, Op.getStartLoc(), getContext());
        else if (Tok == "sbfx")
          Operands[0] = AArch64Operand::CreateToken(
              "sbfm", false, Op.getStartLoc(), getContext());
        else if (Tok == "ubfx")
          Operands[0] = AArch64Operand::CreateToken(
              "ubfm", false, Op.getStartLoc(), getContext());
          llvm_unreachable("No valid mnemonic for alias?");

  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  //        InstAlias can't quite handle this since the reg classes aren't
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      unsigned Reg = getXRegFromWReg(Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
        unsigned Reg = getXRegFromWReg(Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
        unsigned Reg = getWRegFromXReg(Op.getReg());
        Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());

  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
  if (NumOperands == 3 && Tok == "fmov") {
    AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
    // getFPImm() == (unsigned)-1 is the parser's encoding of #0.0 here;
    // the immediate is replaced by the matching zero register.
    if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
          AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
      // NOTE(review): the definition of 'zreg' (WZR vs XZR selection) is
      // not visible in this chunk — confirm it wasn't dropped.
      Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());

  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success)
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, OperandLocs))

    Out.EmitInstruction(Inst, STI);
  }
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
    std::string Msg = "instruction requires:";
    // NOTE(review): 'Mask' is used below but its definition is not visible
    // in this chunk — confirm it wasn't dropped.
    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
      if (ErrorInfo & Mask) {
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
    return Error(IDLoc, Msg);
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ~0U means the matcher could not attribute the failure to an operand.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())

    // If the match failed on a suffix token operand, tweak the diagnostic
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;

    return showMatchError(ErrorLoc, MatchResult);
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidIndex1:
  case Match_InvalidIndexB:
  case Match_InvalidIndexH:
  case Match_InvalidIndexS:
  case Match_InvalidIndexD:
  case Match_InvalidLabel:
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction");
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
    return showMatchError(ErrorLoc, MatchResult);

  llvm_unreachable("Implement any new match types added!");
3846 /// ParseDirective parses the arm specific directives
3847 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3848 StringRef IDVal = DirectiveID.getIdentifier();
3849 SMLoc Loc = DirectiveID.getLoc();
3850 if (IDVal == ".hword")
3851 return parseDirectiveWord(2, Loc);
3852 if (IDVal == ".word")
3853 return parseDirectiveWord(4, Loc);
3854 if (IDVal == ".xword")
3855 return parseDirectiveWord(8, Loc);
3856 if (IDVal == ".tlsdesccall")
3857 return parseDirectiveTLSDescCall(Loc);
3858 if (IDVal == ".ltorg" || IDVal == ".pool")
3859 return parseDirectiveLtorg(Loc);
3860 return parseDirectiveLOH(IDVal, Loc);
3863 /// parseDirectiveWord
3864 /// ::= .word [ expression (, expression)* ]
3865 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3866 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3868 const MCExpr *Value;
3869 if (getParser().parseExpression(Value))
3872 getParser().getStreamer().EmitValue(Value, Size);
3874 if (getLexer().is(AsmToken::EndOfStatement))
3877 // FIXME: Improve diagnostic.
3878 if (getLexer().isNot(AsmToken::Comma))
3879 return Error(L, "unexpected token in directive");
3888 // parseDirectiveTLSDescCall:
3889 // ::= .tlsdesccall symbol
3890 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3892 if (getParser().parseIdentifier(Name))
3893 return Error(L, "expected symbol after directive");
3895 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3896 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3897 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3900 Inst.setOpcode(AArch64::TLSDESCCALL);
3901 Inst.addOperand(MCOperand::CreateExpr(Expr));
3903 getParser().getStreamer().EmitInstruction(Inst, STI);
3907 /// ::= .loh <lohName | lohId> label1, ..., labelN
3908 /// The number of arguments depends on the loh identifier.
3909 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3910 if (IDVal != MCLOHDirectiveName())
3913 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3914 if (getParser().getTok().isNot(AsmToken::Integer))
3915 return TokError("expected an identifier or a number in directive");
3916 // We successfully get a numeric value for the identifier.
3917 // Check if it is valid.
3918 int64_t Id = getParser().getTok().getIntVal();
3919 Kind = (MCLOHType)Id;
3920 // Check that Id does not overflow MCLOHType.
3921 if (!isValidMCLOHType(Kind) || Id != Kind)
3922 return TokError("invalid numeric identifier in directive");
3924 StringRef Name = getTok().getIdentifier();
3925 // We successfully parse an identifier.
3926 // Check if it is a recognized one.
3927 int Id = MCLOHNameToId(Name);
3930 return TokError("invalid identifier in directive");
3931 Kind = (MCLOHType)Id;
3933 // Consume the identifier.
3935 // Get the number of arguments of this LOH.
3936 int NbArgs = MCLOHIdToNbArgs(Kind);
3938 assert(NbArgs != -1 && "Invalid number of arguments");
3940 SmallVector<MCSymbol *, 3> Args;
3941 for (int Idx = 0; Idx < NbArgs; ++Idx) {
3943 if (getParser().parseIdentifier(Name))
3944 return TokError("expected identifier in directive");
3945 Args.push_back(getContext().GetOrCreateSymbol(Name));
3947 if (Idx + 1 == NbArgs)
3949 if (getLexer().isNot(AsmToken::Comma))
3950 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3953 if (getLexer().isNot(AsmToken::EndOfStatement))
3954 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3956 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
3960 /// parseDirectiveLtorg
3961 /// ::= .ltorg | .pool
3962 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
3963 getTargetStreamer().emitCurrentConstantPool();
3968 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
3969 AArch64MCExpr::VariantKind &ELFRefKind,
3970 MCSymbolRefExpr::VariantKind &DarwinRefKind,
3972 ELFRefKind = AArch64MCExpr::VK_INVALID;
3973 DarwinRefKind = MCSymbolRefExpr::VK_None;
3976 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
3977 ELFRefKind = AE->getKind();
3978 Expr = AE->getSubExpr();
3981 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
3983 // It's a simple symbol reference with no addend.
3984 DarwinRefKind = SE->getKind();
3988 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
3992 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
3995 DarwinRefKind = SE->getKind();
3997 if (BE->getOpcode() != MCBinaryExpr::Add &&
3998 BE->getOpcode() != MCBinaryExpr::Sub)
4001 // See if the addend is is a constant, otherwise there's more going
4002 // on here than we can deal with.
4003 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4007 Addend = AddendExpr->getValue();
4008 if (BE->getOpcode() == MCBinaryExpr::Sub)
4011 // It's some symbol reference + a constant addend, but really
4012 // shouldn't use both Darwin and ELF syntax.
4013 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4014 DarwinRefKind == MCSymbolRefExpr::VK_None;
4017 /// Force static initialization.
4018 extern "C" void LLVMInitializeAArch64AsmParser() {
4019 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4020 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4022 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
4023 RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
4026 #define GET_REGISTER_MATCHER
4027 #define GET_SUBTARGET_FEATURE_NAME
4028 #define GET_MATCHER_IMPLEMENTATION
4029 #include "AArch64GenAsmMatcher.inc"
4031 // Define this matcher function after the auto-generated include so we
4032 // have the match class enum definitions.
4033 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4035 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4036 // If the kind is a token for a literal immediate, check if our asm
4037 // operand matches. This is for InstAliases which have a fixed-value
4038 // immediate in the syntax.
4039 int64_t ExpectedVal;
4042 return Match_InvalidOperand;
4084 return Match_InvalidOperand;
4085 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4087 return Match_InvalidOperand;
4088 if (CE->getValue() == ExpectedVal)
4089 return Match_Success;
4090 return Match_InvalidOperand;