1 //===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "llvm/ADT/APFloat.h"
12 #include "llvm/ADT/SmallString.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/ADT/StringSwitch.h"
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/MC/MCExpr.h"
17 #include "llvm/MC/MCInst.h"
18 #include "llvm/MC/MCParser/MCAsmLexer.h"
19 #include "llvm/MC/MCParser/MCAsmParser.h"
20 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
21 #include "llvm/MC/MCRegisterInfo.h"
22 #include "llvm/MC/MCStreamer.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCSymbol.h"
25 #include "llvm/MC/MCTargetAsmParser.h"
26 #include "llvm/Support/SourceMgr.h"
27 #include "llvm/Support/TargetRegistry.h"
28 #include "llvm/Support/raw_ostream.h"
// X86AsmParser - Target-specific assembly parser for x86: turns AT&T- or
// Intel-syntax assembly text into MCInst instructions.
// NOTE(review): this listing is an excerpt with original line numbers baked
// in and some lines elided; member declarations below are incomplete.
35 class X86AsmParser : public MCTargetAsmParser {
// Per-instruction parse state (e.g. the inline-asm rewrite list); written
// during ParseInstruction and consulted by the Intel operand parsers.
38 ParseInstructionInfo *InstInfo;
40 MCAsmParser &getParser() const { return Parser; }
42 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
// Report a diagnostic unless we are speculatively matching MS-style inline
// asm, in which case the error is suppressed and failure simply returned.
44 bool Error(SMLoc L, const Twine &Msg,
45 ArrayRef<SMRange> Ranges = ArrayRef<SMRange>(),
46 bool MatchingInlineAsm = false) {
47 if (MatchingInlineAsm) return true;
48 return Parser.Error(L, Msg, Ranges);
// Convenience wrapper: emit Msg at Loc and yield a null operand.
51 X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) {
// Operand-parsing entry points. ParseOperand dispatches on the active
// dialect to the AT&T or Intel family of parsers below.
56 X86Operand *ParseOperand();
57 X86Operand *ParseATTOperand();
58 X86Operand *ParseIntelOperand();
59 X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc);
60 X86Operand *ParseIntelOperator(SMLoc StartLoc, unsigned OpKind);
61 X86Operand *ParseIntelMemOperand(unsigned SegReg, SMLoc StartLoc);
62 X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size);
63 X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
65 bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp,
66 SmallString<64> &Err);
// Directive handlers for .word-style data and .code16/32/64 mode switches.
68 bool ParseDirectiveWord(unsigned Size, SMLoc L);
69 bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
71 bool processInstruction(MCInst &Inst,
72 const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
74 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
75 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
76 MCStreamer &Out, unsigned &ErrorInfo,
77 bool MatchingInlineAsm);
79 /// isSrcOp - Returns true if operand is either (%rsi) or %ds:%(rsi)
80 /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
81 bool isSrcOp(X86Operand &Op);
83 /// isDstOp - Returns true if operand is either (%rdi) or %es:(%rdi)
84 /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode.
85 bool isDstOp(X86Operand &Op);
87 bool is64BitMode() const {
88 // FIXME: Can tablegen auto-generate this?
89 return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
// Toggle 64-bit mode in the subtarget and recompute the matcher's
// available-feature mask accordingly.
92 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(X86::Mode64Bit));
93 setAvailableFeatures(FB);
96 /// @name Auto-generated Matcher Functions
99 #define GET_ASSEMBLER_HEADER
100 #include "X86GenAsmMatcher.inc"
105 X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
106 : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {
108 // Initialize the set of available features.
109 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
111 virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
113 virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
115 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
117 virtual bool ParseDirective(AsmToken DirectiveID);
// A nonzero assembler dialect selects Intel syntax (0 = AT&T).
119 bool isParsingIntelSyntax() {
120 return getParser().getAssemblerDialect();
123 } // end anonymous namespace
125 /// @name Auto-generated Match Functions
128 static unsigned MatchRegisterName(StringRef Name);
/// isImmSExti16i8Value - Return true if Value can be encoded as an 8-bit
/// immediate sign-extended into a 16-bit context: the non-negative int8
/// range [0, 0x7F], the 16-bit two's-complement negatives [0xFF80, 0xFFFF],
/// or the 64-bit sign-extended negatives [0xFFFFFFFFFFFFFF80, UINT64_MAX].
static bool isImmSExti16i8Value(uint64_t Value) {
  if (Value <= 0x000000000000007FULL)
    return true;
  if (Value >= 0x000000000000FF80ULL && Value <= 0x000000000000FFFFULL)
    return true;
  // Anything at or above this bound is a sign-extended negative int8.
  return Value >= 0xFFFFFFFFFFFFFF80ULL;
}
/// isImmSExti32i8Value - Return true if Value can be encoded as an 8-bit
/// immediate sign-extended into a 32-bit context: [0, 0x7F], the 32-bit
/// two's-complement negatives [0xFFFFFF80, 0xFFFFFFFF], or the 64-bit
/// sign-extended negatives [0xFFFFFFFFFFFFFF80, UINT64_MAX].
static bool isImmSExti32i8Value(uint64_t Value) {
  if (Value <= 0x000000000000007FULL)
    return true;
  if (Value >= 0x00000000FFFFFF80ULL && Value <= 0x00000000FFFFFFFFULL)
    return true;
  return Value >= 0xFFFFFFFFFFFFFF80ULL;
}
/// isImmZExtu32u8Value - Return true if Value fits in an 8-bit immediate
/// zero-extended into a 32-bit context, i.e. Value is in [0, 0xFF].
static bool isImmZExtu32u8Value(uint64_t Value) {
  return Value < 0x100ULL;
}
/// isImmSExti64i8Value - Return true if Value can be encoded as an 8-bit
/// immediate sign-extended into a 64-bit context: [0, 0x7F] or the
/// sign-extended negatives [0xFFFFFFFFFFFFFF80, UINT64_MAX]. (The upper
/// bound of the negative range is UINT64_MAX, so only the lower bound
/// needs checking.)
static bool isImmSExti64i8Value(uint64_t Value) {
  return Value <= 0x000000000000007FULL ||
         Value >= 0xFFFFFFFFFFFFFF80ULL;
}
/// isImmSExti64i32Value - Return true if Value can be encoded as a 32-bit
/// immediate sign-extended into a 64-bit context: [0, 0x7FFFFFFF] or the
/// sign-extended negatives [0xFFFFFFFF80000000, UINT64_MAX].
static bool isImmSExti64i32Value(uint64_t Value) {
  return Value <= 0x000000007FFFFFFFULL ||
         Value >= 0xFFFFFFFF80000000ULL;
}
159 /// X86Operand - Instances of this class represent a parsed X86 machine
// instruction operand: a token, register, immediate, or memory reference.
// NOTE(review): this listing is an excerpt; several accessor bodies and the
// Kind/union member declarations are elided below.
161 struct X86Operand : public MCParsedAsmOperand {
169 SMLoc StartLoc, EndLoc;
204 X86Operand(KindTy K, SMLoc Start, SMLoc End)
205 : Kind(K), StartLoc(Start), EndLoc(End) {}
207 /// getStartLoc - Get the location of the first token of this operand.
208 SMLoc getStartLoc() const { return StartLoc; }
209 /// getEndLoc - Get the location of the last token of this operand.
210 SMLoc getEndLoc() const { return EndLoc; }
211 /// getLocRange - Get the range between the first and last token of this
213 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
214 /// getOffsetOfLoc - Get the location of the offset operator.
215 SMLoc getOffsetOfLoc() const { return OffsetOfLoc; }
217 virtual void print(raw_ostream &OS) const {}
// Kind-checked accessors: each asserts the operand is of the matching kind
// before touching the corresponding union member.
219 StringRef getToken() const {
220 assert(Kind == Token && "Invalid access!");
221 return StringRef(Tok.Data, Tok.Length);
223 void setTokenValue(StringRef Value) {
224 assert(Kind == Token && "Invalid access!");
225 Tok.Data = Value.data();
226 Tok.Length = Value.size();
229 unsigned getReg() const {
230 assert(Kind == Register && "Invalid access!");
234 const MCExpr *getImm() const {
235 assert(Kind == Immediate && "Invalid access!");
239 bool needAsmRewrite() const {
240 assert(Kind == Immediate && "Invalid access!");
241 return Imm.NeedAsmRewrite;
244 const MCExpr *getMemDisp() const {
245 assert(Kind == Memory && "Invalid access!");
248 unsigned getMemSegReg() const {
249 assert(Kind == Memory && "Invalid access!");
252 unsigned getMemBaseReg() const {
253 assert(Kind == Memory && "Invalid access!");
256 unsigned getMemIndexReg() const {
257 assert(Kind == Memory && "Invalid access!");
260 unsigned getMemScale() const {
261 assert(Kind == Memory && "Invalid access!");
// Predicates used by the auto-generated matcher to classify operands.
265 bool isToken() const {return Kind == Token; }
267 bool isImm() const { return Kind == Immediate; }
// isImmSExtiNiM: true if this immediate could be encoded as an M-bit value
// sign/zero-extended into an N-bit context (see the isImm*Value helpers).
// Non-constant expressions are optimistically accepted.
269 bool isImmSExti16i8() const {
273 // If this isn't a constant expr, just assume it fits and let relaxation
275 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
279 // Otherwise, check the value is in a range that makes sense for this
281 return isImmSExti16i8Value(CE->getValue());
283 bool isImmSExti32i8() const {
287 // If this isn't a constant expr, just assume it fits and let relaxation
289 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
293 // Otherwise, check the value is in a range that makes sense for this
295 return isImmSExti32i8Value(CE->getValue());
297 bool isImmZExtu32u8() const {
301 // If this isn't a constant expr, just assume it fits and let relaxation
303 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
307 // Otherwise, check the value is in a range that makes sense for this
309 return isImmZExtu32u8Value(CE->getValue());
311 bool isImmSExti64i8() const {
315 // If this isn't a constant expr, just assume it fits and let relaxation
317 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
321 // Otherwise, check the value is in a range that makes sense for this
323 return isImmSExti64i8Value(CE->getValue());
325 bool isImmSExti64i32() const {
329 // If this isn't a constant expr, just assume it fits and let relaxation
331 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
335 // Otherwise, check the value is in a range that makes sense for this
337 return isImmSExti64i32Value(CE->getValue());
340 unsigned getMemSize() const {
341 assert(Kind == Memory && "Invalid access!");
// True iff this operand was produced by the Intel 'offset' operator
// (a non-null OffsetOfLoc marks it).
345 bool isOffsetOf() const {
346 return OffsetOfLoc.getPointer();
349 bool needAddressOf() const {
353 bool needSizeDirective() const {
354 assert(Kind == Memory && "Invalid access!");
355 return Mem.NeedSizeDir;
// isMemN: memory operand whose size is N bits, or has no explicit size
// (Mem.Size == 0 matches any width).
358 bool isMem() const { return Kind == Memory; }
359 bool isMem8() const {
360 return Kind == Memory && (!Mem.Size || Mem.Size == 8);
362 bool isMem16() const {
363 return Kind == Memory && (!Mem.Size || Mem.Size == 16);
365 bool isMem32() const {
366 return Kind == Memory && (!Mem.Size || Mem.Size == 32);
368 bool isMem64() const {
369 return Kind == Memory && (!Mem.Size || Mem.Size == 64);
371 bool isMem80() const {
372 return Kind == Memory && (!Mem.Size || Mem.Size == 80);
374 bool isMem128() const {
375 return Kind == Memory && (!Mem.Size || Mem.Size == 128);
377 bool isMem256() const {
378 return Kind == Memory && (!Mem.Size || Mem.Size == 256);
// isMemVXn/isMemVYn: gather-style memory operands whose index register is
// an XMM (VX) or YMM (VY) vector register.
381 bool isMemVX32() const {
382 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
383 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
385 bool isMemVY32() const {
386 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
387 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
389 bool isMemVX64() const {
390 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
391 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
393 bool isMemVY64() const {
394 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
395 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
// Absolute memory reference: a bare displacement with no segment, base,
// or index register (scale is the neutral 1).
398 bool isAbsMem() const {
399 return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
400 !getMemIndexReg() && getMemScale() == 1;
403 bool isReg() const { return Kind == Register; }
405 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
406 // Add as immediates when possible.
407 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
408 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
410 Inst.addOperand(MCOperand::CreateExpr(Expr));
// add*Operands: append this operand's MCOperands to Inst; called by the
// auto-generated matcher with the expected operand count N.
413 void addRegOperands(MCInst &Inst, unsigned N) const {
414 assert(N == 1 && "Invalid number of operands!");
415 Inst.addOperand(MCOperand::CreateReg(getReg()));
418 void addImmOperands(MCInst &Inst, unsigned N) const {
419 assert(N == 1 && "Invalid number of operands!");
420 addExpr(Inst, getImm());
// All sized/vector memory forms lower identically via addMemOperands.
423 void addMem8Operands(MCInst &Inst, unsigned N) const {
424 addMemOperands(Inst, N);
426 void addMem16Operands(MCInst &Inst, unsigned N) const {
427 addMemOperands(Inst, N);
429 void addMem32Operands(MCInst &Inst, unsigned N) const {
430 addMemOperands(Inst, N);
432 void addMem64Operands(MCInst &Inst, unsigned N) const {
433 addMemOperands(Inst, N);
435 void addMem80Operands(MCInst &Inst, unsigned N) const {
436 addMemOperands(Inst, N);
438 void addMem128Operands(MCInst &Inst, unsigned N) const {
439 addMemOperands(Inst, N);
441 void addMem256Operands(MCInst &Inst, unsigned N) const {
442 addMemOperands(Inst, N);
444 void addMemVX32Operands(MCInst &Inst, unsigned N) const {
445 addMemOperands(Inst, N);
447 void addMemVY32Operands(MCInst &Inst, unsigned N) const {
448 addMemOperands(Inst, N);
450 void addMemVX64Operands(MCInst &Inst, unsigned N) const {
451 addMemOperands(Inst, N);
453 void addMemVY64Operands(MCInst &Inst, unsigned N) const {
454 addMemOperands(Inst, N);
// Emit the canonical 5-operand x86 memory form:
// base, scale, index, displacement, segment.
457 void addMemOperands(MCInst &Inst, unsigned N) const {
458 assert((N == 5) && "Invalid number of operands!");
459 Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
460 Inst.addOperand(MCOperand::CreateImm(getMemScale()));
461 Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
462 addExpr(Inst, getMemDisp());
463 Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
466 void addAbsMemOperands(MCInst &Inst, unsigned N) const {
467 assert((N == 1) && "Invalid number of operands!");
468 // Add as immediates when possible.
469 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
470 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
472 Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
// Factory helpers. Callers own the returned heap-allocated operand.
475 static X86Operand *CreateToken(StringRef Str, SMLoc Loc) {
476 SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
477 X86Operand *Res = new X86Operand(Token, Loc, EndLoc);
478 Res->Tok.Data = Str.data();
479 Res->Tok.Length = Str.size();
483 static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
484 bool AddressOf = false,
485 SMLoc OffsetOfLoc = SMLoc()) {
486 X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc);
487 Res->Reg.RegNo = RegNo;
488 Res->AddressOf = AddressOf;
489 Res->OffsetOfLoc = OffsetOfLoc;
493 static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc,
494 bool NeedRewrite = true){
495 X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc);
497 Res->Imm.NeedAsmRewrite = NeedRewrite;
501 /// Create an absolute memory operand.
502 static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
503 unsigned Size = 0, bool NeedSizeDir = false) {
504 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
506 Res->Mem.Disp = Disp;
507 Res->Mem.BaseReg = 0;
508 Res->Mem.IndexReg = 0;
510 Res->Mem.Size = Size;
511 Res->Mem.NeedSizeDir = NeedSizeDir;
512 Res->AddressOf = false;
516 /// Create a generalized memory operand.
517 static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp,
518 unsigned BaseReg, unsigned IndexReg,
519 unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
520 unsigned Size = 0, bool NeedSizeDir = false) {
521 // We should never just have a displacement, that should be parsed as an
522 // absolute memory operand.
523 assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
525 // The scale should always be one of {1,2,4,8}.
526 assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
528 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
529 Res->Mem.SegReg = SegReg;
530 Res->Mem.Disp = Disp;
531 Res->Mem.BaseReg = BaseReg;
532 Res->Mem.IndexReg = IndexReg;
533 Res->Mem.Scale = Scale;
534 Res->Mem.Size = Size;
535 Res->Mem.NeedSizeDir = NeedSizeDir;
536 Res->AddressOf = false;
541 } // end anonymous namespace.
543 bool X86AsmParser::isSrcOp(X86Operand &Op) {
544 unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI;
546 return (Op.isMem() &&
547 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::DS) &&
548 isa<MCConstantExpr>(Op.Mem.Disp) &&
549 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
550 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0);
// isDstOp - Implicit destination of the string instructions: the
// mode-appropriate DI base register, optionally with an %es segment
// override, a constant-zero displacement, and no index register.
// NOTE(review): the listing appears truncated here -- the 'return
// Op.isMem() &&' head of the condition (cf. isSrcOp above) is not visible.
553 bool X86AsmParser::isDstOp(X86Operand &Op) {
554 unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI;
557 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) &&
558 isa<MCConstantExpr>(Op.Mem.Disp) &&
559 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
560 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0;
// ParseRegister - Parse a register name (with or without the AT&T '%'
// prefix), setting RegNo and the Start/End source locations. Returns true
// on failure. Also handles the multi-token "%st(N)" form and the "dbN"
// debug-register aliases.
// NOTE(review): listing is an excerpt; some branch/closing lines elided.
563 bool X86AsmParser::ParseRegister(unsigned &RegNo,
564 SMLoc &StartLoc, SMLoc &EndLoc) {
566 const AsmToken &PercentTok = Parser.getTok();
567 StartLoc = PercentTok.getLoc();
569 // If we encounter a %, ignore it. This code handles registers with and
570 // without the prefix, unprefixed registers can occur in cfi directives.
571 if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent))
572 Parser.Lex(); // Eat percent token.
574 const AsmToken &Tok = Parser.getTok();
575 EndLoc = Tok.getEndLoc();
// In Intel mode a non-identifier is not an error here: the caller may
// retry the token as an expression, so fail silently.
577 if (Tok.isNot(AsmToken::Identifier)) {
578 if (isParsingIntelSyntax()) return true;
579 return Error(StartLoc, "invalid register name",
580 SMRange(StartLoc, EndLoc));
583 RegNo = MatchRegisterName(Tok.getString());
585 // If the match failed, try the register name as lowercase.
587 RegNo = MatchRegisterName(Tok.getString().lower());
// Reject registers that only exist in 64-bit mode when assembling for
// 16/32-bit targets.
589 if (!is64BitMode()) {
590 // FIXME: This should be done using Requires<In32BitMode> and
591 // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
593 // FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a
595 if (RegNo == X86::RIZ ||
596 X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
597 X86II::isX86_64NonExtLowByteReg(RegNo) ||
598 X86II::isX86_64ExtendedReg(RegNo))
599 return Error(StartLoc, "register %"
600 + Tok.getString() + " is only available in 64-bit mode",
601 SMRange(StartLoc, EndLoc));
604 // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
605 if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) {
607 Parser.Lex(); // Eat 'st'
609 // Check to see if we have '(4)' after %st.
610 if (getLexer().isNot(AsmToken::LParen))
615 const AsmToken &IntTok = Parser.getTok();
616 if (IntTok.isNot(AsmToken::Integer))
617 return Error(IntTok.getLoc(), "expected stack index");
618 switch (IntTok.getIntVal()) {
619 case 0: RegNo = X86::ST0; break;
620 case 1: RegNo = X86::ST1; break;
621 case 2: RegNo = X86::ST2; break;
622 case 3: RegNo = X86::ST3; break;
623 case 4: RegNo = X86::ST4; break;
624 case 5: RegNo = X86::ST5; break;
625 case 6: RegNo = X86::ST6; break;
626 case 7: RegNo = X86::ST7; break;
627 default: return Error(IntTok.getLoc(), "invalid stack index");
630 if (getParser().Lex().isNot(AsmToken::RParen))
631 return Error(Parser.getTok().getLoc(), "expected ')'");
633 EndLoc = Parser.getTok().getEndLoc();
634 Parser.Lex(); // Eat ')'
638 EndLoc = Parser.getTok().getEndLoc();
640 // If this is "db[0-7]", match it as an alias
642 if (RegNo == 0 && Tok.getString().size() == 3 &&
643 Tok.getString().startswith("db")) {
644 switch (Tok.getString()[2]) {
645 case '0': RegNo = X86::DR0; break;
646 case '1': RegNo = X86::DR1; break;
647 case '2': RegNo = X86::DR2; break;
648 case '3': RegNo = X86::DR3; break;
649 case '4': RegNo = X86::DR4; break;
650 case '5': RegNo = X86::DR5; break;
651 case '6': RegNo = X86::DR6; break;
652 case '7': RegNo = X86::DR7; break;
656 EndLoc = Parser.getTok().getEndLoc();
657 Parser.Lex(); // Eat it.
// Nothing matched: silent failure in Intel mode (caller retries as an
// expression), hard error in AT&T mode.
663 if (isParsingIntelSyntax()) return true;
664 return Error(StartLoc, "invalid register name",
665 SMRange(StartLoc, EndLoc));
668 Parser.Lex(); // Eat identifier token.
672 X86Operand *X86AsmParser::ParseOperand() {
673 if (isParsingIntelSyntax())
674 return ParseIntelOperand();
675 return ParseATTOperand();
678 /// getIntelMemOperandSize - Return intel memory operand size.
679 static unsigned getIntelMemOperandSize(StringRef OpStr) {
680 unsigned Size = StringSwitch<unsigned>(OpStr)
681 .Cases("BYTE", "byte", 8)
682 .Cases("WORD", "word", 16)
683 .Cases("DWORD", "dword", 32)
684 .Cases("QWORD", "qword", 64)
685 .Cases("XWORD", "xword", 80)
686 .Cases("XMMWORD", "xmmword", 128)
687 .Cases("YMMWORD", "ymmword", 256)
// States for the bracketed-expression parser below; IBES_RBRAC is the only
// accepting state. NOTE(review): listing is an excerpt; most enumerators
// and several state-transition bodies are elided.
692 enum IntelBracExprState {
698 IBES_REGISTER_STAR_INTEGER,
// IntelBracExprStateMachine - Incremental parser for Intel bracketed memory
// expressions of the form [ BaseReg + Scale*IndexReg + Disp ]. The caller
// feeds tokens via the on*() callbacks and reads the accumulated
// base/index/scale/displacement once isValidEndState() holds.
708 class IntelBracExprStateMachine {
709 IntelBracExprState State;
710 unsigned BaseReg, IndexReg, Scale;
719 IntelBracExprStateMachine(MCAsmParser &parser) :
720 State(IBES_START), BaseReg(0), IndexReg(0), Scale(1), Disp(0),
721 TmpReg(0), TmpInteger(0), isPlus(true) {}
723 unsigned getBaseReg() { return BaseReg; }
724 unsigned getIndexReg() { return IndexReg; }
725 unsigned getScale() { return Scale; }
726 int64_t getDisp() { return Disp; }
727 bool isValidEndState() { return State == IBES_RBRAC; }
743 // If we already have a BaseReg, then assume this is the IndexReg with a
748 assert (!IndexReg && "BaseReg/IndexReg already set!");
753 case IBES_INDEX_REGISTER:
776 // If we already have a BaseReg, then assume this is the IndexReg with a
781 assert (!IndexReg && "BaseReg/IndexReg already set!");
786 case IBES_INDEX_REGISTER:
// Called for each register token; a register after '*' becomes the index.
792 void onRegister(unsigned Reg) {
798 State = IBES_REGISTER;
801 case IBES_INTEGER_STAR:
802 assert (!IndexReg && "IndexReg already set!");
803 State = IBES_INDEX_REGISTER;
815 State = IBES_DISP_EXPR;
// Called for each integer token; an integer after 'reg *' is the scale,
// otherwise it contributes to the displacement.
819 void onInteger(int64_t TmpInt) {
825 State = IBES_INTEGER;
829 State = IBES_INTEGER;
832 case IBES_REGISTER_STAR:
833 assert (!IndexReg && "IndexReg already set!");
834 State = IBES_INDEX_REGISTER;
846 State = IBES_INTEGER_STAR;
849 State = IBES_REGISTER_STAR;
881 // If we already have a BaseReg, then assume this is the IndexReg with a
886 assert (!IndexReg && "BaseReg/IndexReg already set!");
891 case IBES_INDEX_REGISTER:
// ParseIntelBracExpression - Parse an Intel bracketed memory reference
// '[ BaseReg + Scale*IndexReg + Disp ]' (with optional SegReg prefix and
// explicit Size in bits), driving IntelBracExprStateMachine token by token.
// NOTE(review): listing is an excerpt; some lines elided.
898 X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
900 const AsmToken &Tok = Parser.getTok();
901 SMLoc Start = Tok.getLoc(), End = Tok.getEndLoc();
904 if (getLexer().isNot(AsmToken::LBrac))
905 return ErrorOperand(Start, "Expected '[' token!");
910 // Try to handle '[' 'symbol' ']'
911 if (getLexer().is(AsmToken::Identifier)) {
912 if (ParseRegister(TmpReg, Start, End)) {
914 if (getParser().parseExpression(Disp, End))
917 if (getLexer().isNot(AsmToken::RBrac))
918 return ErrorOperand(Parser.getTok().getLoc(), "Expected ']' token!");
919 // Adjust the EndLoc due to the ']'.
920 End = SMLoc::getFromPointer(Parser.getTok().getEndLoc().getPointer()-1);
922 return X86Operand::CreateMem(Disp, Start, End, Size);
926 // Parse [ BaseReg + Scale*IndexReg + Disp ].
928 IntelBracExprStateMachine SM(Parser);
930 // If we parsed a register, then the end loc has already been set and
931 // the identifier has already been lexed. We also need to update the
934 SM.onRegister(TmpReg);
936 const MCExpr *Disp = 0;
938 bool UpdateLocLex = true;
940 // The period in the dot operator (e.g., [ebx].foo.bar) is parsed as an
941 // identifier. Don't try to parse it as a register.
942 if (Tok.getString().startswith("."))
945 switch (getLexer().getKind()) {
947 if (SM.isValidEndState()) {
951 return ErrorOperand(Tok.getLoc(), "Unexpected token!");
953 case AsmToken::Identifier: {
954 // This could be a register or a displacement expression.
955 if(!ParseRegister(TmpReg, Start, End)) {
956 SM.onRegister(TmpReg);
957 UpdateLocLex = false;
959 } else if (!getParser().parseExpression(Disp, End)) {
961 UpdateLocLex = false;
964 return ErrorOperand(Tok.getLoc(), "Unexpected identifier!");
966 case AsmToken::Integer: {
967 int64_t Val = Tok.getIntVal();
// Punctuation feeds the state machine directly.
971 case AsmToken::Plus: SM.onPlus(); break;
972 case AsmToken::Minus: SM.onMinus(); break;
973 case AsmToken::Star: SM.onStar(); break;
974 case AsmToken::LBrac: SM.onLBrac(); break;
975 case AsmToken::RBrac: SM.onRBrac(); break;
977 if (!Done && UpdateLocLex) {
979 Parser.Lex(); // Consume the token.
// Fold the accumulated constant displacement into an MCExpr.
984 Disp = MCConstantExpr::Create(SM.getDisp(), getContext());
986 // Parse the dot operator (e.g., [ebx].foo.bar).
987 if (Tok.getString().startswith(".")) {
989 const MCExpr *NewDisp;
990 if (ParseIntelDotOperator(Disp, &NewDisp, Err))
991 return ErrorOperand(Tok.getLoc(), Err);
993 End = Parser.getTok().getEndLoc();
994 Parser.Lex(); // Eat the field.
998 int BaseReg = SM.getBaseReg();
999 int IndexReg = SM.getIndexReg();
// With no registers at all this is an absolute reference (unless a segment
// override forces the generalized form).
1002 if (!BaseReg && !IndexReg) {
1004 return X86Operand::CreateMem(Disp, Start, End);
1006 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size);
1009 int Scale = SM.getScale();
1010 return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
1014 /// ParseIntelMemOperand - Parse intel style memory operand.
// Handles an optional size directive ("dword ptr"), bracketed expressions,
// segment-prefixed forms ("es:[...]") and bare symbol references. For
// MS inline asm, symbol sizes are resolved through the Sema callback.
// NOTE(review): listing is an excerpt; some lines elided.
1015 X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) {
1016 const AsmToken &Tok = Parser.getTok();
1019 unsigned Size = getIntelMemOperandSize(Tok.getString());
// A recognized size keyword must be followed by the PTR keyword.
1022 assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") &&
1023 "Unexpected token!");
1027 if (getLexer().is(AsmToken::LBrac))
1028 return ParseIntelBracExpression(SegReg, Size);
1030 if (!ParseRegister(SegReg, Start, End)) {
1031 // Handle SegReg : [ ... ]
1032 if (getLexer().isNot(AsmToken::Colon))
1033 return ErrorOperand(Start, "Expected ':' token!");
1034 Parser.Lex(); // Eat :
1035 if (getLexer().isNot(AsmToken::LBrac))
1036 return ErrorOperand(Start, "Expected '[' token!");
1037 return ParseIntelBracExpression(SegReg, Size);
1040 const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
1041 if (getParser().parseExpression(Disp, End))
1044 bool NeedSizeDir = false;
1045 bool IsVarDecl = false;
1046 if (isParsingInlineAsm()) {
1047 if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Disp)) {
1048 const MCSymbol &Sym = SymRef->getSymbol();
1049 // FIXME: The SemaLookup will fail if the name is anything other then an
1051 // FIXME: Pass a valid SMLoc.
1052 unsigned tLength, tSize, tType;
1053 SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, tLength,
1054 tSize, tType, IsVarDecl);
1056 Size = tType * 8; // Size is in terms of bits in this context.
1057 NeedSizeDir = Size > 0;
1061 if (!isParsingInlineAsm())
1062 return X86Operand::CreateMem(Disp, Start, End, Size);
1064 // If this is not a VarDecl then assume it is a FuncDecl or some other label
1065 // reference. We need an 'r' constraint here, so we need to create register
1066 // operand to ensure proper matching. Just pick a GPR based on the size of
1069 unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
1070 return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true);
1073 // When parsing inline assembly we set the base register to a non-zero value
1074 // as we don't know the actual value at this time. This is necessary to
1075 // get the matching correct in some cases.
1076 return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0,
1077 /*Scale*/1, Start, End, Size, NeedSizeDir);
1081 /// Parse the '.' operator.
// Computes Disp + offset-of-field for expressions like [ebx].foo.bar,
// producing the combined constant in *NewDisp. Returns true and sets Err on
// failure. For MS inline asm, field offsets come from the Sema callback and
// an AOK_DotOperator rewrite is recorded so the front-end text is patched.
// NOTE(review): listing is an excerpt; some lines elided.
1082 bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
1083 const MCExpr **NewDisp,
1084 SmallString<64> &Err) {
1085 AsmToken Tok = *&Parser.getTok();
1086 uint64_t OrigDispVal, DotDispVal;
1088 // FIXME: Handle non-constant expressions.
1089 if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp)) {
1090 OrigDispVal = OrigDisp->getValue();
1092 Err = "Non-constant offsets are not supported!";
// Strip the leading '.' from the operator token.
1097 StringRef DotDispStr = Tok.getString().drop_front(1);
1099 // .Imm gets lexed as a real.
1100 if (Tok.is(AsmToken::Real)) {
1102 DotDispStr.getAsInteger(10, DotDisp);
1103 DotDispVal = DotDisp.getZExtValue();
1104 } else if (Tok.is(AsmToken::Identifier)) {
1105 // We should only see an identifier when parsing the original inline asm.
1106 // The front-end should rewrite this in terms of immediates.
1107 assert (isParsingInlineAsm() && "Unexpected field name!");
1110 std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
1111 if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
1113 Err = "Unable to lookup field reference!";
1116 DotDispVal = DotDisp;
1118 Err = "Unexpected token type!";
// Record a rewrite replacing the textual field reference with the computed
// displacement for the inline-asm string.
1122 if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
1123 SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data());
1124 unsigned Len = DotDispStr.size();
1125 unsigned Val = OrigDispVal + DotDispVal;
1126 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_DotOperator, Loc, Len,
1130 *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext());
1134 /// Parse the 'offset' operator. This operator is used to specify the
1135 /// location rather than the content of a variable.
// Only reached when parsing MS inline asm (see ParseIntelOperand); records
// an AOK_Skip rewrite so "offset " is dropped from the rewritten string.
// NOTE(review): listing is an excerpt; some lines elided.
1136 X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) {
1137 SMLoc OffsetOfLoc = Start;
1138 Parser.Lex(); // Eat offset.
1139 Start = Parser.getTok().getLoc();
1140 assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
1144 if (getParser().parseExpression(Val, End))
1145 return ErrorOperand(Start, "Unable to parse expression!");
1147 // Don't emit the offset operator.
1148 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));
1150 // The offset operator will have an 'r' constraint, thus we need to create
1151 // register operand to ensure proper matching. Just pick a GPR based on
1152 // the size of a pointer.
1153 unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
1154 return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true,
// Discriminates the LENGTH/SIZE/TYPE operators handled below.
// NOTE(review): listing is an excerpt; enumerators are elided.
1158 enum IntelOperatorKind {
1164 /// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
1165 /// returns the number of elements in an array. It returns the value 1 for
1166 /// non-array variables. The SIZE operator returns the size of a C or C++
1167 /// variable. A variable's size is the product of its LENGTH and TYPE. The
1168 /// TYPE operator returns the size of a C or C++ type or variable. If the
1169 /// variable is an array, TYPE returns the size of a single element.
// Only reached when parsing MS inline asm; the variable's properties come
// from the Sema callback and the whole "<OP> foo" text is rewritten to an
// immediate via an AOK_Imm rewrite.
1170 X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) {
1171 SMLoc TypeLoc = Start;
1172 Parser.Lex(); // Eat the operator token (LENGTH/SIZE/TYPE).
1173 Start = Parser.getTok().getLoc();
1174 assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
1178 if (getParser().parseExpression(Val, End))
1181 unsigned Length = 0, Size = 0, Type = 0;
1182 if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Val)) {
1183 const MCSymbol &Sym = SymRef->getSymbol();
1184 // FIXME: The SemaLookup will fail if the name is anything other then an
1186 // FIXME: Pass a valid SMLoc.
1188 if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Length,
1189 Size, Type, IsVarDecl))
1190 return ErrorOperand(Start, "Unable to lookup expr!");
1194 default: llvm_unreachable("Unexpected operand kind!");
1195 case IOK_LENGTH: CVal = Length; break;
1196 case IOK_SIZE: CVal = Size; break;
1197 case IOK_TYPE: CVal = Type; break;
1200 // Rewrite the type operator and the C or C++ type or variable in terms of an
1201 // immediate. E.g. TYPE foo -> $$4
1202 unsigned Len = End.getPointer() - TypeLoc.getPointer();
1203 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, CVal));
1205 const MCExpr *Imm = MCConstantExpr::Create(CVal, getContext());
1206 return X86Operand::CreateImm(Imm, Start, End, /*NeedAsmRewrite*/false);
// ParseIntelOperand - Top-level Intel-syntax operand parser: recognizes the
// MS inline-asm operators (offset/length/size/type), immediates, registers
// (optionally segment-prefixed memory), then falls back to memory operands.
// NOTE(review): listing is an excerpt; some lines elided.
1209 X86Operand *X86AsmParser::ParseIntelOperand() {
1210 SMLoc Start = Parser.getTok().getLoc(), End;
1211 StringRef AsmTokStr = Parser.getTok().getString();
1213 // Offset, length, type and size operators.
1214 if (isParsingInlineAsm()) {
1215 if (AsmTokStr == "offset" || AsmTokStr == "OFFSET")
1216 return ParseIntelOffsetOfOperator(Start);
1217 if (AsmTokStr == "length" || AsmTokStr == "LENGTH")
1218 return ParseIntelOperator(Start, IOK_LENGTH);
1219 if (AsmTokStr == "size" || AsmTokStr == "SIZE")
1220 return ParseIntelOperator(Start, IOK_SIZE);
1221 if (AsmTokStr == "type" || AsmTokStr == "TYPE")
1222 return ParseIntelOperator(Start, IOK_TYPE);
// A leading integer/real/minus token starts an immediate expression.
1226 if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) ||
1227 getLexer().is(AsmToken::Minus)) {
1229 if (!getParser().parseExpression(Val, End)) {
1230 return X86Operand::CreateImm(Val, Start, End);
1236 if (!ParseRegister(RegNo, Start, End)) {
1237 // If this is a segment register followed by a ':', then this is the start
1238 // of a memory reference, otherwise this is a normal register reference.
1239 if (getLexer().isNot(AsmToken::Colon))
1240 return X86Operand::CreateReg(RegNo, Start, End);
1242 getParser().Lex(); // Eat the colon.
1243 return ParseIntelMemOperand(RegNo, Start);
1247 return ParseIntelMemOperand(0, Start);
// ParseATTOperand - Top-level AT&T-syntax operand parser: '%' introduces a
// register (or a segment-prefixed memory reference), '$' an immediate, and
// anything else is treated as a memory operand.
// NOTE(review): listing is an excerpt; case labels and closing braces of
// the switch are partially elided.
1250 X86Operand *X86AsmParser::ParseATTOperand() {
1251 switch (getLexer().getKind()) {
1253 // Parse a memory operand with no segment register.
1254 return ParseMemOperand(0, Parser.getTok().getLoc());
1255 case AsmToken::Percent: {
1256 // Read the register.
1259 if (ParseRegister(RegNo, Start, End)) return 0;
// %eiz/%riz are pseudo index registers; reject them in base position.
1260 if (RegNo == X86::EIZ || RegNo == X86::RIZ) {
1261 Error(Start, "%eiz and %riz can only be used as index registers",
1262 SMRange(Start, End));
1266 // If this is a segment register followed by a ':', then this is the start
1267 // of a memory reference, otherwise this is a normal register reference.
1268 if (getLexer().isNot(AsmToken::Colon))
1269 return X86Operand::CreateReg(RegNo, Start, End);
1272 getParser().Lex(); // Eat the colon.
1273 return ParseMemOperand(RegNo, Start);
1275 case AsmToken::Dollar: {
1276 // $42 -> immediate.
1277 SMLoc Start = Parser.getTok().getLoc(), End;
1280 if (getParser().parseExpression(Val, End))
1282 return X86Operand::CreateImm(Val, Start, End);
1287 /// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix
1288 /// has already been parsed if present.
1289 X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// NOTE(review): elided view — several error-path lines and closing braces
// between the visible lines are missing from this chunk.
1291 // We have to disambiguate a parenthesized expression "(4+5)" from the start
1292 // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The
1293 // only way to do this without lookahead is to eat the '(' and see what is
// Displacement defaults to 0 when none is written (e.g. "(%ebx)").
1295 const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
1296 if (getLexer().isNot(AsmToken::LParen)) {
// Operand starts with an expression: parse the displacement first.
1298 if (getParser().parseExpression(Disp, ExprEnd)) return 0;
1300 // After parsing the base expression we could either have a parenthesized
1301 // memory address or not. If not, return now. If so, eat the (.
1302 if (getLexer().isNot(AsmToken::LParen)) {
1303 // Unless we have a segment register, treat this as an immediate.
1305 return X86Operand::CreateMem(Disp, MemStart, ExprEnd)
1306 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
1312 // Okay, we have a '('. We don't know if this is an expression or not, but
1313 // so we have to eat the ( to see beyond it.
1314 SMLoc LParenLoc = Parser.getTok().getLoc();
1315 Parser.Lex(); // Eat the '('.
1317 if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) {
1318 // Nothing to do here, fall into the code below with the '(' part of the
1319 // memory operand consumed.
1323 // It must be an parenthesized expression, parse it now.
1324 if (getParser().parseParenExpression(Disp, ExprEnd))
1327 // After parsing the base expression we could either have a parenthesized
1328 // memory address or not. If not, return now. If so, eat the (.
1329 if (getLexer().isNot(AsmToken::LParen)) {
1330 // Unless we have a segment register, treat this as an immediate.
1332 return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd);
1333 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
1341 // If we reached here, then we just ate the ( of the memory operand. Process
1342 // the rest of the memory operand.
1343 unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
// Optional base register immediately after the '('.
1346 if (getLexer().is(AsmToken::Percent)) {
1347 SMLoc StartLoc, EndLoc;
1348 if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0;
1349 if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
1350 Error(StartLoc, "eiz and riz can only be used as index registers",
1351 SMRange(StartLoc, EndLoc));
// Optional ",index,scale" part.
1356 if (getLexer().is(AsmToken::Comma)) {
1357 Parser.Lex(); // Eat the comma.
1358 IndexLoc = Parser.getTok().getLoc();
1360 // Following the comma we should have either an index register, or a scale
1361 // value. We don't support the later form, but we want to parse it
1364 // Not that even though it would be completely consistent to support syntax
1365 // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
1366 if (getLexer().is(AsmToken::Percent)) {
1368 if (ParseRegister(IndexReg, L, L)) return 0;
1370 if (getLexer().isNot(AsmToken::RParen)) {
1371 // Parse the scale amount:
1372 // ::= ',' [scale-expression]
1373 if (getLexer().isNot(AsmToken::Comma)) {
1374 Error(Parser.getTok().getLoc(),
1375 "expected comma in scale expression");
1378 Parser.Lex(); // Eat the comma.
// An omitted scale (e.g. "(%eax,%ebx,)") keeps the default of 1.
1380 if (getLexer().isNot(AsmToken::RParen)) {
1381 SMLoc Loc = Parser.getTok().getLoc();
1384 if (getParser().parseAbsoluteExpression(ScaleVal)){
1385 Error(Loc, "expected scale expression");
1389 // Validate the scale amount.
1390 if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
1391 Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
1394 Scale = (unsigned)ScaleVal;
1397 } else if (getLexer().isNot(AsmToken::RParen)) {
1398 // A scale amount without an index is ignored.
// Still parse (and discard) the expression so the token stream stays
// consistent, then warn rather than error.
1400 SMLoc Loc = Parser.getTok().getLoc();
1403 if (getParser().parseAbsoluteExpression(Value))
1407 Warning(Loc, "scale factor without index register is ignored");
1412 // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
1413 if (getLexer().isNot(AsmToken::RParen)) {
1414 Error(Parser.getTok().getLoc(), "unexpected token in memory operand");
1417 SMLoc MemEnd = Parser.getTok().getEndLoc();
1418 Parser.Lex(); // Eat the ')'.
1420 // If we have both a base register and an index register make sure they are
1421 // both 64-bit or 32-bit registers.
1422 // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
1423 if (BaseReg != 0 && IndexReg != 0) {
1424 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
1425 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1426 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
1427 IndexReg != X86::RIZ) {
1428 Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
1431 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
1432 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1433 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
1434 IndexReg != X86::EIZ){
1435 Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
1440 return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
// Parse one instruction statement: the mnemonic has already been lexed into
// Name; this fills Operands with the token operand (possibly a patched
// mnemonic) followed by the parsed operands. Returns true on error.
// Contains a long series of mnemonic-compatibility hacks (cmpCC, out/in,
// ins/outs/movs/lods/stos string ops, shift-by-1, int $3) that rewrite the
// operand list before matching.
// NOTE(review): elided view — the return-type line, some closing braces and
// a few statements between the visible lines are missing from this chunk.
1445 ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
1446 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1448 StringRef PatchedName = Name;
1450 // FIXME: Hack to recognize setneb as setne.
1451 if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
1452 PatchedName != "setb" && PatchedName != "setnb")
1453 PatchedName = PatchedName.substr(0, Name.size()-1);
1455 // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
1456 const MCExpr *ExtraImmOp = 0;
1457 if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
1458 (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
1459 PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
1460 bool IsVCMP = PatchedName[0] == 'v';
1461 unsigned SSECCIdx = IsVCMP ? 4 : 3;
// Map the textual condition code (between the cmp/vcmp prefix and the
// two-char type suffix) to its immediate encoding.
1462 unsigned SSEComparisonCode = StringSwitch<unsigned>(
1463 PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
1467 .Case("unord", 0x03)
1472 /* AVX only from here */
1473 .Case("eq_uq", 0x08)
1476 .Case("false", 0x0B)
1477 .Case("neq_oq", 0x0C)
1481 .Case("eq_os", 0x10)
1482 .Case("lt_oq", 0x11)
1483 .Case("le_oq", 0x12)
1484 .Case("unord_s", 0x13)
1485 .Case("neq_us", 0x14)
1486 .Case("nlt_uq", 0x15)
1487 .Case("nle_uq", 0x16)
1488 .Case("ord_s", 0x17)
1489 .Case("eq_us", 0x18)
1490 .Case("nge_uq", 0x19)
1491 .Case("ngt_uq", 0x1A)
1492 .Case("false_os", 0x1B)
1493 .Case("neq_os", 0x1C)
1494 .Case("ge_oq", 0x1D)
1495 .Case("gt_oq", 0x1E)
1496 .Case("true_us", 0x1F)
// Codes >= 8 are AVX-only; plain SSE cmp only accepts 0-7.
1498 if (SSEComparisonCode != ~0U && (IsVCMP || SSEComparisonCode < 8)) {
1499 ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
1500 getParser().getContext());
1501 if (PatchedName.endswith("ss")) {
1502 PatchedName = IsVCMP ? "vcmpss" : "cmpss";
1503 } else if (PatchedName.endswith("sd")) {
1504 PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
1505 } else if (PatchedName.endswith("ps")) {
1506 PatchedName = IsVCMP ? "vcmpps" : "cmpps";
1508 assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
1509 PatchedName = IsVCMP ? "vcmppd" : "cmppd";
// Operand 0 is always the (possibly patched) mnemonic token.
1514 Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
// In AT&T syntax the condition-code immediate comes first; in Intel
// syntax it is appended after the operands (see below).
1516 if (ExtraImmOp && !isParsingIntelSyntax())
1517 Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
1519 // Determine whether this is an instruction prefix.
1521 Name == "lock" || Name == "rep" ||
1522 Name == "repe" || Name == "repz" ||
1523 Name == "repne" || Name == "repnz" ||
1524 Name == "rex64" || Name == "data16";
1527 // This does the actual operand parsing. Don't parse any more if we have a
1528 // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
1529 // just want to parse the "lock" as the first instruction and the "incl" as
1531 if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
1533 // Parse '*' modifier.
1534 if (getLexer().is(AsmToken::Star)) {
1535 SMLoc Loc = Parser.getTok().getLoc();
1536 Operands.push_back(X86Operand::CreateToken("*", Loc));
1537 Parser.Lex(); // Eat the star.
1540 // Read the first operand.
1541 if (X86Operand *Op = ParseOperand())
1542 Operands.push_back(Op);
// On parse failure, skip to the end of the statement for recovery.
1544 Parser.eatToEndOfStatement();
// Remaining operands are comma-separated.
1548 while (getLexer().is(AsmToken::Comma)) {
1549 Parser.Lex(); // Eat the comma.
1551 // Parse and remember the operand.
1552 if (X86Operand *Op = ParseOperand())
1553 Operands.push_back(Op);
1555 Parser.eatToEndOfStatement();
1560 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1561 SMLoc Loc = getLexer().getLoc();
1562 Parser.eatToEndOfStatement();
1563 return Error(Loc, "unexpected token in argument list");
1567 if (getLexer().is(AsmToken::EndOfStatement))
1568 Parser.Lex(); // Consume the EndOfStatement
1569 else if (isPrefix && getLexer().is(AsmToken::Slash))
1570 Parser.Lex(); // Consume the prefix separator Slash
1572 if (ExtraImmOp && isParsingIntelSyntax())
1573 Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc))
1575 // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
1576 // "outb %al, %dx". Out doesn't take a memory form, but this is a widely
1577 // documented form in various unofficial manuals, so a lot of code uses it.
1578 if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
1579 Operands.size() == 3) {
1580 X86Operand &Op = *(X86Operand*)Operands.back();
1581 if (Op.isMem() && Op.Mem.SegReg == 0 &&
1582 isa<MCConstantExpr>(Op.Mem.Disp) &&
1583 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
1584 Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
1585 SMLoc Loc = Op.getEndLoc();
// Replace the "(%dx)" memory operand with a plain %dx register operand.
1586 Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
1590 // Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al".
1591 if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") &&
1592 Operands.size() == 3) {
1593 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1594 if (Op.isMem() && Op.Mem.SegReg == 0 &&
1595 isa<MCConstantExpr>(Op.Mem.Disp) &&
1596 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
1597 Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
1598 SMLoc Loc = Op.getEndLoc();
1599 Operands.begin()[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
1603 // Transform "ins[bwl] %dx, %es:(%edi)" into "ins[bwl]"
// The string-op forms below drop the explicit (implicit-register)
// operands so the zero-operand form of the instruction matches.
1604 if (Name.startswith("ins") && Operands.size() == 3 &&
1605 (Name == "insb" || Name == "insw" || Name == "insl")) {
1606 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1607 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1608 if (Op.isReg() && Op.getReg() == X86::DX && isDstOp(Op2)) {
1609 Operands.pop_back();
1610 Operands.pop_back();
1616 // Transform "outs[bwl] %ds:(%esi), %dx" into "out[bwl]"
1617 if (Name.startswith("outs") && Operands.size() == 3 &&
1618 (Name == "outsb" || Name == "outsw" || Name == "outsl")) {
1619 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1620 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1621 if (isSrcOp(Op) && Op2.isReg() && Op2.getReg() == X86::DX) {
1622 Operands.pop_back();
1623 Operands.pop_back();
1629 // Transform "movs[bwl] %ds:(%esi), %es:(%edi)" into "movs[bwl]"
1630 if (Name.startswith("movs") && Operands.size() == 3 &&
1631 (Name == "movsb" || Name == "movsw" || Name == "movsl" ||
1632 (is64BitMode() && Name == "movsq"))) {
1633 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1634 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1635 if (isSrcOp(Op) && isDstOp(Op2)) {
1636 Operands.pop_back();
1637 Operands.pop_back();
1642 // Transform "lods[bwl] %ds:(%esi),{%al,%ax,%eax,%rax}" into "lods[bwl]"
1643 if (Name.startswith("lods") && Operands.size() == 3 &&
1644 (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
1645 Name == "lodsl" || (is64BitMode() && Name == "lodsq"))) {
1646 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1647 X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
1648 if (isSrcOp(*Op1) && Op2->isReg()) {
// Pick the sized mnemonic from the accumulator register width when the
// bare "lods" form was written (elided: the assignments to 'ins').
1650 unsigned reg = Op2->getReg();
1651 bool isLods = Name == "lods";
1652 if (reg == X86::AL && (isLods || Name == "lodsb"))
1654 else if (reg == X86::AX && (isLods || Name == "lodsw"))
1656 else if (reg == X86::EAX && (isLods || Name == "lodsl"))
1658 else if (reg == X86::RAX && (isLods || Name == "lodsq"))
1663 Operands.pop_back();
1664 Operands.pop_back();
1668 static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
1672 // Transform "stos[bwl] {%al,%ax,%eax,%rax},%es:(%edi)" into "stos[bwl]"
1673 if (Name.startswith("stos") && Operands.size() == 3 &&
1674 (Name == "stos" || Name == "stosb" || Name == "stosw" ||
1675 Name == "stosl" || (is64BitMode() && Name == "stosq"))) {
1676 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1677 X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
1678 if (isDstOp(*Op2) && Op1->isReg()) {
1680 unsigned reg = Op1->getReg();
1681 bool isStos = Name == "stos";
1682 if (reg == X86::AL && (isStos || Name == "stosb"))
1684 else if (reg == X86::AX && (isStos || Name == "stosw"))
1686 else if (reg == X86::EAX && (isStos || Name == "stosl"))
1688 else if (reg == X86::RAX && (isStos || Name == "stosq"))
1693 Operands.pop_back();
1694 Operands.pop_back();
1698 static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
1703 // FIXME: Hack to handle recognize s{hr,ar,hl} $1, <op>. Canonicalize to
1705 if ((Name.startswith("shr") || Name.startswith("sar") ||
1706 Name.startswith("shl") || Name.startswith("sal") ||
1707 Name.startswith("rcl") || Name.startswith("rcr") ||
1708 Name.startswith("rol") || Name.startswith("ror")) &&
1709 Operands.size() == 3) {
// Drop an explicit "$1"/"1" count so the shorter shift-by-one encoding
// is matched. The immediate's position differs between dialects.
1710 if (isParsingIntelSyntax()) {
1712 X86Operand *Op1 = static_cast<X86Operand*>(Operands[2]);
1713 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1714 cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
1716 Operands.pop_back();
1719 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1720 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1721 cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
1723 Operands.erase(Operands.begin() + 1);
1728 // Transforms "int $3" into "int3" as a size optimization. We can't write an
1729 // instalias with an immediate operand yet.
1730 if (Name == "int" && Operands.size() == 2) {
1731 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1732 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1733 cast<MCConstantExpr>(Op1->getImm())->getValue() == 3) {
1735 Operands.erase(Operands.begin() + 1);
1736 static_cast<X86Operand*>(Operands[0])->setTokenValue("int3");
// Rewrite an accumulator-form ALU instruction (e.g. AND16i16) into the
// sign-extended-imm8 register form: dest reg, src reg, then the original
// immediate operand. 'isCmp' presumably suppresses the tied dest operand
// for CMP, which has no output — TODO confirm (the conditional line is
// elided from this view, as are the trailing lines of the function).
1743 static bool convertToSExti8(MCInst &Inst, unsigned Opcode, unsigned Reg,
1746 TmpInst.setOpcode(Opcode);
1748 TmpInst.addOperand(MCOperand::CreateReg(Reg));
1749 TmpInst.addOperand(MCOperand::CreateReg(Reg));
1750 TmpInst.addOperand(Inst.getOperand(0));
// 16-bit variant: convert "op $imm, %ax" to the ri8 form when the
// immediate fits in a sign-extended 8-bit value; otherwise no change
// (the early-return lines are elided from this view).
1755 static bool convert16i16to16ri8(MCInst &Inst, unsigned Opcode,
1756 bool isCmp = false) {
1757 if (!Inst.getOperand(0).isImm() ||
1758 !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
1761 return convertToSExti8(Inst, Opcode, X86::AX, isCmp);
// 32-bit variant of the same shrink-to-imm8 rewrite, keyed on %eax.
1764 static bool convert32i32to32ri8(MCInst &Inst, unsigned Opcode,
1765 bool isCmp = false) {
1766 if (!Inst.getOperand(0).isImm() ||
1767 !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
1770 return convertToSExti8(Inst, Opcode, X86::EAX, isCmp);
// 64-bit variant of the same shrink-to-imm8 rewrite, keyed on %rax.
1773 static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode,
1774 bool isCmp = false) {
1775 if (!Inst.getOperand(0).isImm() ||
1776 !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
1779 return convertToSExti8(Inst, Opcode, X86::RAX, isCmp);
// Post-match peephole: shrink accumulator-immediate ALU forms to the
// shorter sign-extended-imm8 encodings when the immediate fits. Returns
// true if the instruction was changed (the caller loops until fixpoint).
// NOTE(review): the return-type line preceding 1783 is elided here.
1783 processInstruction(MCInst &Inst,
1784 const SmallVectorImpl<MCParsedAsmOperand*> &Ops) {
1785 switch (Inst.getOpcode()) {
1786 default: return false;
1787 case X86::AND16i16: return convert16i16to16ri8(Inst, X86::AND16ri8);
1788 case X86::AND32i32: return convert32i32to32ri8(Inst, X86::AND32ri8);
1789 case X86::AND64i32: return convert64i32to64ri8(Inst, X86::AND64ri8);
1790 case X86::XOR16i16: return convert16i16to16ri8(Inst, X86::XOR16ri8);
1791 case X86::XOR32i32: return convert32i32to32ri8(Inst, X86::XOR32ri8);
1792 case X86::XOR64i32: return convert64i32to64ri8(Inst, X86::XOR64ri8);
1793 case X86::OR16i16: return convert16i16to16ri8(Inst, X86::OR16ri8);
1794 case X86::OR32i32: return convert32i32to32ri8(Inst, X86::OR32ri8);
1795 case X86::OR64i32: return convert64i32to64ri8(Inst, X86::OR64ri8);
1796 case X86::CMP16i16: return convert16i16to16ri8(Inst, X86::CMP16ri8, true);
1797 case X86::CMP32i32: return convert32i32to32ri8(Inst, X86::CMP32ri8, true);
1798 case X86::CMP64i32: return convert64i32to64ri8(Inst, X86::CMP64ri8, true);
1799 case X86::ADD16i16: return convert16i16to16ri8(Inst, X86::ADD16ri8);
1800 case X86::ADD32i32: return convert32i32to32ri8(Inst, X86::ADD32ri8);
1801 case X86::ADD64i32: return convert64i32to64ri8(Inst, X86::ADD64ri8);
1802 case X86::SUB16i16: return convert16i16to16ri8(Inst, X86::SUB16ri8);
1803 case X86::SUB32i32: return convert32i32to32ri8(Inst, X86::SUB32ri8);
1804 case X86::SUB64i32: return convert64i32to64ri8(Inst, X86::SUB64ri8);
1805 case X86::ADC16i16: return convert16i16to16ri8(Inst, X86::ADC16ri8);
1806 case X86::ADC32i32: return convert32i32to32ri8(Inst, X86::ADC32ri8);
1807 case X86::ADC64i32: return convert64i32to64ri8(Inst, X86::ADC64ri8);
1808 case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8);
1809 case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8);
1810 case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8);
1814 static const char *getSubtargetFeatureName(unsigned Val);
// Match the parsed operand list against the generated matcher and emit the
// instruction (unless MatchingInlineAsm, which only validates). On an
// ambiguous mnemonic, retries with each size suffix appended and produces
// a targeted diagnostic. Returns true on error.
// NOTE(review): elided view — the return-type line, several returns and
// closing braces between visible lines are missing from this chunk.
1816 MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1817 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1818 MCStreamer &Out, unsigned &ErrorInfo,
1819 bool MatchingInlineAsm) {
1820 assert(!Operands.empty() && "Unexpect empty operand list!");
1821 X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
1822 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
1823 ArrayRef<SMRange> EmptyRanges = ArrayRef<SMRange>();
1825 // First, handle aliases that expand to multiple instructions.
1826 // FIXME: This should be replaced with a real .td file alias mechanism.
1827 // Also, MatchInstructionImpl should actually *do* the EmitInstruction
// Wait-prefixed FP instructions expand to WAIT + the fn* form.
1829 if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" ||
1830 Op->getToken() == "fstsww" || Op->getToken() == "fstcww" ||
1831 Op->getToken() == "finit" || Op->getToken() == "fsave" ||
1832 Op->getToken() == "fstenv" || Op->getToken() == "fclex") {
1834 Inst.setOpcode(X86::WAIT);
1836 if (!MatchingInlineAsm)
1837 Out.EmitInstruction(Inst);
// Swap the mnemonic for the no-wait form and fall through to matching.
1840 StringSwitch<const char*>(Op->getToken())
1841 .Case("finit", "fninit")
1842 .Case("fsave", "fnsave")
1843 .Case("fstcw", "fnstcw")
1844 .Case("fstcww", "fnstcw")
1845 .Case("fstenv", "fnstenv")
1846 .Case("fstsw", "fnstsw")
1847 .Case("fstsww", "fnstsw")
1848 .Case("fclex", "fnclex")
1850 assert(Repl && "Unknown wait-prefixed instruction");
1852 Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
1855 bool WasOriginallyInvalidOperand = false;
1858 // First, try a direct match.
1859 switch (MatchInstructionImpl(Operands, Inst,
1860 ErrorInfo, MatchingInlineAsm,
1861 isParsingIntelSyntax())) {
1864 // Some instructions need post-processing to, for example, tweak which
1865 // encoding is selected. Loop on it while changes happen so the
1866 // individual transformations can chain off each other.
1867 if (!MatchingInlineAsm)
1868 while (processInstruction(Inst, Operands))
1872 if (!MatchingInlineAsm)
1873 Out.EmitInstruction(Inst);
1874 Opcode = Inst.getOpcode();
1876 case Match_MissingFeature: {
1877 assert(ErrorInfo && "Unknown missing feature!");
1878 // Special case the error message for the very common case where only
1879 // a single subtarget feature is missing.
1880 std::string Msg = "instruction requires:";
// Walk the feature bitmask and append each missing feature's name.
1882 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
1883 if (ErrorInfo & Mask) {
1885 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
1889 return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
1891 case Match_InvalidOperand:
1892 WasOriginallyInvalidOperand = true;
1894 case Match_MnemonicFail:
1898 // FIXME: Ideally, we would only attempt suffix matches for things which are
1899 // valid prefixes, and we could just infer the right unambiguous
1900 // type. However, that requires substantially more matcher support than the
1903 // Change the operand to point to a temporary token.
1904 StringRef Base = Op->getToken();
1905 SmallString<16> Tmp;
1908 Op->setTokenValue(Tmp.str());
1910 // If this instruction starts with an 'f', then it is a floating point stack
1911 // instruction. These come in up to three forms for 32-bit, 64-bit, and
1912 // 80-bit floating point, which use the suffixes s,l,t respectively.
1914 // Otherwise, we assume that this may be an integer instruction, which comes
1915 // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
1916 const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
1918 // Check for the various suffix matches.
1919 Tmp[Base.size()] = Suffixes[0];
1920 unsigned ErrorInfoIgnore;
1921 unsigned ErrorInfoMissingFeature = 0; // Init suppresses compiler warnings.
1922 unsigned Match1, Match2, Match3, Match4;
1924 Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1925 isParsingIntelSyntax());
1926 // If this returned as a missing feature failure, remember that.
1927 if (Match1 == Match_MissingFeature)
1928 ErrorInfoMissingFeature = ErrorInfoIgnore;
1929 Tmp[Base.size()] = Suffixes[1];
1930 Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1931 isParsingIntelSyntax());
1932 // If this returned as a missing feature failure, remember that.
1933 if (Match2 == Match_MissingFeature)
1934 ErrorInfoMissingFeature = ErrorInfoIgnore;
1935 Tmp[Base.size()] = Suffixes[2];
1936 Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1937 isParsingIntelSyntax());
1938 // If this returned as a missing feature failure, remember that.
1939 if (Match3 == Match_MissingFeature)
1940 ErrorInfoMissingFeature = ErrorInfoIgnore;
1941 Tmp[Base.size()] = Suffixes[3];
1942 Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1943 isParsingIntelSyntax());
1944 // If this returned as a missing feature failure, remember that.
1945 if (Match4 == Match_MissingFeature)
1946 ErrorInfoMissingFeature = ErrorInfoIgnore;
1948 // Restore the old token.
1949 Op->setTokenValue(Base);
1951 // If exactly one matched, then we treat that as a successful match (and the
1952 // instruction will already have been filled in correctly, since the failing
1953 // matches won't have modified it).
1954 unsigned NumSuccessfulMatches =
1955 (Match1 == Match_Success) + (Match2 == Match_Success) +
1956 (Match3 == Match_Success) + (Match4 == Match_Success);
1957 if (NumSuccessfulMatches == 1) {
1959 if (!MatchingInlineAsm)
1960 Out.EmitInstruction(Inst);
1961 Opcode = Inst.getOpcode();
1965 // Otherwise, the match failed, try to produce a decent error message.
1967 // If we had multiple suffix matches, then identify this as an ambiguous
1969 if (NumSuccessfulMatches > 1) {
1971 unsigned NumMatches = 0;
1972 if (Match1 == Match_Success) MatchChars[NumMatches++] = Suffixes[0];
1973 if (Match2 == Match_Success) MatchChars[NumMatches++] = Suffixes[1];
1974 if (Match3 == Match_Success) MatchChars[NumMatches++] = Suffixes[2];
1975 if (Match4 == Match_Success) MatchChars[NumMatches++] = Suffixes[3];
1977 SmallString<126> Msg;
1978 raw_svector_ostream OS(Msg);
1979 OS << "ambiguous instructions require an explicit suffix (could be ";
1980 for (unsigned i = 0; i != NumMatches; ++i) {
1983 if (i + 1 == NumMatches)
1985 OS << "'" << Base << MatchChars[i] << "'";
1988 Error(IDLoc, OS.str(), EmptyRanges, MatchingInlineAsm);
1992 // Okay, we know that none of the variants matched successfully.
1994 // If all of the instructions reported an invalid mnemonic, then the original
1995 // mnemonic was invalid.
1996 if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
1997 (Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
1998 if (!WasOriginallyInvalidOperand) {
1999 ArrayRef<SMRange> Ranges = MatchingInlineAsm ? EmptyRanges :
2001 return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
2002 Ranges, MatchingInlineAsm);
2005 // Recover location info for the operand if we know which was the problem.
2006 if (ErrorInfo != ~0U) {
2007 if (ErrorInfo >= Operands.size())
2008 return Error(IDLoc, "too few operands for instruction",
2009 EmptyRanges, MatchingInlineAsm);
2011 X86Operand *Operand = (X86Operand*)Operands[ErrorInfo];
2012 if (Operand->getStartLoc().isValid()) {
2013 SMRange OperandRange = Operand->getLocRange();
2014 return Error(Operand->getStartLoc(), "invalid operand for instruction",
2015 OperandRange, MatchingInlineAsm);
2019 return Error(IDLoc, "invalid operand for instruction", EmptyRanges,
2023 // If one instruction matched with a missing feature, report this as a
2025 if ((Match1 == Match_MissingFeature) + (Match2 == Match_MissingFeature) +
2026 (Match3 == Match_MissingFeature) + (Match4 == Match_MissingFeature) == 1){
2027 std::string Msg = "instruction requires:";
2029 for (unsigned i = 0; i < (sizeof(ErrorInfoMissingFeature)*8-1); ++i) {
2030 if (ErrorInfoMissingFeature & Mask) {
2032 Msg += getSubtargetFeatureName(ErrorInfoMissingFeature & Mask);
2036 return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
2039 // If one instruction matched with an invalid operand, report this as an
2041 if ((Match1 == Match_InvalidOperand) + (Match2 == Match_InvalidOperand) +
2042 (Match3 == Match_InvalidOperand) + (Match4 == Match_InvalidOperand) == 1){
2043 Error(IDLoc, "invalid operand for instruction", EmptyRanges,
2048 // If all of these were an outright failure, report it in a useless way.
2049 Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
2050 EmptyRanges, MatchingInlineAsm);
// Handle X86-specific directives: .word, .code32/.code64, and the
// .att_syntax/.intel_syntax dialect switches. Returns true on error
// (trailing return lines elided from this view).
2055 bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
2056 StringRef IDVal = DirectiveID.getIdentifier();
2057 if (IDVal == ".word")
2058 return ParseDirectiveWord(2, DirectiveID.getLoc());
2059 else if (IDVal.startswith(".code"))
2060 return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
2061 else if (IDVal.startswith(".att_syntax")) {
2062 getParser().setAssemblerDialect(0);
2064 } else if (IDVal.startswith(".intel_syntax")) {
2065 getParser().setAssemblerDialect(1);
2066 if (getLexer().isNot(AsmToken::EndOfStatement)) {
// "noprefix" modifier is accepted but currently not acted upon.
2067 if(Parser.getTok().getString() == "noprefix") {
2068 // FIXME : Handle noprefix
2078 /// ParseDirectiveWord
2079 /// ::= .word [ expression (, expression)* ]
// Emit each comma-separated expression as a Size-byte value. Returns true
// on error (loop-continuation and closing lines elided from this view).
2080 bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
2081 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2083 const MCExpr *Value;
2084 if (getParser().parseExpression(Value))
2087 getParser().getStreamer().EmitValue(Value, Size);
2089 if (getLexer().is(AsmToken::EndOfStatement))
2092 // FIXME: Improve diagnostic.
2093 if (getLexer().isNot(AsmToken::Comma))
2094 return Error(L, "unexpected token in directive");
2103 /// ParseDirectiveCode
2104 /// ::= .code32 | .code64
// Switch the parser/streamer between 32- and 64-bit code generation; the
// mode-toggle calls between the visible lines are elided from this view.
// Unknown ".code*" directives are rejected.
2105 bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
2106 if (IDVal == ".code32") {
2108 if (is64BitMode()) {
2110 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
2112 } else if (IDVal == ".code64") {
2114 if (!is64BitMode()) {
2116 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
2119 return Error(L, "unexpected directive " + IDVal);
2125 // Force static initialization.
// Entry point called by the target registry: registers this asm parser
// for both the 32- and 64-bit X86 targets.
2126 extern "C" void LLVMInitializeX86AsmParser() {
2127 RegisterMCAsmParser<X86AsmParser> X(TheX86_32Target);
2128 RegisterMCAsmParser<X86AsmParser> Y(TheX86_64Target);
2131 #define GET_REGISTER_MATCHER
2132 #define GET_MATCHER_IMPLEMENTATION
2133 #define GET_SUBTARGET_FEATURE_NAME
2134 #include "X86GenAsmMatcher.inc"