1 //===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "llvm/ADT/APFloat.h"
12 #include "llvm/ADT/SmallString.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/ADT/StringSwitch.h"
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/MC/MCExpr.h"
17 #include "llvm/MC/MCInst.h"
18 #include "llvm/MC/MCParser/MCAsmLexer.h"
19 #include "llvm/MC/MCParser/MCAsmParser.h"
20 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
21 #include "llvm/MC/MCRegisterInfo.h"
22 #include "llvm/MC/MCStreamer.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCSymbol.h"
25 #include "llvm/MC/MCTargetAsmParser.h"
26 #include "llvm/Support/SourceMgr.h"
27 #include "llvm/Support/TargetRegistry.h"
28 #include "llvm/Support/raw_ostream.h"
// X86AsmParser - target assembly parser for X86.  Dispatches between AT&T
// and Intel operand syntax (see isParsingIntelSyntax) and implements the
// MS-style inline-asm callbacks.  NOTE(review): this listing is elided;
// several member bodies are not fully visible here.
35 class X86AsmParser : public MCTargetAsmParser {
38 ParseInstructionInfo *InstInfo;
40 MCAsmParser &getParser() const { return Parser; }
42 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
// Error - Report a parse error unless we are matching MS inline asm, in
// which case errors are suppressed (the caller retries other variants).
44 bool Error(SMLoc L, const Twine &Msg,
45 ArrayRef<SMRange> Ranges = ArrayRef<SMRange>(),
46 bool MatchingInlineAsm = false) {
47 if (MatchingInlineAsm) return true;
48 return Parser.Error(L, Msg, Ranges);
// ErrorOperand - Emit an error and return a sentinel operand (body elided).
51 X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) {
56 X86Operand *ParseOperand();
57 X86Operand *ParseATTOperand();
58 X86Operand *ParseIntelOperand();
59 X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc);
60 X86Operand *ParseIntelOperator(SMLoc StartLoc, unsigned OpKind);
61 X86Operand *ParseIntelMemOperand(unsigned SegReg, SMLoc StartLoc);
62 X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size);
63 X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
65 X86Operand *CreateMemForInlineAsm(const MCExpr *Disp, SMLoc Start, SMLoc End,
66 SMLoc SizeDirLoc, unsigned Size);
68 bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp,
69 SmallString<64> &Err);
71 bool ParseDirectiveWord(unsigned Size, SMLoc L);
72 bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
74 bool processInstruction(MCInst &Inst,
75 const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
77 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
78 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
79 MCStreamer &Out, unsigned &ErrorInfo,
80 bool MatchingInlineAsm);
82 /// isSrcOp - Returns true if operand is either (%rsi) or %ds:(%rsi)
83 /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
84 bool isSrcOp(X86Operand &Op);
86 /// isDstOp - Returns true if operand is either (%rdi) or %es:(%rdi)
87 /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode.
88 bool isDstOp(X86Operand &Op);
90 bool is64BitMode() const {
91 // FIXME: Can tablegen auto-generate this?
92 return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
// (SwitchMode, elided) Toggle the Mode64Bit feature and recompute the
// cached available-feature bits for the matcher.
95 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(X86::Mode64Bit));
96 setAvailableFeatures(FB);
99 /// @name Auto-generated Matcher Functions
102 #define GET_ASSEMBLER_HEADER
103 #include "X86GenAsmMatcher.inc"
108 X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
109 : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {
111 // Initialize the set of available features.
112 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
114 virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
116 virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
118 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
120 virtual bool ParseDirective(AsmToken DirectiveID);
// A nonzero assembler dialect selects Intel syntax.
122 bool isParsingIntelSyntax() {
123 return getParser().getAssemblerDialect();
126 } // end anonymous namespace
128 /// @name Auto-generated Match Functions
131 static unsigned MatchRegisterName(StringRef Name);
// isImmSExti16i8Value - Return true if Value fits an 8-bit immediate
// sign-extended to 16 bits: [0,0x7f], the 16-bit negative encodings
// [0xff80,0xffff], or the 64-bit sign-extended negatives.
135 static bool isImmSExti16i8Value(uint64_t Value) {
136 return (( Value <= 0x000000000000007FULL)||
137 (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
// NOTE(review): the upper bound below is always true for uint64_t; the
// compare is redundant but harmless.
138 (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
// isImmSExti32i8Value - Return true if Value fits an 8-bit immediate
// sign-extended to 32 bits (analogous ranges to the i16i8 case above).
141 static bool isImmSExti32i8Value(uint64_t Value) {
142 return (( Value <= 0x000000000000007FULL)||
143 (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)||
// NOTE(review): upper bound is always true for uint64_t; redundant compare.
144 (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
// isImmZExtu32u8Value - Return true if Value fits an 8-bit immediate
// zero-extended to 32 bits, i.e. [0,0xff].
147 static bool isImmZExtu32u8Value(uint64_t Value) {
148 return (Value <= 0x00000000000000FFULL);
// isImmSExti64i8Value - Return true if Value fits an 8-bit immediate
// sign-extended to 64 bits: [0,0x7f] or [-128,-1].
151 static bool isImmSExti64i8Value(uint64_t Value) {
152 return (( Value <= 0x000000000000007FULL)||
// NOTE(review): upper bound is always true for uint64_t; redundant compare.
153 (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
// isImmSExti64i32Value - Return true if Value fits a 32-bit immediate
// sign-extended to 64 bits: [0,0x7fffffff] or [INT32_MIN,-1].
156 static bool isImmSExti64i32Value(uint64_t Value) {
157 return (( Value <= 0x000000007FFFFFFFULL)||
// NOTE(review): upper bound is always true for uint64_t; redundant compare.
158 (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
162 /// X86Operand - Instances of this class represent a parsed X86 machine
// X86Operand - A parsed X86 operand: a Token, Register, Immediate, or
// Memory reference (discriminated by Kind; the union members are elided
// from this listing).  Mem.Size == 0 means "size unknown", which is why
// the isMemN predicates below accept it.
164 struct X86Operand : public MCParsedAsmOperand {
172 SMLoc StartLoc, EndLoc;
206 X86Operand(KindTy K, SMLoc Start, SMLoc End)
207 : Kind(K), StartLoc(Start), EndLoc(End) {}
209 /// getStartLoc - Get the location of the first token of this operand.
210 SMLoc getStartLoc() const { return StartLoc; }
211 /// getEndLoc - Get the location of the last token of this operand.
212 SMLoc getEndLoc() const { return EndLoc; }
213 /// getLocRange - Get the range between the first and last token of this
215 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
216 /// getOffsetOfLoc - Get the location of the offset operator.
217 SMLoc getOffsetOfLoc() const { return OffsetOfLoc; }
219 virtual void print(raw_ostream &OS) const {}
221 StringRef getToken() const {
222 assert(Kind == Token && "Invalid access!");
223 return StringRef(Tok.Data, Tok.Length);
225 void setTokenValue(StringRef Value) {
226 assert(Kind == Token && "Invalid access!");
227 Tok.Data = Value.data();
228 Tok.Length = Value.size();
231 unsigned getReg() const {
232 assert(Kind == Register && "Invalid access!");
236 const MCExpr *getImm() const {
237 assert(Kind == Immediate && "Invalid access!");
241 bool needAsmRewrite() const {
242 assert(Kind == Immediate && "Invalid access!");
243 return Imm.NeedAsmRewrite;
246 const MCExpr *getMemDisp() const {
247 assert(Kind == Memory && "Invalid access!");
250 unsigned getMemSegReg() const {
251 assert(Kind == Memory && "Invalid access!");
254 unsigned getMemBaseReg() const {
255 assert(Kind == Memory && "Invalid access!");
258 unsigned getMemIndexReg() const {
259 assert(Kind == Memory && "Invalid access!");
262 unsigned getMemScale() const {
263 assert(Kind == Memory && "Invalid access!");
267 bool isToken() const {return Kind == Token; }
269 bool isImm() const { return Kind == Immediate; }
// The isImm* predicates let non-constant expressions through on purpose:
// relaxation/fixups decide later whether the value actually fits.
271 bool isImmSExti16i8() const {
275 // If this isn't a constant expr, just assume it fits and let relaxation
277 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
281 // Otherwise, check the value is in a range that makes sense for this
283 return isImmSExti16i8Value(CE->getValue());
285 bool isImmSExti32i8() const {
289 // If this isn't a constant expr, just assume it fits and let relaxation
291 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
295 // Otherwise, check the value is in a range that makes sense for this
297 return isImmSExti32i8Value(CE->getValue());
299 bool isImmZExtu32u8() const {
303 // If this isn't a constant expr, just assume it fits and let relaxation
305 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
309 // Otherwise, check the value is in a range that makes sense for this
311 return isImmZExtu32u8Value(CE->getValue());
313 bool isImmSExti64i8() const {
317 // If this isn't a constant expr, just assume it fits and let relaxation
319 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
323 // Otherwise, check the value is in a range that makes sense for this
325 return isImmSExti64i8Value(CE->getValue());
327 bool isImmSExti64i32() const {
331 // If this isn't a constant expr, just assume it fits and let relaxation
333 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
337 // Otherwise, check the value is in a range that makes sense for this
339 return isImmSExti64i32Value(CE->getValue());
// True iff this operand came from the Intel 'offset' operator.
342 bool isOffsetOf() const {
343 return OffsetOfLoc.getPointer();
346 bool needAddressOf() const {
350 bool isMem() const { return Kind == Memory; }
351 bool isMem8() const {
352 return Kind == Memory && (!Mem.Size || Mem.Size == 8);
354 bool isMem16() const {
355 return Kind == Memory && (!Mem.Size || Mem.Size == 16);
357 bool isMem32() const {
358 return Kind == Memory && (!Mem.Size || Mem.Size == 32);
360 bool isMem64() const {
361 return Kind == Memory && (!Mem.Size || Mem.Size == 64);
363 bool isMem80() const {
364 return Kind == Memory && (!Mem.Size || Mem.Size == 80);
366 bool isMem128() const {
367 return Kind == Memory && (!Mem.Size || Mem.Size == 128);
369 bool isMem256() const {
370 return Kind == Memory && (!Mem.Size || Mem.Size == 256);
// isMemV* - vector-index (gather-style) memory: index register must be an
// XMM/YMM register in the stated range.
373 bool isMemVX32() const {
374 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
375 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
377 bool isMemVY32() const {
378 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
379 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
381 bool isMemVX64() const {
382 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
383 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
385 bool isMemVY64() const {
386 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
387 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
// isAbsMem - plain displacement with no segment/base/index.
390 bool isAbsMem() const {
391 return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
392 !getMemIndexReg() && getMemScale() == 1;
395 bool isReg() const { return Kind == Register; }
397 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
398 // Add as immediates when possible.
399 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
400 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
402 Inst.addOperand(MCOperand::CreateExpr(Expr));
405 void addRegOperands(MCInst &Inst, unsigned N) const {
406 assert(N == 1 && "Invalid number of operands!");
407 Inst.addOperand(MCOperand::CreateReg(getReg()));
410 void addImmOperands(MCInst &Inst, unsigned N) const {
411 assert(N == 1 && "Invalid number of operands!");
412 addExpr(Inst, getImm());
// All sized addMem*Operands variants forward to the generic 5-operand form.
415 void addMem8Operands(MCInst &Inst, unsigned N) const {
416 addMemOperands(Inst, N);
418 void addMem16Operands(MCInst &Inst, unsigned N) const {
419 addMemOperands(Inst, N);
421 void addMem32Operands(MCInst &Inst, unsigned N) const {
422 addMemOperands(Inst, N);
424 void addMem64Operands(MCInst &Inst, unsigned N) const {
425 addMemOperands(Inst, N);
427 void addMem80Operands(MCInst &Inst, unsigned N) const {
428 addMemOperands(Inst, N);
430 void addMem128Operands(MCInst &Inst, unsigned N) const {
431 addMemOperands(Inst, N);
433 void addMem256Operands(MCInst &Inst, unsigned N) const {
434 addMemOperands(Inst, N);
436 void addMemVX32Operands(MCInst &Inst, unsigned N) const {
437 addMemOperands(Inst, N);
439 void addMemVY32Operands(MCInst &Inst, unsigned N) const {
440 addMemOperands(Inst, N);
442 void addMemVX64Operands(MCInst &Inst, unsigned N) const {
443 addMemOperands(Inst, N);
445 void addMemVY64Operands(MCInst &Inst, unsigned N) const {
446 addMemOperands(Inst, N);
// addMemOperands - MCInst memory operand order is: base, scale, index,
// displacement, segment.
449 void addMemOperands(MCInst &Inst, unsigned N) const {
450 assert((N == 5) && "Invalid number of operands!");
451 Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
452 Inst.addOperand(MCOperand::CreateImm(getMemScale()));
453 Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
454 addExpr(Inst, getMemDisp());
455 Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
458 void addAbsMemOperands(MCInst &Inst, unsigned N) const {
459 assert((N == 1) && "Invalid number of operands!");
460 // Add as immediates when possible.
461 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
462 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
464 Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
467 static X86Operand *CreateToken(StringRef Str, SMLoc Loc) {
468 SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
469 X86Operand *Res = new X86Operand(Token, Loc, EndLoc);
470 Res->Tok.Data = Str.data();
471 Res->Tok.Length = Str.size();
475 static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
476 bool AddressOf = false,
477 SMLoc OffsetOfLoc = SMLoc()) {
478 X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc);
479 Res->Reg.RegNo = RegNo;
480 Res->AddressOf = AddressOf;
481 Res->OffsetOfLoc = OffsetOfLoc;
485 static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc,
486 bool NeedRewrite = true){
487 X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc);
489 Res->Imm.NeedAsmRewrite = NeedRewrite;
493 /// Create an absolute memory operand.
494 static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
496 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
498 Res->Mem.Disp = Disp;
499 Res->Mem.BaseReg = 0;
500 Res->Mem.IndexReg = 0;
502 Res->Mem.Size = Size;
503 Res->AddressOf = false;
507 /// Create a generalized memory operand.
508 static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp,
509 unsigned BaseReg, unsigned IndexReg,
510 unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
512 // We should never just have a displacement, that should be parsed as an
513 // absolute memory operand.
514 assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
516 // The scale should always be one of {1,2,4,8}.
517 assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
519 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
520 Res->Mem.SegReg = SegReg;
521 Res->Mem.Disp = Disp;
522 Res->Mem.BaseReg = BaseReg;
523 Res->Mem.IndexReg = IndexReg;
524 Res->Mem.Scale = Scale;
525 Res->Mem.Size = Size;
526 Res->AddressOf = false;
531 } // end anonymous namespace.
// isSrcOp - True if Op is the implicit string-op source: zero-displacement
// (%rsi)/(%esi) with no index register and no segment override (or DS).
533 bool X86AsmParser::isSrcOp(X86Operand &Op) {
534 unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI;
536 return (Op.isMem() &&
537 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::DS) &&
538 isa<MCConstantExpr>(Op.Mem.Disp) &&
539 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
540 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0);
// isDstOp - True if Op is the implicit string-op destination: zero-
// displacement (%rdi)/(%edi) with no index register and no segment
// override (or ES).
543 bool X86AsmParser::isDstOp(X86Operand &Op) {
544 unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI;
547 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) &&
548 isa<MCConstantExpr>(Op.Mem.Disp) &&
549 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
550 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0;
// ParseRegister - Parse a register name into RegNo, setting StartLoc/EndLoc
// to the token range.  Returns true on error.  Handles the optional AT&T
// '%' prefix, case-insensitive names, mode-dependent rejection of 64-bit-
// only registers, the multi-token "%st(N)" form, and the "dbN" aliases for
// the DRN debug registers.
553 bool X86AsmParser::ParseRegister(unsigned &RegNo,
554 SMLoc &StartLoc, SMLoc &EndLoc) {
556 const AsmToken &PercentTok = Parser.getTok();
557 StartLoc = PercentTok.getLoc();
559 // If we encounter a %, ignore it. This code handles registers with and
560 // without the prefix, unprefixed registers can occur in cfi directives.
561 if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent))
562 Parser.Lex(); // Eat percent token.
564 const AsmToken &Tok = Parser.getTok();
565 EndLoc = Tok.getEndLoc();
567 if (Tok.isNot(AsmToken::Identifier)) {
// In Intel mode a non-identifier is not an error: the caller falls back to
// parsing the token as an expression.
568 if (isParsingIntelSyntax()) return true;
569 return Error(StartLoc, "invalid register name",
570 SMRange(StartLoc, EndLoc));
573 RegNo = MatchRegisterName(Tok.getString());
575 // If the match failed, try the register name as lowercase.
577 RegNo = MatchRegisterName(Tok.getString().lower());
579 if (!is64BitMode()) {
580 // FIXME: This should be done using Requires<In32BitMode> and
581 // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
583 // FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a
// Reject registers that only exist in 64-bit mode.
585 if (RegNo == X86::RIZ ||
586 X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
587 X86II::isX86_64NonExtLowByteReg(RegNo) ||
588 X86II::isX86_64ExtendedReg(RegNo))
589 return Error(StartLoc, "register %"
590 + Tok.getString() + " is only available in 64-bit mode",
591 SMRange(StartLoc, EndLoc));
594 // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
595 if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) {
597 Parser.Lex(); // Eat 'st'
599 // Check to see if we have '(4)' after %st.
600 if (getLexer().isNot(AsmToken::LParen))
605 const AsmToken &IntTok = Parser.getTok();
606 if (IntTok.isNot(AsmToken::Integer))
607 return Error(IntTok.getLoc(), "expected stack index");
608 switch (IntTok.getIntVal()) {
609 case 0: RegNo = X86::ST0; break;
610 case 1: RegNo = X86::ST1; break;
611 case 2: RegNo = X86::ST2; break;
612 case 3: RegNo = X86::ST3; break;
613 case 4: RegNo = X86::ST4; break;
614 case 5: RegNo = X86::ST5; break;
615 case 6: RegNo = X86::ST6; break;
616 case 7: RegNo = X86::ST7; break;
617 default: return Error(IntTok.getLoc(), "invalid stack index");
620 if (getParser().Lex().isNot(AsmToken::RParen))
621 return Error(Parser.getTok().getLoc(), "expected ')'");
623 EndLoc = Parser.getTok().getEndLoc();
624 Parser.Lex(); // Eat ')'
628 EndLoc = Parser.getTok().getEndLoc();
630 // If this is "db[0-7]", match it as an alias
632 if (RegNo == 0 && Tok.getString().size() == 3 &&
633 Tok.getString().startswith("db")) {
634 switch (Tok.getString()[2]) {
635 case '0': RegNo = X86::DR0; break;
636 case '1': RegNo = X86::DR1; break;
637 case '2': RegNo = X86::DR2; break;
638 case '3': RegNo = X86::DR3; break;
639 case '4': RegNo = X86::DR4; break;
640 case '5': RegNo = X86::DR5; break;
641 case '6': RegNo = X86::DR6; break;
642 case '7': RegNo = X86::DR7; break;
646 EndLoc = Parser.getTok().getEndLoc();
647 Parser.Lex(); // Eat it.
653 if (isParsingIntelSyntax()) return true;
654 return Error(StartLoc, "invalid register name",
655 SMRange(StartLoc, EndLoc));
658 Parser.Lex(); // Eat identifier token.
// ParseOperand - Parse one operand, dispatching on the active assembler
// dialect (Intel vs. AT&T).
662 X86Operand *X86AsmParser::ParseOperand() {
663 if (isParsingIntelSyntax())
664 return ParseIntelOperand();
665 return ParseATTOperand();
668 /// getIntelMemOperandSize - Return intel memory operand size.
/// Maps an Intel size keyword ("byte ptr", "qword ptr", ...) to a size in
/// bits; unknown keywords map to 0 (size unknown) via the elided .Default.
669 static unsigned getIntelMemOperandSize(StringRef OpStr) {
670 unsigned Size = StringSwitch<unsigned>(OpStr)
671 .Cases("BYTE", "byte", 8)
672 .Cases("WORD", "word", 16)
673 .Cases("DWORD", "dword", 32)
674 .Cases("QWORD", "qword", 64)
675 .Cases("XWORD", "xword", 80)
676 .Cases("XMMWORD", "xmmword", 128)
677 .Cases("YMMWORD", "ymmword", 256)
// IntelBracExprState - states of the bracketed-expression parser below.
// (Most enumerators are elided from this listing.)
682 enum IntelBracExprState {
688 IBES_REGISTER_STAR_INTEGER,
// IntelBracExprStateMachine - incremental parser for Intel bracketed
// memory expressions "[ BaseReg + Scale*IndexReg + Disp ]".  Tokens are
// fed in via the on*() callbacks; the accumulated base/index/scale/disp
// are read back through the getters once isValidEndState() holds (i.e.
// after the closing ']').  NOTE(review): most transition bodies are elided
// here — comments describe only what is visible.
698 class IntelBracExprStateMachine {
699 IntelBracExprState State;
700 unsigned BaseReg, IndexReg, Scale;
709 IntelBracExprStateMachine(MCAsmParser &parser) :
710 State(IBES_START), BaseReg(0), IndexReg(0), Scale(1), Disp(0),
711 TmpReg(0), TmpInteger(0), isPlus(true) {}
713 unsigned getBaseReg() { return BaseReg; }
714 unsigned getIndexReg() { return IndexReg; }
715 unsigned getScale() { return Scale; }
716 int64_t getDisp() { return Disp; }
717 bool isValidEndState() { return State == IBES_RBRAC; }
733 // If we already have a BaseReg, then assume this is the IndexReg with a
738 assert (!IndexReg && "BaseReg/IndexReg already set!");
743 case IBES_INDEX_REGISTER:
766 // If we already have a BaseReg, then assume this is the IndexReg with a
771 assert (!IndexReg && "BaseReg/IndexReg already set!");
776 case IBES_INDEX_REGISTER:
// onRegister - a register token was seen; after "int *" it becomes the
// index register.
782 void onRegister(unsigned Reg) {
788 State = IBES_REGISTER;
791 case IBES_INTEGER_STAR:
792 assert (!IndexReg && "IndexReg already set!");
793 State = IBES_INDEX_REGISTER;
805 State = IBES_DISP_EXPR;
// onInteger - an integer token was seen; after "reg *" it is the scale.
809 void onInteger(int64_t TmpInt) {
815 State = IBES_INTEGER;
819 State = IBES_INTEGER;
822 case IBES_REGISTER_STAR:
823 assert (!IndexReg && "IndexReg already set!");
824 State = IBES_INDEX_REGISTER;
836 State = IBES_INTEGER_STAR;
839 State = IBES_REGISTER_STAR;
871 // If we already have a BaseReg, then assume this is the IndexReg with a
876 assert (!IndexReg && "BaseReg/IndexReg already set!");
881 case IBES_INDEX_REGISTER:
// CreateMemForInlineAsm - Build an operand for a symbolic reference while
// parsing MS inline asm.  Variables become memory operands (with a size
// directive rewrite when the size is known); non-variable symbols (e.g.
// functions/labels) become a register operand so the matcher applies an
// 'r' constraint.
888 X86Operand *X86AsmParser::CreateMemForInlineAsm(const MCExpr *Disp, SMLoc Start,
889 SMLoc End, SMLoc SizeDirLoc,
891 bool NeedSizeDir = false;
892 bool IsVarDecl = false;
893 if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Disp)) {
894 const MCSymbol &Sym = SymRef->getSymbol();
895 // FIXME: The SemaLookup will fail if the name is anything other than an
897 // FIXME: Pass a valid SMLoc.
898 unsigned tLength, tSize, tType;
899 SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, tLength,
900 tSize, tType, IsVarDecl)
902 Size = tType * 8; // Size is in terms of bits in this context.
903 NeedSizeDir = Size > 0;
907 // If this is not a VarDecl then assume it is a FuncDecl or some other label
908 // reference. We need an 'r' constraint here, so we need to create register
909 // operand to ensure proper matching. Just pick a GPR based on the size of
912 unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
913 return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true);
917 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_SizeDirective, SizeDirLoc,
920 // When parsing inline assembly we set the base register to a non-zero value
921 // as we don't know the actual value at this time. This is necessary to
922 // get the matching correct in some cases.
923 return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0,
924 /*Scale*/1, Start, End, Size);
// ParseIntelBracExpression - Parse an Intel bracketed memory reference
// "[symbol]" or "[ BaseReg + Scale*IndexReg + Disp ]", optionally with a
// trailing dot operator ("[ebx].field").  SegReg is the already-parsed
// segment override (0 if none); Size is the operand size in bits from a
// preceding "N ptr" directive (0 if unknown).
927 X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
929 const AsmToken &Tok = Parser.getTok();
930 SMLoc Start = Tok.getLoc(), End = Tok.getEndLoc();
933 if (getLexer().isNot(AsmToken::LBrac))
934 return ErrorOperand(Start, "Expected '[' token!");
939 // Try to handle '[' 'symbol' ']'
940 if (getLexer().is(AsmToken::Identifier)) {
941 if (ParseRegister(TmpReg, Start, End)) {
943 if (getParser().parseExpression(Disp, End))
946 if (getLexer().isNot(AsmToken::RBrac))
947 return ErrorOperand(Parser.getTok().getLoc(), "Expected ']' token!");
948 // Adjust the EndLoc due to the ']'.
949 End = SMLoc::getFromPointer(Parser.getTok().getEndLoc().getPointer()-1);
951 if (!isParsingInlineAsm())
952 return X86Operand::CreateMem(Disp, Start, End, Size);
954 // We want the size directive before the '['.
955 SMLoc SizeDirLoc = SMLoc::getFromPointer(Start.getPointer()-1);
956 return CreateMemForInlineAsm(Disp, Start, End, SizeDirLoc, Size);
960 // Parse [ BaseReg + Scale*IndexReg + Disp ].
962 IntelBracExprStateMachine SM(Parser);
964 // If we parsed a register, then the end loc has already been set and
965 // the identifier has already been lexed. We also need to update the
968 SM.onRegister(TmpReg);
970 const MCExpr *Disp = 0;
972 bool UpdateLocLex = true;
974 // The period in the dot operator (e.g., [ebx].foo.bar) is parsed as an
975 // identifier. Don't try to parse it as a register.
976 if (Tok.getString().startswith("."))
// Token-feeding loop: hand each token to the state machine until ']'.
979 switch (getLexer().getKind()) {
981 if (SM.isValidEndState()) {
985 return ErrorOperand(Tok.getLoc(), "Unexpected token!");
987 case AsmToken::Identifier: {
988 // This could be a register or a displacement expression.
989 if(!ParseRegister(TmpReg, Start, End)) {
990 SM.onRegister(TmpReg);
991 UpdateLocLex = false;
993 } else if (!getParser().parseExpression(Disp, End)) {
995 UpdateLocLex = false;
998 return ErrorOperand(Tok.getLoc(), "Unexpected identifier!");
1000 case AsmToken::Integer: {
1001 int64_t Val = Tok.getIntVal();
1005 case AsmToken::Plus: SM.onPlus(); break;
1006 case AsmToken::Minus: SM.onMinus(); break;
1007 case AsmToken::Star: SM.onStar(); break;
1008 case AsmToken::LBrac: SM.onLBrac(); break;
1009 case AsmToken::RBrac: SM.onRBrac(); break;
1011 if (!Done && UpdateLocLex) {
1013 Parser.Lex(); // Consume the token.
// Fold the accumulated constant displacement into an MCExpr.
1018 Disp = MCConstantExpr::Create(SM.getDisp(), getContext());
1020 // Parse the dot operator (e.g., [ebx].foo.bar).
1021 if (Tok.getString().startswith(".")) {
1022 SmallString<64> Err;
1023 const MCExpr *NewDisp;
1024 if (ParseIntelDotOperator(Disp, &NewDisp, Err))
1025 return ErrorOperand(Tok.getLoc(), Err);
1027 End = Parser.getTok().getEndLoc();
1028 Parser.Lex(); // Eat the field.
1032 int BaseReg = SM.getBaseReg();
1033 int IndexReg = SM.getIndexReg();
// No base or index: emit a displacement-only (absolute) memory operand,
// segment-prefixed if a segment override was parsed.
1036 if (!BaseReg && !IndexReg) {
1038 return X86Operand::CreateMem(Disp, Start, End);
1040 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size);
1043 int Scale = SM.getScale();
1044 return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
1048 /// ParseIntelMemOperand - Parse intel style memory operand.
/// Handles an optional "SIZE ptr" prefix, a bracketed expression (with or
/// without a further segment override), or a bare displacement expression.
1049 X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) {
1050 const AsmToken &Tok = Parser.getTok();
1053 unsigned Size = getIntelMemOperandSize(Tok.getString());
1056 assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") &&
1057 "Unexpected token!");
1061 if (getLexer().is(AsmToken::LBrac))
1062 return ParseIntelBracExpression(SegReg, Size);
1064 if (!ParseRegister(SegReg, Start, End)) {
1065 // Handle SegReg : [ ... ]
1066 if (getLexer().isNot(AsmToken::Colon))
1067 return ErrorOperand(Start, "Expected ':' token!");
1068 Parser.Lex(); // Eat :
1069 if (getLexer().isNot(AsmToken::LBrac))
1070 return ErrorOperand(Start, "Expected '[' token!");
1071 return ParseIntelBracExpression(SegReg, Size);
1074 const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
1075 if (getParser().parseExpression(Disp, End))
1078 if (!isParsingInlineAsm())
1079 return X86Operand::CreateMem(Disp, Start, End, Size);
1080 return CreateMemForInlineAsm(Disp, Start, End, Start, Size);
1083 /// Parse the '.' operator.
/// Folds a ".field" suffix into the displacement: Disp must be a constant;
/// the field offset comes either from the lexed ".<int>" (lexed as a Real)
/// or, in inline asm, from a front-end field lookup.  On success *NewDisp
/// holds the combined constant; on failure Err is set and true is returned.
1084 bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
1085 const MCExpr **NewDisp,
1086 SmallString<64> &Err) {
1087 AsmToken Tok = *&Parser.getTok();
1088 uint64_t OrigDispVal, DotDispVal;
1090 // FIXME: Handle non-constant expressions.
1091 if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp)) {
1092 OrigDispVal = OrigDisp->getValue();
1094 Err = "Non-constant offsets are not supported!";
// Drop the leading '.' to get the raw field/offset text.
1099 StringRef DotDispStr = Tok.getString().drop_front(1);
1101 // .Imm gets lexed as a real.
1102 if (Tok.is(AsmToken::Real)) {
1104 DotDispStr.getAsInteger(10, DotDisp);
1105 DotDispVal = DotDisp.getZExtValue();
1106 } else if (Tok.is(AsmToken::Identifier)) {
1107 // We should only see an identifier when parsing the original inline asm.
1108 // The front-end should rewrite this in terms of immediates.
1109 assert (isParsingInlineAsm() && "Unexpected field name!");
1112 std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
1113 if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
1115 Err = "Unable to lookup field reference!";
1118 DotDispVal = DotDisp;
1120 Err = "Unexpected token type!";
// Record a rewrite so the inline-asm string replaces ".field" with the
// computed numeric offset.
1124 if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
1125 SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data());
1126 unsigned Len = DotDispStr.size();
1127 unsigned Val = OrigDispVal + DotDispVal;
1128 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_DotOperator, Loc, Len,
1132 *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext());
1136 /// Parse the 'offset' operator. This operator is used to specify the
1137 /// location rather than the content of a variable.
1138 X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) {
1139 SMLoc OffsetOfLoc = Start;
1140 Parser.Lex(); // Eat offset.
1141 Start = Parser.getTok().getLoc();
1142 assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
1146 if (getParser().parseExpression(Val, End))
1147 return ErrorOperand(Start, "Unable to parse expression!");
1149 // Don't emit the offset operator.
// AOK_Skip length 7 == strlen("offset "); the keyword is dropped from the
// rewritten inline-asm string.
1150 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));
1152 // The offset operator will have an 'r' constraint, thus we need to create
1153 // register operand to ensure proper matching. Just pick a GPR based on
1154 // the size of a pointer.
1155 unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
1156 return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true,
// IntelOperatorKind - which MASM operator is being parsed (IOK_LENGTH /
// IOK_SIZE / IOK_TYPE; enumerators elided from this listing).
1160 enum IntelOperatorKind {
1166 /// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
1167 /// returns the number of elements in an array. It returns the value 1 for
1168 /// non-array variables. The SIZE operator returns the size of a C or C++
1169 /// variable. A variable's size is the product of its LENGTH and TYPE. The
1170 /// TYPE operator returns the size of a C or C++ type or variable. If the
1171 /// variable is an array, TYPE returns the size of a single element.
1172 X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) {
1173 SMLoc TypeLoc = Start;
1174 Parser.Lex(); // Eat the operator token (length/size/type).
1175 Start = Parser.getTok().getLoc();
1176 assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
1180 if (getParser().parseExpression(Val, End))
1183 unsigned Length = 0, Size = 0, Type = 0;
1184 if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Val)) {
1185 const MCSymbol &Sym = SymRef->getSymbol();
1186 // FIXME: The SemaLookup will fail if the name is anything other than an
1188 // FIXME: Pass a valid SMLoc.
1190 if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Length,
1191 Size, Type, IsVarDecl))
1192 return ErrorOperand(Start, "Unable to lookup expr!");
1196 default: llvm_unreachable("Unexpected operand kind!");
1197 case IOK_LENGTH: CVal = Length; break;
1198 case IOK_SIZE: CVal = Size; break;
1199 case IOK_TYPE: CVal = Type; break;
1202 // Rewrite the type operator and the C or C++ type or variable in terms of an
1203 // immediate. E.g. TYPE foo -> $$4
1204 unsigned Len = End.getPointer() - TypeLoc.getPointer();
1205 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, CVal));
1207 const MCExpr *Imm = MCConstantExpr::Create(CVal, getContext());
1208 return X86Operand::CreateImm(Imm, Start, End, /*NeedAsmRewrite*/false);
// ParseIntelOperand - Parse one Intel-syntax operand: inline-asm-only
// operators (offset/length/size/type), an immediate expression, a register
// (possibly a segment override starting a memory reference), or a memory
// operand.
1211 X86Operand *X86AsmParser::ParseIntelOperand() {
1212 SMLoc Start = Parser.getTok().getLoc(), End;
1213 StringRef AsmTokStr = Parser.getTok().getString();
1215 // Offset, length, type and size operators.
1216 if (isParsingInlineAsm()) {
1217 if (AsmTokStr == "offset" || AsmTokStr == "OFFSET")
1218 return ParseIntelOffsetOfOperator(Start);
1219 if (AsmTokStr == "length" || AsmTokStr == "LENGTH")
1220 return ParseIntelOperator(Start, IOK_LENGTH);
1221 if (AsmTokStr == "size" || AsmTokStr == "SIZE")
1222 return ParseIntelOperator(Start, IOK_SIZE);
1223 if (AsmTokStr == "type" || AsmTokStr == "TYPE")
1224 return ParseIntelOperator(Start, IOK_TYPE);
// Immediate (no '$' prefix in Intel syntax).
1228 if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) ||
1229 getLexer().is(AsmToken::Minus)) {
1231 if (!getParser().parseExpression(Val, End)) {
1232 return X86Operand::CreateImm(Val, Start, End);
1238 if (!ParseRegister(RegNo, Start, End)) {
1239 // If this is a segment register followed by a ':', then this is the start
1240 // of a memory reference, otherwise this is a normal register reference.
1241 if (getLexer().isNot(AsmToken::Colon))
1242 return X86Operand::CreateReg(RegNo, Start, End);
1244 getParser().Lex(); // Eat the colon.
1245 return ParseIntelMemOperand(RegNo, Start);
1249 return ParseIntelMemOperand(0, Start);
// ParseATTOperand - Parse one AT&T-syntax operand: '%reg' (possibly a
// segment override starting a memory reference), '$imm', or a memory
// operand.  Returns 0 on error.
1252 X86Operand *X86AsmParser::ParseATTOperand() {
1253 switch (getLexer().getKind()) {
1255 // Parse a memory operand with no segment register.
1256 return ParseMemOperand(0, Parser.getTok().getLoc());
1257 case AsmToken::Percent: {
1258 // Read the register.
1261 if (ParseRegister(RegNo, Start, End)) return 0;
// %eiz/%riz are pseudo index registers; reject them as standalone operands.
1262 if (RegNo == X86::EIZ || RegNo == X86::RIZ) {
1263 Error(Start, "%eiz and %riz can only be used as index registers",
1264 SMRange(Start, End));
1268 // If this is a segment register followed by a ':', then this is the start
1269 // of a memory reference, otherwise this is a normal register reference.
1270 if (getLexer().isNot(AsmToken::Colon))
1271 return X86Operand::CreateReg(RegNo, Start, End);
1274 getParser().Lex(); // Eat the colon.
1275 return ParseMemOperand(RegNo, Start);
1277 case AsmToken::Dollar: {
1278 // $42 -> immediate.
1279 SMLoc Start = Parser.getTok().getLoc(), End;
1282 if (getParser().parseExpression(Val, End))
1284 return X86Operand::CreateImm(Val, Start, End);
1289 /// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix
1290 /// has already been parsed if present.
///
/// SegReg is 0 when no segment override was parsed. MemStart is the location
/// of the first token of the operand, used for diagnostics and operand
/// ranges. Returns null on error (diagnostic already emitted).
1291 X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
1293 // We have to disambiguate a parenthesized expression "(4+5)" from the start
1294 // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The
1295 // only way to do this without lookahead is to eat the '(' and see what is
// Default displacement is 0 so forms like "(%ebx)" get an explicit disp.
1297 const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
1298 if (getLexer().isNot(AsmToken::LParen)) {
1300 if (getParser().parseExpression(Disp, ExprEnd)) return 0;
1302 // After parsing the base expression we could either have a parenthesized
1303 // memory address or not. If not, return now. If so, eat the (.
1304 if (getLexer().isNot(AsmToken::LParen)) {
1305 // Unless we have a segment register, treat this as an immediate.
1307 return X86Operand::CreateMem(Disp, MemStart, ExprEnd)
1308 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
1314 // Okay, we have a '('. We don't know if this is an expression or not, but
1315 // so we have to eat the ( to see beyond it.
1316 SMLoc LParenLoc = Parser.getTok().getLoc();
1317 Parser.Lex(); // Eat the '('.
1319 if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) {
1320 // Nothing to do here, fall into the code below with the '(' part of the
1321 // memory operand consumed.
1325 // It must be an parenthesized expression, parse it now.
1326 if (getParser().parseParenExpression(Disp, ExprEnd))
1329 // After parsing the base expression we could either have a parenthesized
1330 // memory address or not. If not, return now. If so, eat the (.
1331 if (getLexer().isNot(AsmToken::LParen)) {
1332 // Unless we have a segment register, treat this as an immediate.
1334 return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd);
1335 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
1343 // If we reached here, then we just ate the ( of the memory operand. Process
1344 // the rest of the memory operand.
// Scale defaults to 1 when no explicit scale expression is given.
1345 unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
1348 if (getLexer().is(AsmToken::Percent)) {
1349 SMLoc StartLoc, EndLoc;
1350 if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0;
1351 if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
// eiz/riz are index-only pseudo registers; reject them in the base slot.
1352 Error(StartLoc, "eiz and riz can only be used as index registers",
1353 SMRange(StartLoc, EndLoc));
1358 if (getLexer().is(AsmToken::Comma)) {
1359 Parser.Lex(); // Eat the comma.
1360 IndexLoc = Parser.getTok().getLoc();
1362 // Following the comma we should have either an index register, or a scale
1363 // value. We don't support the later form, but we want to parse it
1366 // Not that even though it would be completely consistent to support syntax
1367 // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
1368 if (getLexer().is(AsmToken::Percent)) {
1370 if (ParseRegister(IndexReg, L, L)) return 0;
1372 if (getLexer().isNot(AsmToken::RParen)) {
1373 // Parse the scale amount:
1374 // ::= ',' [scale-expression]
1375 if (getLexer().isNot(AsmToken::Comma)) {
1376 Error(Parser.getTok().getLoc(),
1377 "expected comma in scale expression")
1380 Parser.Lex(); // Eat the comma.
// A trailing comma with no scale, e.g. "(%eax,%ebx,)", keeps Scale == 1.
1382 if (getLexer().isNot(AsmToken::RParen)) {
1383 SMLoc Loc = Parser.getTok().getLoc();
1386 if (getParser().parseAbsoluteExpression(ScaleVal)){
1387 Error(Loc, "expected scale expression");
1391 // Validate the scale amount.
1392 if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
1393 Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
1396 Scale = (unsigned)ScaleVal;
1399 } else if (getLexer().isNot(AsmToken::RParen)) {
1400 // A scale amount without an index is ignored.
1402 SMLoc Loc = Parser.getTok().getLoc();
// Still parse (and discard) the expression so the token stream stays sane.
1405 if (getParser().parseAbsoluteExpression(Value))
1409 Warning(Loc, "scale factor without index register is ignored");
1414 // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
1415 if (getLexer().isNot(AsmToken::RParen)) {
1416 Error(Parser.getTok().getLoc(), "unexpected token in memory operand");
1419 SMLoc MemEnd = Parser.getTok().getEndLoc();
1420 Parser.Lex(); // Eat the ')'.
1422 // If we have both a base register and an index register make sure they are
1423 // both 64-bit or 32-bit registers.
1424 // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
1425 if (BaseReg != 0 && IndexReg != 0) {
1426 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
1427 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1428 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
// RIZ is the 64-bit "no index" placeholder, so it is exempt from the check.
1429 IndexReg != X86::RIZ) {
1430 Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
1433 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
1434 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1435 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
// EIZ is the 32-bit "no index" placeholder, likewise exempt.
1436 IndexReg != X86::EIZ){
1437 Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
1442 return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
// Parse one instruction: patch up mnemonic spellings the matcher does not
// know (setCC+b, cmpCC{ss,sd,ps,pd}), parse the operand list, then apply a
// series of hacks that canonicalize widely-used-but-unofficial forms
// (out/in with a (%dx) memory operand, ins/outs/movs/lods/stos with explicit
// operands, shift-by-$1, "int $3"). Appends parsed operands to Operands.
1447 ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
1448 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1450 StringRef PatchedName = Name;
1452 // FIXME: Hack to recognize setneb as setne.
1453 if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
1454 PatchedName != "setb" && PatchedName != "setnb")
1455 PatchedName = PatchedName.substr(0, Name.size()-1);
1457 // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
1458 const MCExpr *ExtraImmOp = 0;
1459 if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
1460 (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
1461 PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
1462 bool IsVCMP = PatchedName[0] == 'v';
1463 unsigned SSECCIdx = IsVCMP ? 4 : 3;
// Map the textual condition code (the middle of the mnemonic) to the
// immediate encoding used by (v)cmpXX instructions.
1464 unsigned SSEComparisonCode = StringSwitch<unsigned>(
1465 PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
1469 .Case("unord", 0x03)
1474 /* AVX only from here */
1475 .Case("eq_uq", 0x08)
1478 .Case("false", 0x0B)
1479 .Case("neq_oq", 0x0C)
1483 .Case("eq_os", 0x10)
1484 .Case("lt_oq", 0x11)
1485 .Case("le_oq", 0x12)
1486 .Case("unord_s", 0x13)
1487 .Case("neq_us", 0x14)
1488 .Case("nlt_uq", 0x15)
1489 .Case("nle_uq", 0x16)
1490 .Case("ord_s", 0x17)
1491 .Case("eq_us", 0x18)
1492 .Case("nge_uq", 0x19)
1493 .Case("ngt_uq", 0x1A)
1494 .Case("false_os", 0x1B)
1495 .Case("neq_os", 0x1C)
1496 .Case("ge_oq", 0x1D)
1497 .Case("gt_oq", 0x1E)
1498 .Case("true_us", 0x1F)
// Codes >= 8 are AVX-only; allow them only for the 'v'-prefixed forms.
1500 if (SSEComparisonCode != ~0U && (IsVCMP || SSEComparisonCode < 8)) {
1501 ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
1502 getParser().getContext());
1503 if (PatchedName.endswith("ss")) {
1504 PatchedName = IsVCMP ? "vcmpss" : "cmpss";
1505 } else if (PatchedName.endswith("sd")) {
1506 PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
1507 } else if (PatchedName.endswith("ps")) {
1508 PatchedName = IsVCMP ? "vcmpps" : "cmpps";
1510 assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
1511 PatchedName = IsVCMP ? "vcmppd" : "cmppd";
1516 Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
// In AT&T syntax the comparison-code immediate is the first operand; in
// Intel syntax it is appended after operand parsing (see below).
1518 if (ExtraImmOp && !isParsingIntelSyntax())
1519 Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
1521 // Determine whether this is an instruction prefix.
1523 Name == "lock" || Name == "rep" ||
1524 Name == "repe" || Name == "repz" ||
1525 Name == "repne" || Name == "repnz" ||
1526 Name == "rex64" || Name == "data16";
1529 // This does the actual operand parsing. Don't parse any more if we have a
1530 // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
1531 // just want to parse the "lock" as the first instruction and the "incl" as
1533 if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
1535 // Parse '*' modifier.
1536 if (getLexer().is(AsmToken::Star)) {
1537 SMLoc Loc = Parser.getTok().getLoc();
1538 Operands.push_back(X86Operand::CreateToken("*", Loc));
1539 Parser.Lex(); // Eat the star.
1542 // Read the first operand.
1543 if (X86Operand *Op = ParseOperand())
1544 Operands.push_back(Op);
// On operand-parse failure, skip to end of statement to resynchronize.
1546 Parser.eatToEndOfStatement();
1550 while (getLexer().is(AsmToken::Comma)) {
1551 Parser.Lex(); // Eat the comma.
1553 // Parse and remember the operand.
1554 if (X86Operand *Op = ParseOperand())
1555 Operands.push_back(Op);
1557 Parser.eatToEndOfStatement();
1562 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1563 SMLoc Loc = getLexer().getLoc();
1564 Parser.eatToEndOfStatement();
1565 return Error(Loc, "unexpected token in argument list");
1569 if (getLexer().is(AsmToken::EndOfStatement))
1570 Parser.Lex(); // Consume the EndOfStatement
1571 else if (isPrefix && getLexer().is(AsmToken::Slash))
1572 Parser.Lex(); // Consume the prefix separator Slash
// Intel syntax: the (v)cmp comparison-code immediate goes last.
1574 if (ExtraImmOp && isParsingIntelSyntax())
1575 Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
1577 // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
1578 // "outb %al, %dx". Out doesn't take a memory form, but this is a widely
1579 // documented form in various unofficial manuals, so a lot of code uses it.
1580 if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
1581 Operands.size() == 3) {
1582 X86Operand &Op = *(X86Operand*)Operands.back();
// Only rewrite the exact form "(%dx)": no segment, no disp, no index.
1583 if (Op.isMem() && Op.Mem.SegReg == 0 &&
1584 isa<MCConstantExpr>(Op.Mem.Disp) &&
1585 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
1586 Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
1587 SMLoc Loc = Op.getEndLoc();
1588 Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
1592 // Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al".
1593 if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") &&
1594 Operands.size() == 3) {
1595 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1596 if (Op.isMem() && Op.Mem.SegReg == 0 &&
1597 isa<MCConstantExpr>(Op.Mem.Disp) &&
1598 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
1599 Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
1600 SMLoc Loc = Op.getEndLoc();
1601 Operands.begin()[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
1605 // Transform "ins[bwl] %dx, %es:(%edi)" into "ins[bwl]"
1606 if (Name.startswith("ins") && Operands.size() == 3 &&
1607 (Name == "insb" || Name == "insw" || Name == "insl")) {
1608 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1609 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
// When the operands match the implicit ones, drop them so the matcher sees
// the no-operand form of the string instruction.
1610 if (Op.isReg() && Op.getReg() == X86::DX && isDstOp(Op2)) {
1611 Operands.pop_back();
1612 Operands.pop_back();
1618 // Transform "outs[bwl] %ds:(%esi), %dx" into "out[bwl]"
1619 if (Name.startswith("outs") && Operands.size() == 3 &&
1620 (Name == "outsb" || Name == "outsw" || Name == "outsl")) {
1621 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1622 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1623 if (isSrcOp(Op) && Op2.isReg() && Op2.getReg() == X86::DX) {
1624 Operands.pop_back();
1625 Operands.pop_back();
1631 // Transform "movs[bwl] %ds:(%esi), %es:(%edi)" into "movs[bwl]"
1632 if (Name.startswith("movs") && Operands.size() == 3 &&
1633 (Name == "movsb" || Name == "movsw" || Name == "movsl" ||
1634 (is64BitMode() && Name == "movsq"))) {
1635 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1636 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1637 if (isSrcOp(Op) && isDstOp(Op2)) {
1638 Operands.pop_back();
1639 Operands.pop_back();
1644 // Transform "lods[bwl] %ds:(%esi),{%al,%ax,%eax,%rax}" into "lods[bwl]"
1645 if (Name.startswith("lods") && Operands.size() == 3 &&
1646 (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
1647 Name == "lodsl" || (is64BitMode() && Name == "lodsq"))) {
1648 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1649 X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
1650 if (isSrcOp(*Op1) && Op2->isReg()) {
// For the suffix-less "lods", infer the width suffix from the accumulator
// register the user wrote.
1652 unsigned reg = Op2->getReg();
1653 bool isLods = Name == "lods";
1654 if (reg == X86::AL && (isLods || Name == "lodsb"))
1656 else if (reg == X86::AX && (isLods || Name == "lodsw"))
1658 else if (reg == X86::EAX && (isLods || Name == "lodsl"))
1660 else if (reg == X86::RAX && (isLods || Name == "lodsq"))
1665 Operands.pop_back();
1666 Operands.pop_back();
1670 static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
1674 // Transform "stos[bwl] {%al,%ax,%eax,%rax},%es:(%edi)" into "stos[bwl]"
1675 if (Name.startswith("stos") && Operands.size() == 3 &&
1676 (Name == "stos" || Name == "stosb" || Name == "stosw" ||
1677 Name == "stosl" || (is64BitMode() && Name == "stosq"))) {
1678 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1679 X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
1680 if (isDstOp(*Op2) && Op1->isReg()) {
// Same width-inference scheme as lods above, keyed off the accumulator.
1682 unsigned reg = Op1->getReg();
1683 bool isStos = Name == "stos";
1684 if (reg == X86::AL && (isStos || Name == "stosb"))
1686 else if (reg == X86::AX && (isStos || Name == "stosw"))
1688 else if (reg == X86::EAX && (isStos || Name == "stosl"))
1690 else if (reg == X86::RAX && (isStos || Name == "stosq"))
1695 Operands.pop_back();
1696 Operands.pop_back();
1700 static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
1705 // FIXME: Hack to handle recognize s{hr,ar,hl} $1, <op>. Canonicalize to
1707 if ((Name.startswith("shr") || Name.startswith("sar") ||
1708 Name.startswith("shl") || Name.startswith("sal") ||
1709 Name.startswith("rcl") || Name.startswith("rcr") ||
1710 Name.startswith("rol") || Name.startswith("ror")) &&
1711 Operands.size() == 3) {
// The $1 immediate is last in Intel syntax but first in AT&T syntax.
1712 if (isParsingIntelSyntax()) {
1714 X86Operand *Op1 = static_cast<X86Operand*>(Operands[2]);
1715 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1716 cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
1718 Operands.pop_back();
1721 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1722 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1723 cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
1725 Operands.erase(Operands.begin() + 1);
1730 // Transforms "int $3" into "int3" as a size optimization. We can't write an
1731 // instalias with an immediate operand yet.
1732 if (Name == "int" && Operands.size() == 2) {
1733 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1734 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1735 cast<MCConstantExpr>(Op1->getImm())->getValue() == 3) {
1737 Operands.erase(Operands.begin() + 1);
1738 static_cast<X86Operand*>(Operands[0])->setTokenValue("int3");
// Rewrite an accumulator-immediate instruction (e.g. ADD16i16) into the
// sign-extended-imm8 register form (e.g. ADD16ri8) with the given Opcode
// and accumulator register Reg. The caller has verified the immediate fits
// in a sign-extended 8-bit field.
1745 static bool convertToSExti8(MCInst &Inst, unsigned Opcode, unsigned Reg,
1748 TmpInst.setOpcode(Opcode);
// The ri8 form takes dst and src register operands (the accumulator twice)
// followed by the original immediate.
1750 TmpInst.addOperand(MCOperand::CreateReg(Reg));
1751 TmpInst.addOperand(MCOperand::CreateReg(Reg));
1752 TmpInst.addOperand(Inst.getOperand(0));
// Shrink a 16-bit accumulator-immediate instruction to its imm8 form when
// the immediate is representable as a sign-extended 8-bit value.
1757 static bool convert16i16to16ri8(MCInst &Inst, unsigned Opcode,
1758 bool isCmp = false) {
1759 if (!Inst.getOperand(0).isImm() ||
1760 !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
1763 return convertToSExti8(Inst, Opcode, X86::AX, isCmp);
// Shrink a 32-bit accumulator-immediate instruction to its imm8 form when
// the immediate is representable as a sign-extended 8-bit value.
1766 static bool convert32i32to32ri8(MCInst &Inst, unsigned Opcode,
1767 bool isCmp = false) {
1768 if (!Inst.getOperand(0).isImm() ||
1769 !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
1772 return convertToSExti8(Inst, Opcode, X86::EAX, isCmp);
// Shrink a 64-bit accumulator-immediate instruction to its imm8 form when
// the immediate is representable as a sign-extended 8-bit value.
1775 static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode,
1776 bool isCmp = false) {
1777 if (!Inst.getOperand(0).isImm() ||
1778 !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
1781 return convertToSExti8(Inst, Opcode, X86::RAX, isCmp);
// Post-match peephole: rewrite accumulator-immediate ALU/CMP encodings to
// their shorter sign-extended-imm8 forms when the immediate fits. Returns
// true when Inst was changed (the caller loops until no change is made).
1785 processInstruction(MCInst &Inst,
1786 const SmallVectorImpl<MCParsedAsmOperand*> &Ops) {
1787 switch (Inst.getOpcode()) {
1788 default: return false;
1789 case X86::AND16i16: return convert16i16to16ri8(Inst, X86::AND16ri8);
1790 case X86::AND32i32: return convert32i32to32ri8(Inst, X86::AND32ri8);
1791 case X86::AND64i32: return convert64i32to64ri8(Inst, X86::AND64ri8);
1792 case X86::XOR16i16: return convert16i16to16ri8(Inst, X86::XOR16ri8);
1793 case X86::XOR32i32: return convert32i32to32ri8(Inst, X86::XOR32ri8);
1794 case X86::XOR64i32: return convert64i32to64ri8(Inst, X86::XOR64ri8);
1795 case X86::OR16i16: return convert16i16to16ri8(Inst, X86::OR16ri8);
1796 case X86::OR32i32: return convert32i32to32ri8(Inst, X86::OR32ri8);
1797 case X86::OR64i32: return convert64i32to64ri8(Inst, X86::OR64ri8);
// CMP passes isCmp=true to the converter (its operand layout differs from
// the read-modify-write ALU forms).
1798 case X86::CMP16i16: return convert16i16to16ri8(Inst, X86::CMP16ri8, true);
1799 case X86::CMP32i32: return convert32i32to32ri8(Inst, X86::CMP32ri8, true);
1800 case X86::CMP64i32: return convert64i32to64ri8(Inst, X86::CMP64ri8, true);
1801 case X86::ADD16i16: return convert16i16to16ri8(Inst, X86::ADD16ri8);
1802 case X86::ADD32i32: return convert32i32to32ri8(Inst, X86::ADD32ri8);
1803 case X86::ADD64i32: return convert64i32to64ri8(Inst, X86::ADD64ri8);
1804 case X86::SUB16i16: return convert16i16to16ri8(Inst, X86::SUB16ri8);
1805 case X86::SUB32i32: return convert32i32to32ri8(Inst, X86::SUB32ri8);
1806 case X86::SUB64i32: return convert64i32to64ri8(Inst, X86::SUB64ri8);
1807 case X86::ADC16i16: return convert16i16to16ri8(Inst, X86::ADC16ri8);
1808 case X86::ADC32i32: return convert32i32to32ri8(Inst, X86::ADC32ri8);
1809 case X86::ADC64i32: return convert64i32to64ri8(Inst, X86::ADC64ri8);
1810 case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8);
1811 case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8);
1812 case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8);
// Defined in the tblgen'd X86GenAsmMatcher.inc included at end of file.
1816 static const char *getSubtargetFeatureName(unsigned Val);
// Match the parsed operand list against the instruction tables and emit the
// result. Handles wait-prefixed FP alias expansion, post-match encoding
// shrinking (processInstruction), and a suffix-guessing fallback that retries
// the mnemonic with each of b/w/l/q (or s/l/t for FP) to produce good
// diagnostics for suffix-less AT&T mnemonics.
1818 MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1819 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1820 MCStreamer &Out, unsigned &ErrorInfo,
1821 bool MatchingInlineAsm) {
1822 assert(!Operands.empty() && "Unexpect empty operand list!");
1823 X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
1824 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
1825 ArrayRef<SMRange> EmptyRanges = ArrayRef<SMRange>();
1827 // First, handle aliases that expand to multiple instructions.
1828 // FIXME: This should be replaced with a real .td file alias mechanism.
1829 // Also, MatchInstructionImpl should actually *do* the EmitInstruction
// These mnemonics are the wait-prefixed FP forms: emit a WAIT, then match
// the corresponding fn* (no-wait) mnemonic.
1831 if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" ||
1832 Op->getToken() == "fstsww" || Op->getToken() == "fstcww" ||
1833 Op->getToken() == "finit" || Op->getToken() == "fsave" ||
1834 Op->getToken() == "fstenv" || Op->getToken() == "fclex") {
1836 Inst.setOpcode(X86::WAIT);
// During inline-asm matching nothing is actually emitted.
1838 if (!MatchingInlineAsm)
1839 Out.EmitInstruction(Inst);
1842 StringSwitch<const char*>(Op->getToken())
1843 .Case("finit", "fninit")
1844 .Case("fsave", "fnsave")
1845 .Case("fstcw", "fnstcw")
1846 .Case("fstcww", "fnstcw")
1847 .Case("fstenv", "fnstenv")
1848 .Case("fstsw", "fnstsw")
1849 .Case("fstsww", "fnstsw")
1850 .Case("fclex", "fnclex")
1852 assert(Repl && "Unknown wait-prefixed instruction");
1854 Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
1857 bool WasOriginallyInvalidOperand = false;
1860 // First, try a direct match.
1861 switch (MatchInstructionImpl(Operands, Inst,
1862 ErrorInfo, MatchingInlineAsm,
1863 isParsingIntelSyntax())) {
1866 // Some instructions need post-processing to, for example, tweak which
1867 // encoding is selected. Loop on it while changes happen so the
1868 // individual transformations can chain off each other.
1869 if (!MatchingInlineAsm)
1870 while (processInstruction(Inst, Operands))
1874 if (!MatchingInlineAsm)
1875 Out.EmitInstruction(Inst);
1876 Opcode = Inst.getOpcode();
1878 case Match_MissingFeature: {
1879 assert(ErrorInfo && "Unknown missing feature!");
1880 // Special case the error message for the very common case where only
1881 // a single subtarget feature is missing.
1882 std::string Msg = "instruction requires:";
// Walk the feature bitmask and name each missing feature.
1884 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
1885 if (ErrorInfo & Mask) {
1887 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
1891 return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
1893 case Match_InvalidOperand:
// Remembered so the suffix-retry path below can prefer an operand
// diagnostic over a bogus "invalid mnemonic" one.
1894 WasOriginallyInvalidOperand = true;
1896 case Match_MnemonicFail:
1900 // FIXME: Ideally, we would only attempt suffix matches for things which are
1901 // valid prefixes, and we could just infer the right unambiguous
1902 // type. However, that requires substantially more matcher support than the
1905 // Change the operand to point to a temporary token.
1906 StringRef Base = Op->getToken();
1907 SmallString<16> Tmp;
1910 Op->setTokenValue(Tmp.str());
1912 // If this instruction starts with an 'f', then it is a floating point stack
1913 // instruction. These come in up to three forms for 32-bit, 64-bit, and
1914 // 80-bit floating point, which use the suffixes s,l,t respectively.
1916 // Otherwise, we assume that this may be an integer instruction, which comes
1917 // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
// The FP string is padded with '\0' so all four Suffixes slots are valid.
1918 const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
1920 // Check for the various suffix matches.
1921 Tmp[Base.size()] = Suffixes[0];
1922 unsigned ErrorInfoIgnore;
1923 unsigned ErrorInfoMissingFeature = 0; // Init suppresses compiler warnings.
1924 unsigned Match1, Match2, Match3, Match4;
1926 Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1927 isParsingIntelSyntax());
1928 // If this returned as a missing feature failure, remember that.
1929 if (Match1 == Match_MissingFeature)
1930 ErrorInfoMissingFeature = ErrorInfoIgnore;
1931 Tmp[Base.size()] = Suffixes[1];
1932 Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1933 isParsingIntelSyntax());
1934 // If this returned as a missing feature failure, remember that.
1935 if (Match2 == Match_MissingFeature)
1936 ErrorInfoMissingFeature = ErrorInfoIgnore;
1937 Tmp[Base.size()] = Suffixes[2];
1938 Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1939 isParsingIntelSyntax());
1940 // If this returned as a missing feature failure, remember that.
1941 if (Match3 == Match_MissingFeature)
1942 ErrorInfoMissingFeature = ErrorInfoIgnore;
1943 Tmp[Base.size()] = Suffixes[3];
1944 Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1945 isParsingIntelSyntax());
1946 // If this returned as a missing feature failure, remember that.
1947 if (Match4 == Match_MissingFeature)
1948 ErrorInfoMissingFeature = ErrorInfoIgnore;
1950 // Restore the old token.
1951 Op->setTokenValue(Base);
1953 // If exactly one matched, then we treat that as a successful match (and the
1954 // instruction will already have been filled in correctly, since the failing
1955 // matches won't have modified it).
1956 unsigned NumSuccessfulMatches =
1957 (Match1 == Match_Success) + (Match2 == Match_Success) +
1958 (Match3 == Match_Success) + (Match4 == Match_Success);
1959 if (NumSuccessfulMatches == 1) {
1961 if (!MatchingInlineAsm)
1962 Out.EmitInstruction(Inst);
1963 Opcode = Inst.getOpcode();
1967 // Otherwise, the match failed, try to produce a decent error message.
1969 // If we had multiple suffix matches, then identify this as an ambiguous
1971 if (NumSuccessfulMatches > 1) {
1973 unsigned NumMatches = 0;
1974 if (Match1 == Match_Success) MatchChars[NumMatches++] = Suffixes[0];
1975 if (Match2 == Match_Success) MatchChars[NumMatches++] = Suffixes[1];
1976 if (Match3 == Match_Success) MatchChars[NumMatches++] = Suffixes[2];
1977 if (Match4 == Match_Success) MatchChars[NumMatches++] = Suffixes[3];
1979 SmallString<126> Msg;
1980 raw_svector_ostream OS(Msg);
1981 OS << "ambiguous instructions require an explicit suffix (could be ";
1982 for (unsigned i = 0; i != NumMatches; ++i) {
1985 if (i + 1 == NumMatches)
1987 OS << "'" << Base << MatchChars[i] << "'";
1990 Error(IDLoc, OS.str(), EmptyRanges, MatchingInlineAsm);
1994 // Okay, we know that none of the variants matched successfully.
1996 // If all of the instructions reported an invalid mnemonic, then the original
1997 // mnemonic was invalid.
1998 if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
1999 (Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
2000 if (!WasOriginallyInvalidOperand) {
2001 ArrayRef<SMRange> Ranges = MatchingInlineAsm ? EmptyRanges :
2003 return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
2004 Ranges, MatchingInlineAsm);
2007 // Recover location info for the operand if we know which was the problem.
2008 if (ErrorInfo != ~0U) {
2009 if (ErrorInfo >= Operands.size())
2010 return Error(IDLoc, "too few operands for instruction",
2011 EmptyRanges, MatchingInlineAsm);
2013 X86Operand *Operand = (X86Operand*)Operands[ErrorInfo];
2014 if (Operand->getStartLoc().isValid()) {
2015 SMRange OperandRange = Operand->getLocRange();
2016 return Error(Operand->getStartLoc(), "invalid operand for instruction",
2017 OperandRange, MatchingInlineAsm);
2021 return Error(IDLoc, "invalid operand for instruction", EmptyRanges,
2025 // If one instruction matched with a missing feature, report this as a
// Summing booleans == 1 means exactly one variant failed for this reason.
2027 if ((Match1 == Match_MissingFeature) + (Match2 == Match_MissingFeature) +
2028 (Match3 == Match_MissingFeature) + (Match4 == Match_MissingFeature) == 1){
2029 std::string Msg = "instruction requires:";
2031 for (unsigned i = 0; i < (sizeof(ErrorInfoMissingFeature)*8-1); ++i) {
2032 if (ErrorInfoMissingFeature & Mask) {
2034 Msg += getSubtargetFeatureName(ErrorInfoMissingFeature & Mask);
2038 return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
2041 // If one instruction matched with an invalid operand, report this as an
2043 if ((Match1 == Match_InvalidOperand) + (Match2 == Match_InvalidOperand) +
2044 (Match3 == Match_InvalidOperand) + (Match4 == Match_InvalidOperand) == 1){
2045 Error(IDLoc, "invalid operand for instruction", EmptyRanges,
2050 // If all of these were an outright failure, report it in a useless way.
2051 Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
2052 EmptyRanges, MatchingInlineAsm);
// Handle target-specific directives: .word, .code32/.code64, and the
// .att_syntax/.intel_syntax dialect switches.
2057 bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
2058 StringRef IDVal = DirectiveID.getIdentifier();
2059 if (IDVal == ".word")
2060 return ParseDirectiveWord(2, DirectiveID.getLoc());
2061 else if (IDVal.startswith(".code"))
2062 return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
2063 else if (IDVal.startswith(".att_syntax")) {
// Dialect 0 = AT&T, dialect 1 = Intel.
2064 getParser().setAssemblerDialect(0);
2066 } else if (IDVal.startswith(".intel_syntax")) {
2067 getParser().setAssemblerDialect(1);
2068 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2069 if(Parser.getTok().getString() == "noprefix") {
2070 // FIXME : Handle noprefix
2080 /// ParseDirectiveWord
2081 /// ::= .word [ expression (, expression)* ]
///
/// Emits each comma-separated expression as a value of Size bytes (Size is
/// 2 for .word). L is the directive location, used for diagnostics.
2082 bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
// A bare ".word" with no operands is accepted and emits nothing.
2083 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2085 const MCExpr *Value;
2086 if (getParser().parseExpression(Value))
2089 getParser().getStreamer().EmitValue(Value, Size);
2091 if (getLexer().is(AsmToken::EndOfStatement))
2094 // FIXME: Improve diagnostic.
2095 if (getLexer().isNot(AsmToken::Comma))
2096 return Error(L, "unexpected token in directive");
2105 /// ParseDirectiveCode
2106 /// ::= .code32 | .code64
///
/// Switches the subtarget between 32- and 64-bit mode (only when the mode
/// actually changes) and emits the matching assembler flag.
2107 bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
2108 if (IDVal == ".code32") {
2110 if (is64BitMode()) {
2112 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
2114 } else if (IDVal == ".code64") {
2116 if (!is64BitMode()) {
2118 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
2121 return Error(L, "unexpected directive " + IDVal);
2127 // Force static initialization.
// Registers this asm parser with both the 32- and 64-bit x86 targets so the
// target registry can instantiate it by triple.
2128 extern "C" void LLVMInitializeX86AsmParser() {
2129 RegisterMCAsmParser<X86AsmParser> X(TheX86_32Target);
2130 RegisterMCAsmParser<X86AsmParser> Y(TheX86_64Target);
2133 #define GET_REGISTER_MATCHER
2134 #define GET_MATCHER_IMPLEMENTATION
2135 #define GET_SUBTARGET_FEATURE_NAME
2136 #include "X86GenAsmMatcher.inc"