1 //===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "llvm/ADT/APFloat.h"
12 #include "llvm/ADT/SmallString.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/ADT/StringSwitch.h"
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/MC/MCExpr.h"
17 #include "llvm/MC/MCInst.h"
18 #include "llvm/MC/MCParser/MCAsmLexer.h"
19 #include "llvm/MC/MCParser/MCAsmParser.h"
20 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
21 #include "llvm/MC/MCRegisterInfo.h"
22 #include "llvm/MC/MCStreamer.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCSymbol.h"
25 #include "llvm/MC/MCTargetAsmParser.h"
26 #include "llvm/Support/SourceMgr.h"
27 #include "llvm/Support/TargetRegistry.h"
28 #include "llvm/Support/raw_ostream.h"
35 class X86AsmParser : public MCTargetAsmParser {
38 ParseInstructionInfo *InstInfo;
40 MCAsmParser &getParser() const { return Parser; }
42 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
44 bool Error(SMLoc L, const Twine &Msg,
45 ArrayRef<SMRange> Ranges = ArrayRef<SMRange>(),
46 bool MatchingInlineAsm = false) {
47 if (MatchingInlineAsm) return true;
48 return Parser.Error(L, Msg, Ranges);
51 X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) {
56 X86Operand *ParseOperand();
57 X86Operand *ParseATTOperand();
58 X86Operand *ParseIntelOperand();
59 X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc);
60 X86Operand *ParseIntelOperator(SMLoc StartLoc, unsigned OpKind);
61 X86Operand *ParseIntelMemOperand(unsigned SegReg, uint64_t ImmDisp,
63 X86Operand *ParseIntelBracExpression(unsigned SegReg, uint64_t ImmDisp,
65 X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
67 X86Operand *CreateMemForInlineAsm(const MCExpr *Disp, SMLoc Start, SMLoc End,
68 SMLoc SizeDirLoc, unsigned Size);
70 bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp,
71 SmallString<64> &Err);
73 bool ParseDirectiveWord(unsigned Size, SMLoc L);
74 bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
76 bool processInstruction(MCInst &Inst,
77 const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
79 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
80 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
81 MCStreamer &Out, unsigned &ErrorInfo,
82 bool MatchingInlineAsm);
84   /// isSrcOp - Returns true if operand is either (%rsi) or %ds:(%rsi)
85 /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
86 bool isSrcOp(X86Operand &Op);
88 /// isDstOp - Returns true if operand is either (%rdi) or %es:(%rdi)
89 /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode.
90 bool isDstOp(X86Operand &Op);
92 bool is64BitMode() const {
93 // FIXME: Can tablegen auto-generate this?
94 return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
97 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(X86::Mode64Bit));
98 setAvailableFeatures(FB);
101 /// @name Auto-generated Matcher Functions
104 #define GET_ASSEMBLER_HEADER
105 #include "X86GenAsmMatcher.inc"
110 X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
111 : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {
113 // Initialize the set of available features.
114 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
116 virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
118 virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
120 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
122 virtual bool ParseDirective(AsmToken DirectiveID);
124 bool isParsingIntelSyntax() {
125 return getParser().getAssemblerDialect();
128 } // end anonymous namespace
130 /// @name Auto-generated Match Functions
133 static unsigned MatchRegisterName(StringRef Name);
static bool isImmSExti16i8Value(uint64_t Value) {
  // True iff Value is an 8-bit immediate sign-extended to 16 bits: either a
  // small non-negative value, the 16-bit two's-complement image of [-128,-1],
  // or the full 64-bit sign extension of [-128,-1].
  if (Value <= 0x000000000000007FULL)
    return true;
  if (Value >= 0x000000000000FF80ULL && Value <= 0x000000000000FFFFULL)
    return true;
  return Value >= 0xFFFFFFFFFFFFFF80ULL;
}
static bool isImmSExti32i8Value(uint64_t Value) {
  // True iff Value is an 8-bit immediate sign-extended to 32 bits: either a
  // small non-negative value, the 32-bit two's-complement image of [-128,-1],
  // or the full 64-bit sign extension of [-128,-1].
  if (Value <= 0x000000000000007FULL)
    return true;
  if (Value >= 0x00000000FFFFFF80ULL && Value <= 0x00000000FFFFFFFFULL)
    return true;
  return Value >= 0xFFFFFFFFFFFFFF80ULL;
}
static bool isImmZExtu32u8Value(uint64_t Value) {
  // True iff Value zero-extends from 8 bits, i.e. fits in an unsigned byte.
  return Value < 0x100ULL;
}
static bool isImmSExti64i8Value(uint64_t Value) {
  // True iff the 64-bit value is the sign extension of an 8-bit immediate,
  // i.e. lies in [-128, 127] when reinterpreted as signed.
  int64_t Signed = static_cast<int64_t>(Value);
  return Signed >= -128 && Signed <= 127;
}
static bool isImmSExti64i32Value(uint64_t Value) {
  // True iff the 64-bit value is the sign extension of a 32-bit immediate,
  // i.e. lies in [INT32_MIN, INT32_MAX] when reinterpreted as signed.
  int64_t Signed = static_cast<int64_t>(Value);
  return Signed >= -0x80000000LL && Signed <= 0x7FFFFFFFLL;
}
164 /// X86Operand - Instances of this class represent a parsed X86 machine
166 struct X86Operand : public MCParsedAsmOperand {
174 SMLoc StartLoc, EndLoc;
207 X86Operand(KindTy K, SMLoc Start, SMLoc End)
208 : Kind(K), StartLoc(Start), EndLoc(End) {}
210 /// getStartLoc - Get the location of the first token of this operand.
211 SMLoc getStartLoc() const { return StartLoc; }
212 /// getEndLoc - Get the location of the last token of this operand.
213 SMLoc getEndLoc() const { return EndLoc; }
214 /// getLocRange - Get the range between the first and last token of this
216 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
217 /// getOffsetOfLoc - Get the location of the offset operator.
218 SMLoc getOffsetOfLoc() const { return OffsetOfLoc; }
220 virtual void print(raw_ostream &OS) const {}
222 StringRef getToken() const {
223 assert(Kind == Token && "Invalid access!");
224 return StringRef(Tok.Data, Tok.Length);
226 void setTokenValue(StringRef Value) {
227 assert(Kind == Token && "Invalid access!");
228 Tok.Data = Value.data();
229 Tok.Length = Value.size();
232 unsigned getReg() const {
233 assert(Kind == Register && "Invalid access!");
237 const MCExpr *getImm() const {
238 assert(Kind == Immediate && "Invalid access!");
242 const MCExpr *getMemDisp() const {
243 assert(Kind == Memory && "Invalid access!");
246 unsigned getMemSegReg() const {
247 assert(Kind == Memory && "Invalid access!");
250 unsigned getMemBaseReg() const {
251 assert(Kind == Memory && "Invalid access!");
254 unsigned getMemIndexReg() const {
255 assert(Kind == Memory && "Invalid access!");
258 unsigned getMemScale() const {
259 assert(Kind == Memory && "Invalid access!");
263 bool isToken() const {return Kind == Token; }
265 bool isImm() const { return Kind == Immediate; }
267 bool isImmSExti16i8() const {
271 // If this isn't a constant expr, just assume it fits and let relaxation
273 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
277 // Otherwise, check the value is in a range that makes sense for this
279 return isImmSExti16i8Value(CE->getValue());
281 bool isImmSExti32i8() const {
285 // If this isn't a constant expr, just assume it fits and let relaxation
287 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
291 // Otherwise, check the value is in a range that makes sense for this
293 return isImmSExti32i8Value(CE->getValue());
295 bool isImmZExtu32u8() const {
299 // If this isn't a constant expr, just assume it fits and let relaxation
301 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
305 // Otherwise, check the value is in a range that makes sense for this
307 return isImmZExtu32u8Value(CE->getValue());
309 bool isImmSExti64i8() const {
313 // If this isn't a constant expr, just assume it fits and let relaxation
315 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
319 // Otherwise, check the value is in a range that makes sense for this
321 return isImmSExti64i8Value(CE->getValue());
323 bool isImmSExti64i32() const {
327 // If this isn't a constant expr, just assume it fits and let relaxation
329 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
333 // Otherwise, check the value is in a range that makes sense for this
335 return isImmSExti64i32Value(CE->getValue());
338 bool isOffsetOf() const {
339 return OffsetOfLoc.getPointer();
342 bool needAddressOf() const {
346 bool isMem() const { return Kind == Memory; }
347 bool isMem8() const {
348 return Kind == Memory && (!Mem.Size || Mem.Size == 8);
350 bool isMem16() const {
351 return Kind == Memory && (!Mem.Size || Mem.Size == 16);
353 bool isMem32() const {
354 return Kind == Memory && (!Mem.Size || Mem.Size == 32);
356 bool isMem64() const {
357 return Kind == Memory && (!Mem.Size || Mem.Size == 64);
359 bool isMem80() const {
360 return Kind == Memory && (!Mem.Size || Mem.Size == 80);
362 bool isMem128() const {
363 return Kind == Memory && (!Mem.Size || Mem.Size == 128);
365 bool isMem256() const {
366 return Kind == Memory && (!Mem.Size || Mem.Size == 256);
369 bool isMemVX32() const {
370 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
371 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
373 bool isMemVY32() const {
374 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
375 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
377 bool isMemVX64() const {
378 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
379 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
381 bool isMemVY64() const {
382 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
383 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
386 bool isAbsMem() const {
387 return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
388 !getMemIndexReg() && getMemScale() == 1;
391 bool isReg() const { return Kind == Register; }
393 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
394 // Add as immediates when possible.
395 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
396 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
398 Inst.addOperand(MCOperand::CreateExpr(Expr));
401 void addRegOperands(MCInst &Inst, unsigned N) const {
402 assert(N == 1 && "Invalid number of operands!");
403 Inst.addOperand(MCOperand::CreateReg(getReg()));
406 void addImmOperands(MCInst &Inst, unsigned N) const {
407 assert(N == 1 && "Invalid number of operands!");
408 addExpr(Inst, getImm());
411 void addMem8Operands(MCInst &Inst, unsigned N) const {
412 addMemOperands(Inst, N);
414 void addMem16Operands(MCInst &Inst, unsigned N) const {
415 addMemOperands(Inst, N);
417 void addMem32Operands(MCInst &Inst, unsigned N) const {
418 addMemOperands(Inst, N);
420 void addMem64Operands(MCInst &Inst, unsigned N) const {
421 addMemOperands(Inst, N);
423 void addMem80Operands(MCInst &Inst, unsigned N) const {
424 addMemOperands(Inst, N);
426 void addMem128Operands(MCInst &Inst, unsigned N) const {
427 addMemOperands(Inst, N);
429 void addMem256Operands(MCInst &Inst, unsigned N) const {
430 addMemOperands(Inst, N);
432 void addMemVX32Operands(MCInst &Inst, unsigned N) const {
433 addMemOperands(Inst, N);
435 void addMemVY32Operands(MCInst &Inst, unsigned N) const {
436 addMemOperands(Inst, N);
438 void addMemVX64Operands(MCInst &Inst, unsigned N) const {
439 addMemOperands(Inst, N);
441 void addMemVY64Operands(MCInst &Inst, unsigned N) const {
442 addMemOperands(Inst, N);
445 void addMemOperands(MCInst &Inst, unsigned N) const {
446 assert((N == 5) && "Invalid number of operands!");
447 Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
448 Inst.addOperand(MCOperand::CreateImm(getMemScale()));
449 Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
450 addExpr(Inst, getMemDisp());
451 Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
454 void addAbsMemOperands(MCInst &Inst, unsigned N) const {
455 assert((N == 1) && "Invalid number of operands!");
456 // Add as immediates when possible.
457 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
458 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
460 Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
463 static X86Operand *CreateToken(StringRef Str, SMLoc Loc) {
464 SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
465 X86Operand *Res = new X86Operand(Token, Loc, EndLoc);
466 Res->Tok.Data = Str.data();
467 Res->Tok.Length = Str.size();
471 static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
472 bool AddressOf = false,
473 SMLoc OffsetOfLoc = SMLoc()) {
474 X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc);
475 Res->Reg.RegNo = RegNo;
476 Res->AddressOf = AddressOf;
477 Res->OffsetOfLoc = OffsetOfLoc;
481 static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){
482 X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc);
487 /// Create an absolute memory operand.
488 static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
490 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
492 Res->Mem.Disp = Disp;
493 Res->Mem.BaseReg = 0;
494 Res->Mem.IndexReg = 0;
496 Res->Mem.Size = Size;
497 Res->AddressOf = false;
501 /// Create a generalized memory operand.
502 static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp,
503 unsigned BaseReg, unsigned IndexReg,
504 unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
506 // We should never just have a displacement, that should be parsed as an
507 // absolute memory operand.
508 assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
510 // The scale should always be one of {1,2,4,8}.
511 assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
513 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
514 Res->Mem.SegReg = SegReg;
515 Res->Mem.Disp = Disp;
516 Res->Mem.BaseReg = BaseReg;
517 Res->Mem.IndexReg = IndexReg;
518 Res->Mem.Scale = Scale;
519 Res->Mem.Size = Size;
520 Res->AddressOf = false;
525 } // end anonymous namespace.
527 bool X86AsmParser::isSrcOp(X86Operand &Op) {
528 unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI;
530 return (Op.isMem() &&
531 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::DS) &&
532 isa<MCConstantExpr>(Op.Mem.Disp) &&
533 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
534 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0);
537 bool X86AsmParser::isDstOp(X86Operand &Op) {
538 unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI;
541 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) &&
542 isa<MCConstantExpr>(Op.Mem.Disp) &&
543 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
544 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0;
547 bool X86AsmParser::ParseRegister(unsigned &RegNo,
548 SMLoc &StartLoc, SMLoc &EndLoc) {
550 const AsmToken &PercentTok = Parser.getTok();
551 StartLoc = PercentTok.getLoc();
553 // If we encounter a %, ignore it. This code handles registers with and
554 // without the prefix, unprefixed registers can occur in cfi directives.
555 if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent))
556 Parser.Lex(); // Eat percent token.
558 const AsmToken &Tok = Parser.getTok();
559 EndLoc = Tok.getEndLoc();
561 if (Tok.isNot(AsmToken::Identifier)) {
562 if (isParsingIntelSyntax()) return true;
563 return Error(StartLoc, "invalid register name",
564 SMRange(StartLoc, EndLoc));
567 RegNo = MatchRegisterName(Tok.getString());
569 // If the match failed, try the register name as lowercase.
571 RegNo = MatchRegisterName(Tok.getString().lower());
573 if (!is64BitMode()) {
574 // FIXME: This should be done using Requires<In32BitMode> and
575 // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
577 // FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a
579 if (RegNo == X86::RIZ ||
580 X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
581 X86II::isX86_64NonExtLowByteReg(RegNo) ||
582 X86II::isX86_64ExtendedReg(RegNo))
583 return Error(StartLoc, "register %"
584 + Tok.getString() + " is only available in 64-bit mode",
585 SMRange(StartLoc, EndLoc));
588 // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
589 if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) {
591 Parser.Lex(); // Eat 'st'
593 // Check to see if we have '(4)' after %st.
594 if (getLexer().isNot(AsmToken::LParen))
599 const AsmToken &IntTok = Parser.getTok();
600 if (IntTok.isNot(AsmToken::Integer))
601 return Error(IntTok.getLoc(), "expected stack index");
602 switch (IntTok.getIntVal()) {
603 case 0: RegNo = X86::ST0; break;
604 case 1: RegNo = X86::ST1; break;
605 case 2: RegNo = X86::ST2; break;
606 case 3: RegNo = X86::ST3; break;
607 case 4: RegNo = X86::ST4; break;
608 case 5: RegNo = X86::ST5; break;
609 case 6: RegNo = X86::ST6; break;
610 case 7: RegNo = X86::ST7; break;
611 default: return Error(IntTok.getLoc(), "invalid stack index");
614 if (getParser().Lex().isNot(AsmToken::RParen))
615 return Error(Parser.getTok().getLoc(), "expected ')'");
617 EndLoc = Parser.getTok().getEndLoc();
618 Parser.Lex(); // Eat ')'
622 EndLoc = Parser.getTok().getEndLoc();
624 // If this is "db[0-7]", match it as an alias
626 if (RegNo == 0 && Tok.getString().size() == 3 &&
627 Tok.getString().startswith("db")) {
628 switch (Tok.getString()[2]) {
629 case '0': RegNo = X86::DR0; break;
630 case '1': RegNo = X86::DR1; break;
631 case '2': RegNo = X86::DR2; break;
632 case '3': RegNo = X86::DR3; break;
633 case '4': RegNo = X86::DR4; break;
634 case '5': RegNo = X86::DR5; break;
635 case '6': RegNo = X86::DR6; break;
636 case '7': RegNo = X86::DR7; break;
640 EndLoc = Parser.getTok().getEndLoc();
641 Parser.Lex(); // Eat it.
647 if (isParsingIntelSyntax()) return true;
648 return Error(StartLoc, "invalid register name",
649 SMRange(StartLoc, EndLoc));
652 Parser.Lex(); // Eat identifier token.
656 X86Operand *X86AsmParser::ParseOperand() {
657 if (isParsingIntelSyntax())
658 return ParseIntelOperand();
659 return ParseATTOperand();
662 /// getIntelMemOperandSize - Return intel memory operand size.
663 static unsigned getIntelMemOperandSize(StringRef OpStr) {
664 unsigned Size = StringSwitch<unsigned>(OpStr)
665 .Cases("BYTE", "byte", 8)
666 .Cases("WORD", "word", 16)
667 .Cases("DWORD", "dword", 32)
668 .Cases("QWORD", "qword", 64)
669 .Cases("XWORD", "xword", 80)
670 .Cases("XMMWORD", "xmmword", 128)
671 .Cases("YMMWORD", "ymmword", 256)
676 enum IntelBracExprState {
682 IBES_REGISTER_STAR_INTEGER,
692 class IntelBracExprStateMachine {
693 IntelBracExprState State;
694 unsigned BaseReg, IndexReg, Scale;
703 IntelBracExprStateMachine(MCAsmParser &parser, int64_t disp) :
704 State(IBES_START), BaseReg(0), IndexReg(0), Scale(1), Disp(disp),
705 TmpReg(0), TmpInteger(0), isPlus(true) {}
707 unsigned getBaseReg() { return BaseReg; }
708 unsigned getIndexReg() { return IndexReg; }
709 unsigned getScale() { return Scale; }
710 int64_t getDisp() { return Disp; }
711 bool isValidEndState() { return State == IBES_RBRAC; }
727 // If we already have a BaseReg, then assume this is the IndexReg with a
732 assert (!IndexReg && "BaseReg/IndexReg already set!");
737 case IBES_INDEX_REGISTER:
760 // If we already have a BaseReg, then assume this is the IndexReg with a
765 assert (!IndexReg && "BaseReg/IndexReg already set!");
770 case IBES_INDEX_REGISTER:
776 void onRegister(unsigned Reg) {
782 State = IBES_REGISTER;
785 case IBES_INTEGER_STAR:
786 assert (!IndexReg && "IndexReg already set!");
787 State = IBES_INDEX_REGISTER;
799 State = IBES_DISP_EXPR;
803 void onInteger(int64_t TmpInt) {
809 State = IBES_INTEGER;
813 State = IBES_INTEGER;
816 case IBES_REGISTER_STAR:
817 assert (!IndexReg && "IndexReg already set!");
818 State = IBES_INDEX_REGISTER;
830 State = IBES_INTEGER_STAR;
833 State = IBES_REGISTER_STAR;
865 // If we already have a BaseReg, then assume this is the IndexReg with a
870 assert (!IndexReg && "BaseReg/IndexReg already set!");
875 case IBES_INDEX_REGISTER:
882 X86Operand *X86AsmParser::CreateMemForInlineAsm(const MCExpr *Disp, SMLoc Start,
883 SMLoc End, SMLoc SizeDirLoc,
885 bool NeedSizeDir = false;
886 bool IsVarDecl = false;
887 if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Disp)) {
888 const MCSymbol &Sym = SymRef->getSymbol();
889 // FIXME: The SemaLookup will fail if the name is anything other then an
891 // FIXME: Pass a valid SMLoc.
892 unsigned tLength, tSize, tType;
893 SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, tLength,
894 tSize, tType, IsVarDecl);
896 Size = tType * 8; // Size is in terms of bits in this context.
897 NeedSizeDir = Size > 0;
901 // If this is not a VarDecl then assume it is a FuncDecl or some other label
902 // reference. We need an 'r' constraint here, so we need to create register
903 // operand to ensure proper matching. Just pick a GPR based on the size of
906 unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
907 return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true);
911 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_SizeDirective, SizeDirLoc,
914 // When parsing inline assembly we set the base register to a non-zero value
915 // as we don't know the actual value at this time. This is necessary to
916 // get the matching correct in some cases.
917 return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0,
918 /*Scale*/1, Start, End, Size);
921 X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
924 const AsmToken &Tok = Parser.getTok();
925 SMLoc Start = Tok.getLoc(), End = Tok.getEndLoc();
928 if (getLexer().isNot(AsmToken::LBrac))
929 return ErrorOperand(Start, "Expected '[' token!");
934 // Try to handle '[' 'Symbol' ']'
935 if (getLexer().is(AsmToken::Identifier)) {
936 if (ParseRegister(TmpReg, Start, End)) {
938 if (getParser().parseExpression(Disp, End))
941 if (getLexer().isNot(AsmToken::RBrac))
942 return ErrorOperand(Parser.getTok().getLoc(), "Expected ']' token!");
944 // FIXME: We don't handle 'ImmDisp' '[' 'Symbol' ']'.
946 return ErrorOperand(Start, "Unsupported immediate displacement!");
948 // Adjust the EndLoc due to the ']'.
949 End = SMLoc::getFromPointer(Parser.getTok().getEndLoc().getPointer()-1);
951 if (!isParsingInlineAsm())
952 return X86Operand::CreateMem(Disp, Start, End, Size);
954 // We want the size directive before the '['.
955 SMLoc SizeDirLoc = SMLoc::getFromPointer(Start.getPointer()-1);
956 return CreateMemForInlineAsm(Disp, Start, End, SizeDirLoc, Size);
960 // Parse [ BaseReg + Scale*IndexReg + Disp ]. We may have already parsed an
961 // immediate displacement before the bracketed expression.
963 IntelBracExprStateMachine SM(Parser, ImmDisp);
965 // If we parsed a register, then the end loc has already been set and
966 // the identifier has already been lexed. We also need to update the
969 SM.onRegister(TmpReg);
971 const MCExpr *Disp = 0;
973 bool UpdateLocLex = true;
975 // The period in the dot operator (e.g., [ebx].foo.bar) is parsed as an
976 // identifier. Don't try an parse it as a register.
977 if (Tok.getString().startswith("."))
980 switch (getLexer().getKind()) {
982 if (SM.isValidEndState()) {
986 return ErrorOperand(Tok.getLoc(), "Unexpected token!");
988 case AsmToken::Identifier: {
989 // This could be a register or a displacement expression.
990 if(!ParseRegister(TmpReg, Start, End)) {
991 SM.onRegister(TmpReg);
992 UpdateLocLex = false;
994 } else if (!getParser().parseExpression(Disp, End)) {
996 UpdateLocLex = false;
999 return ErrorOperand(Tok.getLoc(), "Unexpected identifier!");
1001 case AsmToken::Integer: {
1002 int64_t Val = Tok.getIntVal();
1006 case AsmToken::Plus: SM.onPlus(); break;
1007 case AsmToken::Minus: SM.onMinus(); break;
1008 case AsmToken::Star: SM.onStar(); break;
1009 case AsmToken::LBrac: SM.onLBrac(); break;
1010 case AsmToken::RBrac: SM.onRBrac(); break;
1012 if (!Done && UpdateLocLex) {
1014 Parser.Lex(); // Consume the token.
1019 Disp = MCConstantExpr::Create(SM.getDisp(), getContext());
1021 // Parse the dot operator (e.g., [ebx].foo.bar).
1022 if (Tok.getString().startswith(".")) {
1023 SmallString<64> Err;
1024 const MCExpr *NewDisp;
1025 if (ParseIntelDotOperator(Disp, &NewDisp, Err))
1026 return ErrorOperand(Tok.getLoc(), Err);
1028 End = Parser.getTok().getEndLoc();
1029 Parser.Lex(); // Eat the field.
1033 int BaseReg = SM.getBaseReg();
1034 int IndexReg = SM.getIndexReg();
1037 if (!BaseReg && !IndexReg) {
1039 return X86Operand::CreateMem(Disp, Start, End);
1041 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size);
1044 int Scale = SM.getScale();
1045 return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
1049 /// ParseIntelMemOperand - Parse intel style memory operand.
1050 X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg,
1053 const AsmToken &Tok = Parser.getTok();
1056 unsigned Size = getIntelMemOperandSize(Tok.getString());
1059 assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") &&
1060 "Unexpected token!");
1064 // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ].
1065 if (getLexer().is(AsmToken::Integer)) {
1066 const AsmToken &IntTok = Parser.getTok();
1067 if (isParsingInlineAsm())
1068 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix,
1070 uint64_t ImmDisp = IntTok.getIntVal();
1071 Parser.Lex(); // Eat the integer.
1072 if (getLexer().isNot(AsmToken::LBrac))
1073 return ErrorOperand(Start, "Expected '[' token!");
1074 return ParseIntelBracExpression(SegReg, ImmDisp, Size);
1077 if (getLexer().is(AsmToken::LBrac))
1078 return ParseIntelBracExpression(SegReg, ImmDisp, Size);
1080 if (!ParseRegister(SegReg, Start, End)) {
1081     // Handle SegReg : [ ... ]
1082 if (getLexer().isNot(AsmToken::Colon))
1083 return ErrorOperand(Start, "Expected ':' token!");
1084 Parser.Lex(); // Eat :
1085 if (getLexer().isNot(AsmToken::LBrac))
1086 return ErrorOperand(Start, "Expected '[' token!");
1087 return ParseIntelBracExpression(SegReg, ImmDisp, Size);
1090 const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
1091 if (getParser().parseExpression(Disp, End))
1094 if (!isParsingInlineAsm())
1095 return X86Operand::CreateMem(Disp, Start, End, Size);
1096 return CreateMemForInlineAsm(Disp, Start, End, Start, Size);
1099 /// Parse the '.' operator.
1100 bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
1101 const MCExpr **NewDisp,
1102 SmallString<64> &Err) {
1103 AsmToken Tok = *&Parser.getTok();
1104 uint64_t OrigDispVal, DotDispVal;
1106 // FIXME: Handle non-constant expressions.
1107 if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp)) {
1108 OrigDispVal = OrigDisp->getValue();
1110 Err = "Non-constant offsets are not supported!";
1115 StringRef DotDispStr = Tok.getString().drop_front(1);
1117 // .Imm gets lexed as a real.
1118 if (Tok.is(AsmToken::Real)) {
1120 DotDispStr.getAsInteger(10, DotDisp);
1121 DotDispVal = DotDisp.getZExtValue();
1122 } else if (Tok.is(AsmToken::Identifier)) {
1123 // We should only see an identifier when parsing the original inline asm.
1124 // The front-end should rewrite this in terms of immediates.
1125 assert (isParsingInlineAsm() && "Unexpected field name!");
1128 std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
1129 if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
1131 Err = "Unable to lookup field reference!";
1134 DotDispVal = DotDisp;
1136 Err = "Unexpected token type!";
1140 if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
1141 SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data());
1142 unsigned Len = DotDispStr.size();
1143 unsigned Val = OrigDispVal + DotDispVal;
1144 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_DotOperator, Loc, Len,
1148 *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext());
1152 /// Parse the 'offset' operator. This operator is used to specify the
1153 /// location rather than the content of a variable.
1154 X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) {
1155 SMLoc OffsetOfLoc = Start;
1156 Parser.Lex(); // Eat offset.
1157 Start = Parser.getTok().getLoc();
1158 assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
1162 if (getParser().parseExpression(Val, End))
1163 return ErrorOperand(Start, "Unable to parse expression!");
1165 // Don't emit the offset operator.
1166 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));
1168 // The offset operator will have an 'r' constraint, thus we need to create
1169 // register operand to ensure proper matching. Just pick a GPR based on
1170 // the size of a pointer.
1171 unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
1172 return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true,
1176 enum IntelOperatorKind {
1182 /// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
1183 /// returns the number of elements in an array. It returns the value 1 for
1184 /// non-array variables. The SIZE operator returns the size of a C or C++
1185 /// variable. A variable's size is the product of its LENGTH and TYPE. The
1186 /// TYPE operator returns the size of a C or C++ type or variable. If the
1187 /// variable is an array, TYPE returns the size of a single element.
1188 X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) {
1189 SMLoc TypeLoc = Start;
1190 Parser.Lex(); // Eat offset.
1191 Start = Parser.getTok().getLoc();
1192 assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");
1196 if (getParser().parseExpression(Val, End))
1199 unsigned Length = 0, Size = 0, Type = 0;
1200 if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Val)) {
1201 const MCSymbol &Sym = SymRef->getSymbol();
1202 // FIXME: The SemaLookup will fail if the name is anything other then an
1204 // FIXME: Pass a valid SMLoc.
1206 if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Length,
1207 Size, Type, IsVarDecl))
1208 return ErrorOperand(Start, "Unable to lookup expr!");
1212 default: llvm_unreachable("Unexpected operand kind!");
1213 case IOK_LENGTH: CVal = Length; break;
1214 case IOK_SIZE: CVal = Size; break;
1215 case IOK_TYPE: CVal = Type; break;
1218 // Rewrite the type operator and the C or C++ type or variable in terms of an
1219 // immediate. E.g. TYPE foo -> $$4
1220 unsigned Len = End.getPointer() - TypeLoc.getPointer();
1221 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, CVal));
1223 const MCExpr *Imm = MCConstantExpr::Create(CVal, getContext());
1224 return X86Operand::CreateImm(Imm, Start, End);
// Parse one Intel-syntax operand. Dispatch order: MS inline-asm operators
// (OFFSET/LENGTH/SIZE/TYPE), then an immediate (optionally the displacement
// prefix of a bracketed memory expression), then a register (possibly a
// segment override introducing a memory reference), else a memory operand.
1227 X86Operand *X86AsmParser::ParseIntelOperand() {
1228 SMLoc Start = Parser.getTok().getLoc(), End;
1229 StringRef AsmTokStr = Parser.getTok().getString();
1231 // Offset, length, type and size operators.
// Only recognized when parsing MS-style inline asm; both lowercase and
// uppercase spellings are accepted.
1232 if (isParsingInlineAsm()) {
1233 if (AsmTokStr == "offset" || AsmTokStr == "OFFSET")
1234 return ParseIntelOffsetOfOperator(Start);
1235 if (AsmTokStr == "length" || AsmTokStr == "LENGTH")
1236 return ParseIntelOperator(Start, IOK_LENGTH);
1237 if (AsmTokStr == "size" || AsmTokStr == "SIZE")
1238 return ParseIntelOperator(Start, IOK_SIZE);
1239 if (AsmTokStr == "type" || AsmTokStr == "TYPE")
1240 return ParseIntelOperator(Start, IOK_TYPE);
// Immediate (integer, real, or negated) — possibly followed by '[' when it
// is the displacement of a bracketed memory expression.
1244 if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) ||
1245 getLexer().is(AsmToken::Minus)) {
1247 bool isInteger = getLexer().is(AsmToken::Integer);
1248 if (!getParser().parseExpression(Val, End)) {
1249 if (isParsingInlineAsm())
1250 InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, Start));
// No '[' follows: this is a plain immediate operand.
1252 if (getLexer().isNot(AsmToken::LBrac))
1253 return X86Operand::CreateImm(Val, Start, End);
1255 // Only positive immediates are valid.
1257 Error(Parser.getTok().getLoc(), "expected a positive immediate "
1258 "displacement before bracketed expr.");
1262 // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ].
1263 if (uint64_t ImmDisp = dyn_cast<MCConstantExpr>(Val)->getValue())
1264 return ParseIntelMemOperand(/*SegReg=*/0, ImmDisp, Start);
// Register, or segment register starting a memory reference.
1270 if (!ParseRegister(RegNo, Start, End)) {
1271 // If this is a segment register followed by a ':', then this is the start
1272 // of a memory reference, otherwise this is a normal register reference.
1273 if (getLexer().isNot(AsmToken::Colon))
1274 return X86Operand::CreateReg(RegNo, Start, End);
1276 getParser().Lex(); // Eat the colon.
1277 return ParseIntelMemOperand(/*SegReg=*/RegNo, /*Disp=*/0, Start);
// Fallback: memory operand with no segment override and no imm displacement.
1281 return ParseIntelMemOperand(/*SegReg=*/0, /*Disp=*/0, Start);
// Parse one AT&T-syntax operand, keyed off the leading token:
//   '%'  -> register (or 'segreg:' starting a memory reference)
//   '$'  -> immediate expression
//   else -> memory operand with no segment register.
1284 X86Operand *X86AsmParser::ParseATTOperand() {
1285 switch (getLexer().getKind()) {
1287 // Parse a memory operand with no segment register.
1288 return ParseMemOperand(0, Parser.getTok().getLoc());
1289 case AsmToken::Percent: {
1290 // Read the register.
1293 if (ParseRegister(RegNo, Start, End)) return 0;
// %eiz/%riz are pseudo index registers; reject them in base/plain position.
1294 if (RegNo == X86::EIZ || RegNo == X86::RIZ) {
1295 Error(Start, "%eiz and %riz can only be used as index registers",
1296 SMRange(Start, End));
1300 // If this is a segment register followed by a ':', then this is the start
1301 // of a memory reference, otherwise this is a normal register reference.
1302 if (getLexer().isNot(AsmToken::Colon))
1303 return X86Operand::CreateReg(RegNo, Start, End);
1305 getParser().Lex(); // Eat the colon.
1306 return ParseMemOperand(RegNo, Start);
1308 case AsmToken::Dollar: {
1309 // $42 -> immediate.
1310 SMLoc Start = Parser.getTok().getLoc(), End;
1313 if (getParser().parseExpression(Val, End))
1315 return X86Operand::CreateImm(Val, Start, End);
1320 /// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix
1321 /// has already been parsed if present.
///
/// \param SegReg   segment register already consumed by the caller (0 if none).
/// \param MemStart location of the start of the operand, for diagnostics and
///                 operand source ranges.
/// \returns the parsed operand, or 0 on error (diagnostic already emitted).
1322 X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
1324 // We have to disambiguate a parenthesized expression "(4+5)" from the start
1325 // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The
1326 // only way to do this without lookahead is to eat the '(' and see what is
// Displacement defaults to 0 when none is written before the '('.
1328 const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
1329 if (getLexer().isNot(AsmToken::LParen)) {
1331 if (getParser().parseExpression(Disp, ExprEnd)) return 0;
1333 // After parsing the base expression we could either have a parenthesized
1334 // memory address or not. If not, return now. If so, eat the (.
1335 if (getLexer().isNot(AsmToken::LParen)) {
1336 // Unless we have a segment register, treat this as an immediate.
1338 return X86Operand::CreateMem(Disp, MemStart, ExprEnd)\u003b
1339 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
1345 // Okay, we have a '('. We don't know if this is an expression or not, but
1346 // so we have to eat the ( to see beyond it.
1347 SMLoc LParenLoc = Parser.getTok().getLoc();
1348 Parser.Lex(); // Eat the '('.
// '(%reg...' or '(,%reg...' means the '(' opened the register part of a
// memory operand, not a parenthesized displacement expression.
1350 if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) {
1351 // Nothing to do here, fall into the code below with the '(' part of the
1352 // memory operand consumed.
1356 // It must be a parenthesized expression, parse it now.
1357 if (getParser().parseParenExpression(Disp, ExprEnd))
1360 // After parsing the base expression we could either have a parenthesized
1361 // memory address or not. If not, return now. If so, eat the (.
1362 if (getLexer().isNot(AsmToken::LParen)) {
1363 // Unless we have a segment register, treat this as an immediate.
1365 return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd);
1366 return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
1374 // If we reached here, then we just ate the ( of the memory operand. Process
1375 // the rest of the memory operand.
1376 unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
1379 if (getLexer().is(AsmToken::Percent)) {
1380 SMLoc StartLoc, EndLoc;
1381 if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0;
// eiz/riz are only valid in the index slot, never as a base.
1382 if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
1383 Error(StartLoc, "eiz and riz can only be used as index registers",
1384 SMRange(StartLoc, EndLoc));
1389 if (getLexer().is(AsmToken::Comma)) {
1390 Parser.Lex(); // Eat the comma.
1391 IndexLoc = Parser.getTok().getLoc();
1393 // Following the comma we should have either an index register, or a scale
1394 // value. We don't support the latter form, but we want to parse it
1397 // Note that even though it would be completely consistent to support syntax
1398 // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
1399 if (getLexer().is(AsmToken::Percent)) {
1401 if (ParseRegister(IndexReg, L, L)) return 0;
1403 if (getLexer().isNot(AsmToken::RParen)) {
1404 // Parse the scale amount:
1405 // ::= ',' [scale-expression]
1406 if (getLexer().isNot(AsmToken::Comma)) {
1407 Error(Parser.getTok().getLoc(),
1408 "expected comma in scale expression");
1411 Parser.Lex(); // Eat the comma.
// A trailing ')' right after the comma means the scale was omitted; keep 1.
1413 if (getLexer().isNot(AsmToken::RParen)) {
1414 SMLoc Loc = Parser.getTok().getLoc();
1417 if (getParser().parseAbsoluteExpression(ScaleVal)){
1418 Error(Loc, "expected scale expression");
1422 // Validate the scale amount.
1423 if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
1424 Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
1427 Scale = (unsigned)ScaleVal;
1430 } else if (getLexer().isNot(AsmToken::RParen)) {
1431 // A scale amount without an index is ignored.
1433 SMLoc Loc = Parser.getTok().getLoc();
// Still parse (and discard) the value so the token stream stays in sync.
1436 if (getParser().parseAbsoluteExpression(Value))
1440 Warning(Loc, "scale factor without index register is ignored");
1445 // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
1446 if (getLexer().isNot(AsmToken::RParen)) {
1447 Error(Parser.getTok().getLoc(), "unexpected token in memory operand");
1450 SMLoc MemEnd = Parser.getTok().getEndLoc();
1451 Parser.Lex(); // Eat the ')'.
1453 // If we have both a base register and an index register make sure they are
1454 // both 64-bit or 32-bit registers.
1455 // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
1456 if (BaseReg != 0 && IndexReg != 0) {
1457 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
1458 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1459 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
// RIZ is the explicit 64-bit "no index" pseudo register, so it is exempt.
1460 IndexReg != X86::RIZ) {
1461 Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
1464 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
1465 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1466 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
1467 IndexReg != X86::EIZ){
1468 Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
1473 return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
// Parse a full instruction: patch mnemonic spellings the matcher doesn't
// know (setXXb, cmp<cc>{ss,sd,ps,pd}), parse the operand list, then apply a
// series of hacks that canonicalize widely-used but non-canonical forms
// (out/in with (%dx), string instructions with explicit operands, shift-by-1,
// "int $3" -> int3).
1478 ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
1479 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1481 StringRef PatchedName = Name;
1483 // FIXME: Hack to recognize setneb as setne.
1484 if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
1485 PatchedName != "setb" && PatchedName != "setnb")
1486 PatchedName = PatchedName.substr(0, Name.size()-1);
1488 // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
// The textual condition (e.g. "lt", "neq_oq") is mapped to the SSE/AVX
// comparison-code immediate and prepended/appended as an extra operand.
1489 const MCExpr *ExtraImmOp = 0;
1490 if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
1491 (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
1492 PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
1493 bool IsVCMP = PatchedName[0] == 'v';
1494 unsigned SSECCIdx = IsVCMP ? 4 : 3;
1495 unsigned SSEComparisonCode = StringSwitch<unsigned>(
1496 PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
1500 .Case("unord", 0x03)
1505 /* AVX only from here */
1506 .Case("eq_uq", 0x08)
1509 .Case("false", 0x0B)
1510 .Case("neq_oq", 0x0C)
1514 .Case("eq_os", 0x10)
1515 .Case("lt_oq", 0x11)
1516 .Case("le_oq", 0x12)
1517 .Case("unord_s", 0x13)
1518 .Case("neq_us", 0x14)
1519 .Case("nlt_uq", 0x15)
1520 .Case("nle_uq", 0x16)
1521 .Case("ord_s", 0x17)
1522 .Case("eq_us", 0x18)
1523 .Case("nge_uq", 0x19)
1524 .Case("ngt_uq", 0x1A)
1525 .Case("false_os", 0x1B)
1526 .Case("neq_os", 0x1C)
1527 .Case("ge_oq", 0x1D)
1528 .Case("gt_oq", 0x1E)
1529 .Case("true_us", 0x1F)
// Codes >= 8 are AVX-only, hence valid only for the vcmp spelling.
1531 if (SSEComparisonCode != ~0U && (IsVCMP || SSEComparisonCode < 8)) {
1532 ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
1533 getParser().getContext());
1534 if (PatchedName.endswith("ss")) {
1535 PatchedName = IsVCMP ? "vcmpss" : "cmpss";
1536 } else if (PatchedName.endswith("sd")) {
1537 PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
1538 } else if (PatchedName.endswith("ps")) {
1539 PatchedName = IsVCMP ? "vcmpps" : "cmpps";
1541 assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
1542 PatchedName = IsVCMP ? "vcmppd" : "cmppd";
1547 Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
// In AT&T syntax the comparison-code immediate comes first; in Intel syntax
// it is appended after the other operands (see below).
1549 if (ExtraImmOp && !isParsingIntelSyntax())
1550 Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
1552 // Determine whether this is an instruction prefix.
1554 Name == "lock" || Name == "rep" ||
1555 Name == "repe" || Name == "repz" ||
1556 Name == "repne" || Name == "repnz" ||
1557 Name == "rex64" || Name == "data16";
1560 // This does the actual operand parsing. Don't parse any more if we have a
1561 // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
1562 // just want to parse the "lock" as the first instruction and the "incl" as
1564 if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
1566 // Parse '*' modifier.
1567 if (getLexer().is(AsmToken::Star)) {
1568 SMLoc Loc = Parser.getTok().getLoc();
1569 Operands.push_back(X86Operand::CreateToken("*", Loc));
1570 Parser.Lex(); // Eat the star.
1573 // Read the first operand.
1574 if (X86Operand *Op = ParseOperand())
1575 Operands.push_back(Op);
// On parse failure, discard the rest of the statement to resynchronize.
1577 Parser.eatToEndOfStatement();
1581 while (getLexer().is(AsmToken::Comma)) {
1582 Parser.Lex(); // Eat the comma.
1584 // Parse and remember the operand.
1585 if (X86Operand *Op = ParseOperand())
1586 Operands.push_back(Op);
1588 Parser.eatToEndOfStatement();
1593 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1594 SMLoc Loc = getLexer().getLoc();
1595 Parser.eatToEndOfStatement();
1596 return Error(Loc, "unexpected token in argument list");
1600 if (getLexer().is(AsmToken::EndOfStatement))
1601 Parser.Lex(); // Consume the EndOfStatement
1602 else if (isPrefix && getLexer().is(AsmToken::Slash))
1603 Parser.Lex(); // Consume the prefix separator Slash
// Intel syntax: comparison-code immediate goes last (see AT&T case above).
1605 if (ExtraImmOp && isParsingIntelSyntax())
1606 Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
1608 // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
1609 // "outb %al, %dx". Out doesn't take a memory form, but this is a widely
1610 // documented form in various unofficial manuals, so a lot of code uses it.
1611 if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
1612 Operands.size() == 3) {
1613 X86Operand &Op = *(X86Operand*)Operands.back();
// Only rewrite the exact "(%dx)" form: no segment, zero displacement,
// %dx base, no index.
1614 if (Op.isMem() && Op.Mem.SegReg == 0 &&
1615 isa<MCConstantExpr>(Op.Mem.Disp) &&
1616 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
1617 Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
1618 SMLoc Loc = Op.getEndLoc();
1619 Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
1623 // Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al".
1624 if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") &&
1625 Operands.size() == 3) {
1626 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1627 if (Op.isMem() && Op.Mem.SegReg == 0 &&
1628 isa<MCConstantExpr>(Op.Mem.Disp) &&
1629 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
1630 Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
1631 SMLoc Loc = Op.getEndLoc();
1632 Operands.begin()[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
1636 // Transform "ins[bwl] %dx, %es:(%edi)" into "ins[bwl]"
// The operands are implicit in the instruction, so drop the explicit ones.
1637 if (Name.startswith("ins") && Operands.size() == 3 &&
1638 (Name == "insb" || Name == "insw" || Name == "insl")) {
1639 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1640 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1641 if (Op.isReg() && Op.getReg() == X86::DX && isDstOp(Op2)) {
1642 Operands.pop_back();
1643 Operands.pop_back();
1649 // Transform "outs[bwl] %ds:(%esi), %dx" into "out[bwl]"
1650 if (Name.startswith("outs") && Operands.size() == 3 &&
1651 (Name == "outsb" || Name == "outsw" || Name == "outsl")) {
1652 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1653 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1654 if (isSrcOp(Op) && Op2.isReg() && Op2.getReg() == X86::DX) {
1655 Operands.pop_back();
1656 Operands.pop_back();
1662 // Transform "movs[bwl] %ds:(%esi), %es:(%edi)" into "movs[bwl]"
1663 if (Name.startswith("movs") && Operands.size() == 3 &&
1664 (Name == "movsb" || Name == "movsw" || Name == "movsl" ||
1665 (is64BitMode() && Name == "movsq"))) {
1666 X86Operand &Op = *(X86Operand*)Operands.begin()[1];
1667 X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
1668 if (isSrcOp(Op) && isDstOp(Op2)) {
1669 Operands.pop_back();
1670 Operands.pop_back();
1675 // Transform "lods[bwl] %ds:(%esi),{%al,%ax,%eax,%rax}" into "lods[bwl]"
// The accumulator register operand also selects the size suffix when the
// bare "lods" spelling was used.
1676 if (Name.startswith("lods") && Operands.size() == 3 &&
1677 (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
1678 Name == "lodsl" || (is64BitMode() && Name == "lodsq"))) {
1679 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1680 X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
1681 if (isSrcOp(*Op1) && Op2->isReg()) {
1683 unsigned reg = Op2->getReg();
1684 bool isLods = Name == "lods";
1685 if (reg == X86::AL && (isLods || Name == "lodsb"))
1687 else if (reg == X86::AX && (isLods || Name == "lodsw"))
1689 else if (reg == X86::EAX && (isLods || Name == "lodsl"))
1691 else if (reg == X86::RAX && (isLods || Name == "lodsq"))
1696 Operands.pop_back();
1697 Operands.pop_back();
1701 static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
1705 // Transform "stos[bwl] {%al,%ax,%eax,%rax},%es:(%edi)" into "stos[bwl]"
1706 if (Name.startswith("stos") && Operands.size() == 3 &&
1707 (Name == "stos" || Name == "stosb" || Name == "stosw" ||
1708 Name == "stosl" || (is64BitMode() && Name == "stosq"))) {
1709 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1710 X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
1711 if (isDstOp(*Op2) && Op1->isReg()) {
1713 unsigned reg = Op1->getReg();
1714 bool isStos = Name == "stos";
1715 if (reg == X86::AL && (isStos || Name == "stosb"))
1717 else if (reg == X86::AX && (isStos || Name == "stosw"))
1719 else if (reg == X86::EAX && (isStos || Name == "stosl"))
1721 else if (reg == X86::RAX && (isStos || Name == "stosq"))
1726 Operands.pop_back();
1727 Operands.pop_back();
1731 static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
1736 // FIXME: Hack to recognize s{hr,ar,hl} $1, <op>. Canonicalize to
// Drop the explicit "$1" count so the shift-by-one encoding is selected.
// Intel syntax puts the count last; AT&T puts it first.
1738 if ((Name.startswith("shr") || Name.startswith("sar") ||
1739 Name.startswith("shl") || Name.startswith("sal") ||
1740 Name.startswith("rcl") || Name.startswith("rcr") ||
1741 Name.startswith("rol") || Name.startswith("ror")) &&
1742 Operands.size() == 3) {
1743 if (isParsingIntelSyntax()) {
1745 X86Operand *Op1 = static_cast<X86Operand*>(Operands[2]);
1746 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1747 cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
1749 Operands.pop_back();
1752 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1753 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1754 cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
1756 Operands.erase(Operands.begin() + 1);
1761 // Transforms "int $3" into "int3" as a size optimization. We can't write an
1762 // InstAlias with an immediate operand yet.
1763 if (Name == "int" && Operands.size() == 2) {
1764 X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
1765 if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
1766 cast<MCConstantExpr>(Op1->getImm())->getValue() == 3) {
1768 Operands.erase(Operands.begin() + 1);
1769 static_cast<X86Operand*>(Operands[0])->setTokenValue("int3");
// Rewrite an accumulator-immediate instruction (e.g. ADD16i16) into its
// sign-extended-imm8 register form: Inst becomes "Opcode Reg, Reg, imm",
// reusing the original immediate operand. The isCmp flag is declared on the
// (not visible here) signature continuation — presumably a compare has no
// destination operand; TODO confirm against the full source.
1776 static bool convertToSExti8(MCInst &Inst, unsigned Opcode, unsigned Reg,
1779 TmpInst.setOpcode(Opcode);
// Destination and source register are the same accumulator register.
1781 TmpInst.addOperand(MCOperand::CreateReg(Reg));
1782 TmpInst.addOperand(MCOperand::CreateReg(Reg));
1783 TmpInst.addOperand(Inst.getOperand(0));
// Convert a 16-bit accumulator-immediate instruction to the ri8 form when
// the immediate fits in a sign-extended 8-bit value; returns false otherwise.
1788 static bool convert16i16to16ri8(MCInst &Inst, unsigned Opcode,
1789 bool isCmp = false) {
1790 if (!Inst.getOperand(0).isImm() ||
1791 !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
1794 return convertToSExti8(Inst, Opcode, X86::AX, isCmp);
// 32-bit variant of convert16i16to16ri8: EAX accumulator, imm must fit in a
// sign-extended 8-bit value.
1797 static bool convert32i32to32ri8(MCInst &Inst, unsigned Opcode,
1798 bool isCmp = false) {
1799 if (!Inst.getOperand(0).isImm() ||
1800 !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
1803 return convertToSExti8(Inst, Opcode, X86::EAX, isCmp);
// 64-bit variant of convert16i16to16ri8: RAX accumulator, imm must fit in a
// sign-extended 8-bit value.
1806 static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode,
1807 bool isCmp = false) {
1808 if (!Inst.getOperand(0).isImm() ||
1809 !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
1812 return convertToSExti8(Inst, Opcode, X86::RAX, isCmp);
// Post-match instruction tweaking: shrink accumulator-immediate arithmetic
// forms (AND/XOR/OR/CMP/ADD/SUB/ADC/SBB {16i16,32i32,64i32}) to their
// sign-extended imm8 register encodings when the immediate fits.
// Returns true if Inst was changed, so the caller can loop until fixpoint.
1816 processInstruction(MCInst &Inst,
1817 const SmallVectorImpl<MCParsedAsmOperand*> &Ops) {
1818 switch (Inst.getOpcode()) {
1819 default: return false;
1820 case X86::AND16i16: return convert16i16to16ri8(Inst, X86::AND16ri8);
1821 case X86::AND32i32: return convert32i32to32ri8(Inst, X86::AND32ri8);
1822 case X86::AND64i32: return convert64i32to64ri8(Inst, X86::AND64ri8);
1823 case X86::XOR16i16: return convert16i16to16ri8(Inst, X86::XOR16ri8);
1824 case X86::XOR32i32: return convert32i32to32ri8(Inst, X86::XOR32ri8);
1825 case X86::XOR64i32: return convert64i32to64ri8(Inst, X86::XOR64ri8);
1826 case X86::OR16i16: return convert16i16to16ri8(Inst, X86::OR16ri8);
1827 case X86::OR32i32: return convert32i32to32ri8(Inst, X86::OR32ri8);
1828 case X86::OR64i32: return convert64i32to64ri8(Inst, X86::OR64ri8);
// CMP passes isCmp=true through to convertToSExti8.
1829 case X86::CMP16i16: return convert16i16to16ri8(Inst, X86::CMP16ri8, true);
1830 case X86::CMP32i32: return convert32i32to32ri8(Inst, X86::CMP32ri8, true);
1831 case X86::CMP64i32: return convert64i32to64ri8(Inst, X86::CMP64ri8, true);
1832 case X86::ADD16i16: return convert16i16to16ri8(Inst, X86::ADD16ri8);
1833 case X86::ADD32i32: return convert32i32to32ri8(Inst, X86::ADD32ri8);
1834 case X86::ADD64i32: return convert64i32to64ri8(Inst, X86::ADD64ri8);
1835 case X86::SUB16i16: return convert16i16to16ri8(Inst, X86::SUB16ri8);
1836 case X86::SUB32i32: return convert32i32to32ri8(Inst, X86::SUB32ri8);
1837 case X86::SUB64i32: return convert64i32to64ri8(Inst, X86::SUB64ri8);
1838 case X86::ADC16i16: return convert16i16to16ri8(Inst, X86::ADC16ri8);
1839 case X86::ADC32i32: return convert32i32to32ri8(Inst, X86::ADC32ri8);
1840 case X86::ADC64i32: return convert64i32to64ri8(Inst, X86::ADC64ri8);
1841 case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8);
1842 case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8);
1843 case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8);
1847 static const char *getSubtargetFeatureName(unsigned Val);
// Match the parsed operand list against the generated matcher and emit the
// instruction. Handles wait-prefixed x87 aliases, retries ambiguous
// mnemonics with each size suffix ({b,w,l,q} or {s,l,t} for x87), and
// produces the best diagnostic it can when matching fails. When
// MatchingInlineAsm is set, nothing is emitted and diagnostics are
// suppressed (the inline-asm driver re-runs matching itself).
1849 MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1850 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1851 MCStreamer &Out, unsigned &ErrorInfo,
1852 bool MatchingInlineAsm) {
1853 assert(!Operands.empty() && "Unexpect empty operand list!");
1854 X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
1855 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
1856 ArrayRef<SMRange> EmptyRanges = ArrayRef<SMRange>();
1858 // First, handle aliases that expand to multiple instructions.
1859 // FIXME: This should be replaced with a real .td file alias mechanism.
1860 // Also, MatchInstructionImpl should actually *do* the EmitInstruction
// These x87 mnemonics are the "wait-prefixed" spellings: emit a WAIT, then
// rewrite the mnemonic to the fnXXX form and fall through to normal matching.
1862 if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" ||
1863 Op->getToken() == "fstsww" || Op->getToken() == "fstcww" ||
1864 Op->getToken() == "finit" || Op->getToken() == "fsave" ||
1865 Op->getToken() == "fstenv" || Op->getToken() == "fclex") {
1867 Inst.setOpcode(X86::WAIT);
1869 if (!MatchingInlineAsm)
1870 Out.EmitInstruction(Inst);
1873 StringSwitch<const char*>(Op->getToken())
1874 .Case("finit", "fninit")
1875 .Case("fsave", "fnsave")
1876 .Case("fstcw", "fnstcw")
1877 .Case("fstcww", "fnstcw")
1878 .Case("fstenv", "fnstenv")
1879 .Case("fstsw", "fnstsw")
1880 .Case("fstsww", "fnstsw")
1881 .Case("fclex", "fnclex")
1883 assert(Repl && "Unknown wait-prefixed instruction");
1885 Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
1888 bool WasOriginallyInvalidOperand = false;
1891 // First, try a direct match.
1892 switch (MatchInstructionImpl(Operands, Inst,
1893 ErrorInfo, MatchingInlineAsm,
1894 isParsingIntelSyntax())) {
1897 // Some instructions need post-processing to, for example, tweak which
1898 // encoding is selected. Loop on it while changes happen so the
1899 // individual transformations can chain off each other.
1900 if (!MatchingInlineAsm)
1901 while (processInstruction(Inst, Operands))
1905 if (!MatchingInlineAsm)
1906 Out.EmitInstruction(Inst);
1907 Opcode = Inst.getOpcode();
1909 case Match_MissingFeature: {
1910 assert(ErrorInfo && "Unknown missing feature!");
1911 // Special case the error message for the very common case where only
1912 // a single subtarget feature is missing.
1913 std::string Msg = "instruction requires:";
// ErrorInfo is a bitmask of missing features; append a name per set bit.
1915 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
1916 if (ErrorInfo & Mask) {
1918 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
1922 return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
1924 case Match_InvalidOperand:
1925 WasOriginallyInvalidOperand = true;
1927 case Match_MnemonicFail:
1931 // FIXME: Ideally, we would only attempt suffix matches for things which are
1932 // valid prefixes, and we could just infer the right unambiguous
1933 // type. However, that requires substantially more matcher support than the
1936 // Change the operand to point to a temporary token.
1937 StringRef Base = Op->getToken();
1938 SmallString<16> Tmp;
1941 Op->setTokenValue(Tmp.str());
1943 // If this instruction starts with an 'f', then it is a floating point stack
1944 // instruction. These come in up to three forms for 32-bit, 64-bit, and
1945 // 80-bit floating point, which use the suffixes s,l,t respectively.
1947 // Otherwise, we assume that this may be an integer instruction, which comes
1948 // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
1949 const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
1951 // Check for the various suffix matches.
// Try each candidate suffix in turn, overwriting the last char of Tmp.
1952 Tmp[Base.size()] = Suffixes[0];
1953 unsigned ErrorInfoIgnore;
1954 unsigned ErrorInfoMissingFeature = 0; // Init suppresses compiler warnings.
1955 unsigned Match1, Match2, Match3, Match4;
1957 Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1958 isParsingIntelSyntax());
1959 // If this returned as a missing feature failure, remember that.
1960 if (Match1 == Match_MissingFeature)
1961 ErrorInfoMissingFeature = ErrorInfoIgnore;
1962 Tmp[Base.size()] = Suffixes[1];
1963 Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1964 isParsingIntelSyntax());
1965 // If this returned as a missing feature failure, remember that.
1966 if (Match2 == Match_MissingFeature)
1967 ErrorInfoMissingFeature = ErrorInfoIgnore;
1968 Tmp[Base.size()] = Suffixes[2];
1969 Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1970 isParsingIntelSyntax());
1971 // If this returned as a missing feature failure, remember that.
1972 if (Match3 == Match_MissingFeature)
1973 ErrorInfoMissingFeature = ErrorInfoIgnore;
1974 Tmp[Base.size()] = Suffixes[3];
1975 Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
1976 isParsingIntelSyntax());
1977 // If this returned as a missing feature failure, remember that.
1978 if (Match4 == Match_MissingFeature)
1979 ErrorInfoMissingFeature = ErrorInfoIgnore;
1981 // Restore the old token.
1982 Op->setTokenValue(Base);
1984 // If exactly one matched, then we treat that as a successful match (and the
1985 // instruction will already have been filled in correctly, since the failing
1986 // matches won't have modified it).
1987 unsigned NumSuccessfulMatches =
1988 (Match1 == Match_Success) + (Match2 == Match_Success) +
1989 (Match3 == Match_Success) + (Match4 == Match_Success);
1990 if (NumSuccessfulMatches == 1) {
1992 if (!MatchingInlineAsm)
1993 Out.EmitInstruction(Inst);
1994 Opcode = Inst.getOpcode();
1998 // Otherwise, the match failed, try to produce a decent error message.
2000 // If we had multiple suffix matches, then identify this as an ambiguous
2002 if (NumSuccessfulMatches > 1) {
2004 unsigned NumMatches = 0;
2005 if (Match1 == Match_Success) MatchChars[NumMatches++] = Suffixes[0];
2006 if (Match2 == Match_Success) MatchChars[NumMatches++] = Suffixes[1];
2007 if (Match3 == Match_Success) MatchChars[NumMatches++] = Suffixes[2];
2008 if (Match4 == Match_Success) MatchChars[NumMatches++] = Suffixes[3];
2010 SmallString<126> Msg;
2011 raw_svector_ostream OS(Msg);
2012 OS << "ambiguous instructions require an explicit suffix (could be ";
2013 for (unsigned i = 0; i != NumMatches; ++i) {
2016 if (i + 1 == NumMatches)
2018 OS << "'" << Base << MatchChars[i] << "'";
2021 Error(IDLoc, OS.str(), EmptyRanges, MatchingInlineAsm);
2025 // Okay, we know that none of the variants matched successfully.
2027 // If all of the instructions reported an invalid mnemonic, then the original
2028 // mnemonic was invalid.
2029 if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
2030 (Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
2031 if (!WasOriginallyInvalidOperand) {
2032 ArrayRef<SMRange> Ranges = MatchingInlineAsm ? EmptyRanges :
2034 return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
2035 Ranges, MatchingInlineAsm);
2038 // Recover location info for the operand if we know which was the problem.
2039 if (ErrorInfo != ~0U) {
2040 if (ErrorInfo >= Operands.size())
2041 return Error(IDLoc, "too few operands for instruction",
2042 EmptyRanges, MatchingInlineAsm);
2044 X86Operand *Operand = (X86Operand*)Operands[ErrorInfo];
2045 if (Operand->getStartLoc().isValid()) {
2046 SMRange OperandRange = Operand->getLocRange();
2047 return Error(Operand->getStartLoc(), "invalid operand for instruction",
2048 OperandRange, MatchingInlineAsm);
2052 return Error(IDLoc, "invalid operand for instruction", EmptyRanges,
2056 // If one instruction matched with a missing feature, report this as a
2058 if ((Match1 == Match_MissingFeature) + (Match2 == Match_MissingFeature) +
2059 (Match3 == Match_MissingFeature) + (Match4 == Match_MissingFeature) == 1){
2060 std::string Msg = "instruction requires:";
2062 for (unsigned i = 0; i < (sizeof(ErrorInfoMissingFeature)*8-1); ++i) {
2063 if (ErrorInfoMissingFeature & Mask) {
2065 Msg += getSubtargetFeatureName(ErrorInfoMissingFeature & Mask);
2069 return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
2072 // If one instruction matched with an invalid operand, report this as an
2074 if ((Match1 == Match_InvalidOperand) + (Match2 == Match_InvalidOperand) +
2075 (Match3 == Match_InvalidOperand) + (Match4 == Match_InvalidOperand) == 1){
2076 Error(IDLoc, "invalid operand for instruction", EmptyRanges,
2081 // If all of these were an outright failure, report it in a useless way.
2082 Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
2083 EmptyRanges, MatchingInlineAsm);
// Handle X86-specific assembler directives: .word, .code32/.code64, and the
// syntax-switching directives .att_syntax / .intel_syntax.
2088 bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
2089 StringRef IDVal = DirectiveID.getIdentifier();
2090 if (IDVal == ".word")
2091 return ParseDirectiveWord(2, DirectiveID.getLoc());
2092 else if (IDVal.startswith(".code"))
2093 return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
2094 else if (IDVal.startswith(".att_syntax")) {
// Dialect 0 = AT&T, dialect 1 = Intel.
2095 getParser().setAssemblerDialect(0);
2097 } else if (IDVal.startswith(".intel_syntax")) {
2098 getParser().setAssemblerDialect(1);
2099 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2100 if(Parser.getTok().getString() == "noprefix") {
2101 // FIXME : Handle noprefix
2111 /// ParseDirectiveWord
2112 ///  ::= .word [ expression (, expression)* ]
///
/// Emits each comma-separated expression as a value of \p Size bytes.
/// \p L is the directive's location, used for diagnostics.
2113 bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
2114 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2116 const MCExpr *Value;
2117 if (getParser().parseExpression(Value))
2120 getParser().getStreamer().EmitValue(Value, Size);
// End of the list: done.
2122 if (getLexer().is(AsmToken::EndOfStatement))
2125 // FIXME: Improve diagnostic.
2126 if (getLexer().isNot(AsmToken::Comma))
2127 return Error(L, "unexpected token in directive");
2136 /// ParseDirectiveCode
2137 ///  ::= .code32 | .code64
///
/// Switches the subtarget between 32- and 64-bit mode (only when the mode
/// actually changes) and emits the matching assembler flag to the streamer.
2138 bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
2139 if (IDVal == ".code32") {
2141 if (is64BitMode()) {
2143 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
2145 } else if (IDVal == ".code64") {
2147 if (!is64BitMode()) {
2149 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
// Any other .codeNN spelling is rejected.
2152 return Error(L, "unexpected directive " + IDVal);
2158 // Force static initialization.
// Registers this asm parser for both the 32-bit and 64-bit X86 targets so
// TargetRegistry lookups can find it.
2159 extern "C" void LLVMInitializeX86AsmParser() {
2160 RegisterMCAsmParser<X86AsmParser> X(TheX86_32Target);
2161 RegisterMCAsmParser<X86AsmParser> Y(TheX86_64Target);
2164 #define GET_REGISTER_MATCHER
2165 #define GET_MATCHER_IMPLEMENTATION
2166 #define GET_SUBTARGET_FEATURE_NAME
2167 #include "X86GenAsmMatcher.inc"