X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FAsmParser%2FX86AsmParser.cpp;h=5f3498c7c3ce594160c15c2a1bc24d3e70737953;hb=674140fc3e47271f39a0e25cd41d7afa507b8f25;hp=5f6c110cb83355c00ce8482c89993edb87555240;hpb=4bef961baf9660f1ac5a5b80378631cd942636b2;p=oota-llvm.git diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp index 5f6c110cb83..5f3498c7c3c 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -9,10 +9,12 @@ #include "MCTargetDesc/X86BaseInfo.h" #include "llvm/ADT/APFloat.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Twine.h" +#include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCParser/MCAsmLexer.h" @@ -32,17 +34,513 @@ using namespace llvm; namespace { struct X86Operand; +static const char OpPrecedence[] = { + 0, // IC_OR + 1, // IC_AND + 2, // IC_PLUS + 2, // IC_MINUS + 3, // IC_MULTIPLY + 3, // IC_DIVIDE + 4, // IC_RPAREN + 5, // IC_LPAREN + 0, // IC_IMM + 0 // IC_REGISTER +}; + class X86AsmParser : public MCTargetAsmParser { MCSubtargetInfo &STI; MCAsmParser &Parser; ParseInstructionInfo *InstInfo; private: + SMLoc consumeToken() { + SMLoc Result = Parser.getTok().getLoc(); + Parser.Lex(); + return Result; + } + + enum InfixCalculatorTok { + IC_OR = 0, + IC_AND, + IC_PLUS, + IC_MINUS, + IC_MULTIPLY, + IC_DIVIDE, + IC_RPAREN, + IC_LPAREN, + IC_IMM, + IC_REGISTER + }; + + class InfixCalculator { + typedef std::pair< InfixCalculatorTok, int64_t > ICToken; + SmallVector InfixOperatorStack; + SmallVector PostfixStack; + + public: + int64_t popOperand() { + assert (!PostfixStack.empty() && "Poped an empty stack!"); + ICToken Op = PostfixStack.pop_back_val(); + assert ((Op.first == IC_IMM || Op.first == IC_REGISTER) + && "Expected and immediate or register!"); + return Op.second; + } + void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) { + assert ((Op == IC_IMM || Op == IC_REGISTER) && + "Unexpected operand!"); + PostfixStack.push_back(std::make_pair(Op, Val)); + } + + void popOperator() { InfixOperatorStack.pop_back(); } + void pushOperator(InfixCalculatorTok Op) { + // Push the new operator if the stack is empty. + if (InfixOperatorStack.empty()) { + InfixOperatorStack.push_back(Op); + return; + } + + // Push the new operator if it has a higher precedence than the operator + // on the top of the stack or the operator on the top of the stack is a + // left parentheses. + unsigned Idx = InfixOperatorStack.size() - 1; + InfixCalculatorTok StackOp = InfixOperatorStack[Idx]; + if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) { + InfixOperatorStack.push_back(Op); + return; + } + + // The operator on the top of the stack has higher precedence than the + // new operator. + unsigned ParenCount = 0; + while (1) { + // Nothing to process. + if (InfixOperatorStack.empty()) + break; + + Idx = InfixOperatorStack.size() - 1; + StackOp = InfixOperatorStack[Idx]; + if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount)) + break; + + // If we have an even parentheses count and we see a left parentheses, + // then stop processing. 
+ if (!ParenCount && StackOp == IC_LPAREN) + break; + + if (StackOp == IC_RPAREN) { + ++ParenCount; + InfixOperatorStack.pop_back(); + } else if (StackOp == IC_LPAREN) { + --ParenCount; + InfixOperatorStack.pop_back(); + } else { + InfixOperatorStack.pop_back(); + PostfixStack.push_back(std::make_pair(StackOp, 0)); + } + } + // Push the new operator. + InfixOperatorStack.push_back(Op); + } + int64_t execute() { + // Push any remaining operators onto the postfix stack. + while (!InfixOperatorStack.empty()) { + InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val(); + if (StackOp != IC_LPAREN && StackOp != IC_RPAREN) + PostfixStack.push_back(std::make_pair(StackOp, 0)); + } + + if (PostfixStack.empty()) + return 0; + + SmallVector OperandStack; + for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) { + ICToken Op = PostfixStack[i]; + if (Op.first == IC_IMM || Op.first == IC_REGISTER) { + OperandStack.push_back(Op); + } else { + assert (OperandStack.size() > 1 && "Too few operands."); + int64_t Val; + ICToken Op2 = OperandStack.pop_back_val(); + ICToken Op1 = OperandStack.pop_back_val(); + switch (Op.first) { + default: + report_fatal_error("Unexpected operator!"); + break; + case IC_PLUS: + Val = Op1.second + Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_MINUS: + Val = Op1.second - Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_MULTIPLY: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Multiply operation with an immediate and a register!"); + Val = Op1.second * Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_DIVIDE: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Divide operation with an immediate and a register!"); + assert (Op2.second != 0 && "Division by zero!"); + Val = Op1.second / Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_OR: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Or operation with an immediate and a register!"); + Val = Op1.second | Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_AND: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "And operation with an immediate and a register!"); + Val = Op1.second & Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + } + } + } + assert (OperandStack.size() == 1 && "Expected a single result."); + return OperandStack.pop_back_val().second; + } + }; + + enum IntelExprState { + IES_OR, + IES_AND, + IES_PLUS, + IES_MINUS, + IES_MULTIPLY, + IES_DIVIDE, + IES_LBRAC, + IES_RBRAC, + IES_LPAREN, + IES_RPAREN, + IES_REGISTER, + IES_INTEGER, + IES_IDENTIFIER, + IES_ERROR + }; + + class IntelExprStateMachine { + IntelExprState State, PrevState; + unsigned BaseReg, IndexReg, TmpReg, Scale; + int64_t Imm; + const MCExpr *Sym; + StringRef SymName; + bool StopOnLBrac, AddImmPrefix; + InfixCalculator IC; + InlineAsmIdentifierInfo Info; + public: + IntelExprStateMachine(int64_t imm, bool stoponlbrac, bool addimmprefix) : + State(IES_PLUS), PrevState(IES_ERROR), BaseReg(0), IndexReg(0), TmpReg(0), + Scale(1), Imm(imm), Sym(0), StopOnLBrac(stoponlbrac), + AddImmPrefix(addimmprefix) { Info.clear(); } + + unsigned getBaseReg() { return BaseReg; } + unsigned getIndexReg() { return IndexReg; } + unsigned getScale() { return Scale; } + const MCExpr *getSym() { return Sym; } + StringRef getSymName() { return SymName; } + int64_t getImm() { return Imm + IC.execute(); } + bool 
isValidEndState() { + return State == IES_RBRAC || State == IES_INTEGER; + } + bool getStopOnLBrac() { return StopOnLBrac; } + bool getAddImmPrefix() { return AddImmPrefix; } + bool hadError() { return State == IES_ERROR; } + + InlineAsmIdentifierInfo &getIdentifierInfo() { + return Info; + } + + void onOr() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_OR; + IC.pushOperator(IC_OR); + break; + } + PrevState = CurrState; + } + void onAnd() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_AND; + IC.pushOperator(IC_AND); + break; + } + PrevState = CurrState; + } + void onPlus() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_PLUS; + IC.pushOperator(IC_PLUS); + if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) { + // If we already have a BaseReg, then assume this is the IndexReg with + // a scale of 1. + if (!BaseReg) { + BaseReg = TmpReg; + } else { + assert (!IndexReg && "BaseReg/IndexReg already set!"); + IndexReg = TmpReg; + Scale = 1; + } + } + break; + } + PrevState = CurrState; + } + void onMinus() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MULTIPLY: + case IES_DIVIDE: + case IES_LPAREN: + case IES_RPAREN: + case IES_LBRAC: + case IES_RBRAC: + case IES_INTEGER: + case IES_REGISTER: + State = IES_MINUS; + // Only push the minus operator if it is not a unary operator. + if (!(CurrState == IES_PLUS || CurrState == IES_MINUS || + CurrState == IES_MULTIPLY || CurrState == IES_DIVIDE || + CurrState == IES_LPAREN || CurrState == IES_LBRAC)) + IC.pushOperator(IC_MINUS); + if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) { + // If we already have a BaseReg, then assume this is the IndexReg with + // a scale of 1. + if (!BaseReg) { + BaseReg = TmpReg; + } else { + assert (!IndexReg && "BaseReg/IndexReg already set!"); + IndexReg = TmpReg; + Scale = 1; + } + } + break; + } + PrevState = CurrState; + } + void onRegister(unsigned Reg) { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_LPAREN: + State = IES_REGISTER; + TmpReg = Reg; + IC.pushOperand(IC_REGISTER); + break; + case IES_MULTIPLY: + // Index Register - Scale * Register + if (PrevState == IES_INTEGER) { + assert (!IndexReg && "IndexReg already set!"); + State = IES_REGISTER; + IndexReg = Reg; + // Get the scale and replace the 'Scale * Register' with '0'. 
+ Scale = IC.popOperand(); + IC.pushOperand(IC_IMM); + IC.popOperator(); + } else { + State = IES_ERROR; + } + break; + } + PrevState = CurrState; + } + void onIdentifierExpr(const MCExpr *SymRef, StringRef SymRefName) { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MINUS: + State = IES_INTEGER; + Sym = SymRef; + SymName = SymRefName; + IC.pushOperand(IC_IMM); + break; + } + } + void onInteger(int64_t TmpInt) { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MINUS: + case IES_OR: + case IES_AND: + case IES_DIVIDE: + case IES_MULTIPLY: + case IES_LPAREN: + State = IES_INTEGER; + if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) { + // Index Register - Register * Scale + assert (!IndexReg && "IndexReg already set!"); + IndexReg = TmpReg; + Scale = TmpInt; + // Get the scale and replace the 'Register * Scale' with '0'. + IC.popOperator(); + } else if ((PrevState == IES_PLUS || PrevState == IES_MINUS || + PrevState == IES_OR || PrevState == IES_AND || + PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE || + PrevState == IES_LPAREN || PrevState == IES_LBRAC) && + CurrState == IES_MINUS) { + // Unary minus. No need to pop the minus operand because it was never + // pushed. + IC.pushOperand(IC_IMM, -TmpInt); // Push -Imm. + } else { + IC.pushOperand(IC_IMM, TmpInt); + } + break; + } + PrevState = CurrState; + } + void onStar() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_REGISTER: + case IES_RPAREN: + State = IES_MULTIPLY; + IC.pushOperator(IC_MULTIPLY); + break; + } + } + void onDivide() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + State = IES_DIVIDE; + IC.pushOperator(IC_DIVIDE); + break; + } + } + void onLBrac() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_RBRAC: + State = IES_PLUS; + IC.pushOperator(IC_PLUS); + break; + } + } + void onRBrac() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_REGISTER: + case IES_RPAREN: + State = IES_RBRAC; + if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) { + // If we already have a BaseReg, then assume this is the IndexReg with + // a scale of 1. + if (!BaseReg) { + BaseReg = TmpReg; + } else { + assert (!IndexReg && "BaseReg/IndexReg already set!"); + IndexReg = TmpReg; + Scale = 1; + } + } + break; + } + PrevState = CurrState; + } + void onLParen() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_PLUS: + case IES_MINUS: + case IES_OR: + case IES_AND: + case IES_MULTIPLY: + case IES_DIVIDE: + case IES_LPAREN: + // FIXME: We don't handle this type of unary minus, yet. 
+ if ((PrevState == IES_PLUS || PrevState == IES_MINUS || + PrevState == IES_OR || PrevState == IES_AND || + PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE || + PrevState == IES_LPAREN || PrevState == IES_LBRAC) && + CurrState == IES_MINUS) { + State = IES_ERROR; + break; + } + State = IES_LPAREN; + IC.pushOperator(IC_LPAREN); + break; + } + PrevState = CurrState; + } + void onRParen() { + PrevState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_REGISTER: + case IES_RPAREN: + State = IES_RPAREN; + IC.pushOperator(IC_RPAREN); + break; + } + } + }; + MCAsmParser &getParser() const { return Parser; } MCAsmLexer &getLexer() const { return Parser.getLexer(); } bool Error(SMLoc L, const Twine &Msg, - ArrayRef Ranges = ArrayRef(), + ArrayRef Ranges = None, bool MatchingInlineAsm = false) { if (MatchingInlineAsm) return true; return Parser.Error(L, Msg, Ranges); @@ -53,17 +551,31 @@ private: return 0; } + X86Operand *DefaultMemSIOperand(SMLoc Loc); + X86Operand *DefaultMemDIOperand(SMLoc Loc); X86Operand *ParseOperand(); X86Operand *ParseATTOperand(); X86Operand *ParseIntelOperand(); - X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc); - X86Operand *ParseIntelOperator(SMLoc StartLoc, unsigned OpKind); - X86Operand *ParseIntelMemOperand(unsigned SegReg, SMLoc StartLoc); - X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size); + X86Operand *ParseIntelOffsetOfOperator(); + bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp); + X86Operand *ParseIntelOperator(unsigned OpKind); + X86Operand *ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start, unsigned Size); + X86Operand *ParseIntelMemOperand(int64_t ImmDisp, SMLoc StartLoc, + unsigned Size); + bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End); + X86Operand *ParseIntelBracExpression(unsigned SegReg, SMLoc Start, + int64_t ImmDisp, unsigned Size); + bool ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier, + InlineAsmIdentifierInfo &Info, + bool IsUnevaluatedOperand, SMLoc &End); + X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc); - bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp, - SmallString<64> &Err); + X86Operand *CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, + unsigned BaseReg, unsigned IndexReg, + unsigned Scale, SMLoc Start, SMLoc End, + unsigned Size, StringRef Identifier, + InlineAsmIdentifierInfo &Info); bool ParseDirectiveWord(unsigned Size, SMLoc L); bool ParseDirectiveCode(StringRef IDVal, SMLoc L); @@ -76,6 +588,11 @@ private: MCStreamer &Out, unsigned &ErrorInfo, bool MatchingInlineAsm); + /// doSrcDstMatch - Returns true if operands are matching in their + /// word size (%si and %di, %esi and %edi, etc.). Order depends on + /// the parsing mode (Intel vs. AT&T). + bool doSrcDstMatch(X86Operand &Op1, X86Operand &Op2); + /// isSrcOp - Returns true if operand is either (%rsi) or %ds:%(rsi) /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode. bool isSrcOp(X86Operand &Op); @@ -88,9 +605,25 @@ private: // FIXME: Can tablegen auto-generate this? return (STI.getFeatureBits() & X86::Mode64Bit) != 0; } - void SwitchMode() { - unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(X86::Mode64Bit)); + bool is32BitMode() const { + // FIXME: Can tablegen auto-generate this? + return (STI.getFeatureBits() & X86::Mode32Bit) != 0; + } + bool is16BitMode() const { + // FIXME: Can tablegen auto-generate this? 
+ return (STI.getFeatureBits() & X86::Mode16Bit) != 0; + } + void SwitchMode(uint64_t mode) { + uint64_t oldMode = STI.getFeatureBits() & + (X86::Mode64Bit | X86::Mode32Bit | X86::Mode16Bit); + unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(oldMode | mode)); setAvailableFeatures(FB); + assert(mode == (STI.getFeatureBits() & + (X86::Mode64Bit | X86::Mode32Bit | X86::Mode16Bit))); + } + + bool isParsingIntelSyntax() { + return getParser().getAssemblerDialect(); } /// @name Auto-generated Matcher Functions @@ -102,8 +635,9 @@ private: /// } public: - X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser) - : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) { + X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser, + const MCInstrInfo &MII) + : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) { // Initialize the set of available features. setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); @@ -115,10 +649,6 @@ public: SmallVectorImpl &Operands); virtual bool ParseDirective(AsmToken DirectiveID); - - bool isParsingIntelSyntax() { - return getParser().getAssemblerDialect(); - } }; } // end anonymous namespace @@ -168,6 +698,8 @@ struct X86Operand : public MCParsedAsmOperand { SMLoc StartLoc, EndLoc; SMLoc OffsetOfLoc; + StringRef SymName; + void *OpDecl; bool AddressOf; struct TokOp { @@ -181,7 +713,6 @@ struct X86Operand : public MCParsedAsmOperand { struct ImmOp { const MCExpr *Val; - bool NeedAsmRewrite; }; struct MemOp { @@ -191,7 +722,6 @@ struct X86Operand : public MCParsedAsmOperand { unsigned IndexReg; unsigned Scale; unsigned Size; - bool NeedSizeDir; }; union { @@ -204,6 +734,9 @@ struct X86Operand : public MCParsedAsmOperand { X86Operand(KindTy K, SMLoc Start, SMLoc End) : Kind(K), StartLoc(Start), EndLoc(End) {} + StringRef getSymName() { return SymName; } + void *getOpDecl() { return OpDecl; } + /// getStartLoc - Get the location of the first token of this operand. SMLoc getStartLoc() const { return StartLoc; } /// getEndLoc - Get the location of the last token of this operand. 
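
The InfixCalculator added above evaluates Intel-syntax arithmetic by pushing operands and operators against the OpPrecedence table, emitting a postfix stack, and folding that stack in execute(). Purely as an illustrative sketch (not part of this patch; the simplified token names and the sample expression are assumptions, and parentheses, registers, | and & are omitted), the same precedence-driven postfix evaluation looks like this in standalone form:

// Illustrative sketch only: infix -> postfix -> value, mirroring the
// precedence-table approach of the InfixCalculator above (simplified).
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

enum Tok { T_PLUS, T_MINUS, T_MUL, T_DIV, T_IMM };
static const int Prec[] = { 1 /*plus*/, 1 /*minus*/, 2 /*mul*/, 2 /*div*/, 0 /*imm*/ };

struct Calc {
  std::vector<Tok> Ops;                      // pending infix operators
  std::vector<std::pair<Tok, int64_t>> Post; // postfix output

  void pushOperand(int64_t V) { Post.push_back({T_IMM, V}); }

  void pushOperator(Tok Op) {
    // Flush operators of higher or equal precedence before pushing Op.
    while (!Ops.empty() && Prec[Ops.back()] >= Prec[Op]) {
      Post.push_back({Ops.back(), 0});
      Ops.pop_back();
    }
    Ops.push_back(Op);
  }

  int64_t execute() {
    while (!Ops.empty()) { // flush any remaining operators
      Post.push_back({Ops.back(), 0});
      Ops.pop_back();
    }
    std::vector<int64_t> Stack;
    for (auto &P : Post) {
      if (P.first == T_IMM) { Stack.push_back(P.second); continue; }
      assert(Stack.size() >= 2 && "too few operands");
      int64_t R = Stack.back(); Stack.pop_back();
      int64_t L = Stack.back(); Stack.pop_back();
      switch (P.first) {
      case T_PLUS:  Stack.push_back(L + R); break;
      case T_MINUS: Stack.push_back(L - R); break;
      case T_MUL:   Stack.push_back(L * R); break;
      case T_DIV:   assert(R != 0 && "division by zero"); Stack.push_back(L / R); break;
      default: break;
      }
    }
    assert(!Stack.empty() && "expected a single result");
    return Stack.back();
  }
};

int main() {
  Calc C; // 2 + 3 * 4 - 6 / 2  ==>  11
  C.pushOperand(2); C.pushOperator(T_PLUS);
  C.pushOperand(3); C.pushOperator(T_MUL);
  C.pushOperand(4); C.pushOperator(T_MINUS);
  C.pushOperand(6); C.pushOperator(T_DIV);
  C.pushOperand(2);
  return C.execute() == 11 ? 0 : 1;
}

The class in the patch additionally defers popping across parentheses (the ParenCount logic) and carries IC_REGISTER operands so the surrounding IntelExprStateMachine can recognize scaled-index forms.
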
@@ -236,11 +769,6 @@ struct X86Operand : public MCParsedAsmOperand { return Imm.Val; } - bool needAsmRewrite() const { - assert(Kind == Immediate && "Invalid access!"); - return Imm.NeedAsmRewrite; - } - const MCExpr *getMemDisp() const { assert(Kind == Memory && "Invalid access!"); return Mem.Disp; @@ -337,11 +865,6 @@ struct X86Operand : public MCParsedAsmOperand { return isImmSExti64i32Value(CE->getValue()); } - unsigned getMemSize() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.Size; - } - bool isOffsetOf() const { return OffsetOfLoc.getPointer(); } @@ -350,11 +873,6 @@ struct X86Operand : public MCParsedAsmOperand { return AddressOf; } - bool needSizeDirective() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.NeedSizeDir; - } - bool isMem() const { return Kind == Memory; } bool isMem8() const { return Kind == Memory && (!Mem.Size || Mem.Size == 8); @@ -377,6 +895,9 @@ struct X86Operand : public MCParsedAsmOperand { bool isMem256() const { return Kind == Memory && (!Mem.Size || Mem.Size == 256); } + bool isMem512() const { + return Kind == Memory && (!Mem.Size || Mem.Size == 512); + } bool isMemVX32() const { return Kind == Memory && (!Mem.Size || Mem.Size == 32) && @@ -394,14 +915,84 @@ struct X86Operand : public MCParsedAsmOperand { return Kind == Memory && (!Mem.Size || Mem.Size == 64) && getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; } + bool isMemVZ32() const { + return Kind == Memory && (!Mem.Size || Mem.Size == 32) && + getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; + } + bool isMemVZ64() const { + return Kind == Memory && (!Mem.Size || Mem.Size == 64) && + getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; + } bool isAbsMem() const { return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && !getMemIndexReg() && getMemScale() == 1; } + bool isSrcIdx() const { + return !getMemIndexReg() && getMemScale() == 1 && + (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI || + getMemBaseReg() == X86::SI) && isa(getMemDisp()) && + cast(getMemDisp())->getValue() == 0; + } + bool isSrcIdx8() const { + return isMem8() && isSrcIdx(); + } + bool isSrcIdx16() const { + return isMem16() && isSrcIdx(); + } + bool isSrcIdx32() const { + return isMem32() && isSrcIdx(); + } + bool isSrcIdx64() const { + return isMem64() && isSrcIdx(); + } + + bool isDstIdx() const { + return !getMemIndexReg() && getMemScale() == 1 && + (getMemSegReg() == 0 || getMemSegReg() == X86::ES) && + (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI || + getMemBaseReg() == X86::DI) && isa(getMemDisp()) && + cast(getMemDisp())->getValue() == 0; + } + bool isDstIdx8() const { + return isMem8() && isDstIdx(); + } + bool isDstIdx16() const { + return isMem16() && isDstIdx(); + } + bool isDstIdx32() const { + return isMem32() && isDstIdx(); + } + bool isDstIdx64() const { + return isMem64() && isDstIdx(); + } + + bool isMemOffs8() const { + return Kind == Memory && !getMemBaseReg() && + !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 8); + } + bool isMemOffs16() const { + return Kind == Memory && !getMemBaseReg() && + !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 16); + } + bool isMemOffs32() const { + return Kind == Memory && !getMemBaseReg() && + !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 32); + } + bool isMemOffs64() const { + return Kind == Memory && !getMemBaseReg() && + !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 64); 
+ } + bool isReg() const { return Kind == Register; } + bool isGR32orGR64() const { + return Kind == Register && + (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) || + X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg())); + } + void addExpr(MCInst &Inst, const MCExpr *Expr) const { // Add as immediates when possible. if (const MCConstantExpr *CE = dyn_cast(Expr)) @@ -415,43 +1006,40 @@ struct X86Operand : public MCParsedAsmOperand { Inst.addOperand(MCOperand::CreateReg(getReg())); } - void addImmOperands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - addExpr(Inst, getImm()); + static unsigned getGR32FromGR64(unsigned RegNo) { + switch (RegNo) { + default: llvm_unreachable("Unexpected register"); + case X86::RAX: return X86::EAX; + case X86::RCX: return X86::ECX; + case X86::RDX: return X86::EDX; + case X86::RBX: return X86::EBX; + case X86::RBP: return X86::EBP; + case X86::RSP: return X86::ESP; + case X86::RSI: return X86::ESI; + case X86::RDI: return X86::EDI; + case X86::R8: return X86::R8D; + case X86::R9: return X86::R9D; + case X86::R10: return X86::R10D; + case X86::R11: return X86::R11D; + case X86::R12: return X86::R12D; + case X86::R13: return X86::R13D; + case X86::R14: return X86::R14D; + case X86::R15: return X86::R15D; + case X86::RIP: return X86::EIP; + } } - void addMem8Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMem16Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMem32Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMem64Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMem80Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMem128Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMem256Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMemVX32Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMemVY32Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); - } - void addMemVX64Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); + void addGR32orGR64Operands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + unsigned RegNo = getReg(); + if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo)) + RegNo = getGR32FromGR64(RegNo); + Inst.addOperand(MCOperand::CreateReg(RegNo)); } - void addMemVY64Operands(MCInst &Inst, unsigned N) const { - addMemOperands(Inst, N); + + void addImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + addExpr(Inst, getImm()); } void addMemOperands(MCInst &Inst, unsigned N) const { @@ -472,6 +1060,26 @@ struct X86Operand : public MCParsedAsmOperand { Inst.addOperand(MCOperand::CreateExpr(getMemDisp())); } + void addSrcIdxOperands(MCInst &Inst, unsigned N) const { + assert((N == 2) && "Invalid number of operands!"); + Inst.addOperand(MCOperand::CreateReg(getMemBaseReg())); + Inst.addOperand(MCOperand::CreateReg(getMemSegReg())); + } + void addDstIdxOperands(MCInst &Inst, unsigned N) const { + assert((N == 1) && "Invalid number of operands!"); + Inst.addOperand(MCOperand::CreateReg(getMemBaseReg())); + } + + void addMemOffsOperands(MCInst &Inst, unsigned N) const { + assert((N == 2) && "Invalid number of operands!"); + // Add as immediates when possible. 
+ if (const MCConstantExpr *CE = dyn_cast(getMemDisp())) + Inst.addOperand(MCOperand::CreateImm(CE->getValue())); + else + Inst.addOperand(MCOperand::CreateExpr(getMemDisp())); + Inst.addOperand(MCOperand::CreateReg(getMemSegReg())); + } + static X86Operand *CreateToken(StringRef Str, SMLoc Loc) { SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size()); X86Operand *Res = new X86Operand(Token, Loc, EndLoc); @@ -482,25 +1090,28 @@ struct X86Operand : public MCParsedAsmOperand { static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc, bool AddressOf = false, - SMLoc OffsetOfLoc = SMLoc()) { + SMLoc OffsetOfLoc = SMLoc(), + StringRef SymName = StringRef(), + void *OpDecl = 0) { X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc); Res->Reg.RegNo = RegNo; Res->AddressOf = AddressOf; Res->OffsetOfLoc = OffsetOfLoc; + Res->SymName = SymName; + Res->OpDecl = OpDecl; return Res; } - static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc, - bool NeedRewrite = true){ + static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){ X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc); Res->Imm.Val = Val; - Res->Imm.NeedAsmRewrite = NeedRewrite; return Res; } /// Create an absolute memory operand. static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, - unsigned Size = 0, bool NeedSizeDir = false) { + unsigned Size = 0, StringRef SymName = StringRef(), + void *OpDecl = 0) { X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc); Res->Mem.SegReg = 0; Res->Mem.Disp = Disp; @@ -508,8 +1119,9 @@ struct X86Operand : public MCParsedAsmOperand { Res->Mem.IndexReg = 0; Res->Mem.Scale = 1; Res->Mem.Size = Size; - Res->Mem.NeedSizeDir = NeedSizeDir; - Res->AddressOf = false; + Res->SymName = SymName; + Res->OpDecl = OpDecl; + Res->AddressOf = false; return Res; } @@ -517,7 +1129,9 @@ struct X86Operand : public MCParsedAsmOperand { static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc, SMLoc EndLoc, - unsigned Size = 0, bool NeedSizeDir = false) { + unsigned Size = 0, + StringRef SymName = StringRef(), + void *OpDecl = 0) { // We should never just have a displacement, that should be parsed as an // absolute memory operand. assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!"); @@ -532,16 +1146,39 @@ struct X86Operand : public MCParsedAsmOperand { Res->Mem.IndexReg = IndexReg; Res->Mem.Scale = Scale; Res->Mem.Size = Size; - Res->Mem.NeedSizeDir = NeedSizeDir; - Res->AddressOf = false; + Res->SymName = SymName; + Res->OpDecl = OpDecl; + Res->AddressOf = false; return Res; } }; } // end anonymous namespace. +bool X86AsmParser::doSrcDstMatch(X86Operand &Op1, X86Operand &Op2) +{ + // Return true and let a normal complaint about bogus operands happen. + if (!Op1.isMem() || !Op2.isMem()) + return true; + + // Actually these might be the other way round if Intel syntax is + // being used. It doesn't matter. 
+ unsigned diReg = Op1.Mem.BaseReg; + unsigned siReg = Op2.Mem.BaseReg; + + if (X86MCRegisterClasses[X86::GR16RegClassID].contains(siReg)) + return X86MCRegisterClasses[X86::GR16RegClassID].contains(diReg); + if (X86MCRegisterClasses[X86::GR32RegClassID].contains(siReg)) + return X86MCRegisterClasses[X86::GR32RegClassID].contains(diReg); + if (X86MCRegisterClasses[X86::GR64RegClassID].contains(siReg)) + return X86MCRegisterClasses[X86::GR64RegClassID].contains(diReg); + // Again, return true and let another error happen. + return true; +} + bool X86AsmParser::isSrcOp(X86Operand &Op) { - unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI; + unsigned basereg = + is64BitMode() ? X86::RSI : (is32BitMode() ? X86::ESI : X86::SI); return (Op.isMem() && (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::DS) && @@ -551,7 +1188,8 @@ bool X86AsmParser::isSrcOp(X86Operand &Op) { } bool X86AsmParser::isDstOp(X86Operand &Op) { - unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI; + unsigned basereg = + is64BitMode() ? X86::RDI : (is32BitMode() ? X86::EDI : X86::DI); return Op.isMem() && (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) && @@ -587,7 +1225,7 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo, RegNo = MatchRegisterName(Tok.getString().lower()); if (!is64BitMode()) { - // FIXME: This should be done using Requires and + // FIXME: This should be done using Requires and // Requires so "eiz" usage in 64-bit instructions can be also // checked. // FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a @@ -669,6 +1307,22 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo, return false; } +X86Operand *X86AsmParser::DefaultMemSIOperand(SMLoc Loc) { + unsigned basereg = + is64BitMode() ? X86::RSI : (is32BitMode() ? X86::ESI : X86::SI); + const MCExpr *Disp = MCConstantExpr::Create(0, getContext()); + return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg, + /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0); +} + +X86Operand *X86AsmParser::DefaultMemDIOperand(SMLoc Loc) { + unsigned basereg = + is64BitMode() ? X86::RDI : (is32BitMode() ? 
X86::EDI : X86::DI); + const MCExpr *Disp = MCConstantExpr::Create(0, getContext()); + return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg, + /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0); +} + X86Operand *X86AsmParser::ParseOperand() { if (isParsingIntelSyntax()) return ParseIntelOperand(); @@ -685,255 +1339,111 @@ static unsigned getIntelMemOperandSize(StringRef OpStr) { .Cases("XWORD", "xword", 80) .Cases("XMMWORD", "xmmword", 128) .Cases("YMMWORD", "ymmword", 256) + .Cases("ZMMWORD", "zmmword", 512) + .Cases("OPAQUE", "opaque", -1U) // needs to be non-zero, but doesn't matter .Default(0); return Size; } -enum IntelBracExprState { - IBES_START, - IBES_LBRAC, - IBES_RBRAC, - IBES_REGISTER, - IBES_REGISTER_STAR, - IBES_REGISTER_STAR_INTEGER, - IBES_INTEGER, - IBES_INTEGER_STAR, - IBES_INDEX_REGISTER, - IBES_IDENTIFIER, - IBES_DISP_EXPR, - IBES_MINUS, - IBES_ERROR -}; - -class IntelBracExprStateMachine { - IntelBracExprState State; - unsigned BaseReg, IndexReg, Scale; - int64_t Disp; - - unsigned TmpReg; - int64_t TmpInteger; - - bool isPlus; - -public: - IntelBracExprStateMachine(MCAsmParser &parser) : - State(IBES_START), BaseReg(0), IndexReg(0), Scale(1), Disp(0), - TmpReg(0), TmpInteger(0), isPlus(true) {} - - unsigned getBaseReg() { return BaseReg; } - unsigned getIndexReg() { return IndexReg; } - unsigned getScale() { return Scale; } - int64_t getDisp() { return Disp; } - bool isValidEndState() { return State == IBES_RBRAC; } - - void onPlus() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_INTEGER: - State = IBES_START; - if (isPlus) - Disp += TmpInteger; - else - Disp -= TmpInteger; - break; - case IBES_REGISTER: - State = IBES_START; - // If we already have a BaseReg, then assume this is the IndexReg with a - // scale of 1. - if (!BaseReg) { - BaseReg = TmpReg; - } else { - assert (!IndexReg && "BaseReg/IndexReg already set!"); - IndexReg = TmpReg; - Scale = 1; - } - break; - case IBES_INDEX_REGISTER: - State = IBES_START; - break; - } - isPlus = true; - } - void onMinus() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_MINUS; - break; - case IBES_INTEGER: - State = IBES_START; - if (isPlus) - Disp += TmpInteger; - else - Disp -= TmpInteger; - break; - case IBES_REGISTER: - State = IBES_START; - // If we already have a BaseReg, then assume this is the IndexReg with a - // scale of 1. 
- if (!BaseReg) { - BaseReg = TmpReg; - } else { - assert (!IndexReg && "BaseReg/IndexReg already set!"); - IndexReg = TmpReg; - Scale = 1; - } - break; - case IBES_INDEX_REGISTER: - State = IBES_START; - break; - } - isPlus = false; - } - void onRegister(unsigned Reg) { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_REGISTER; - TmpReg = Reg; - break; - case IBES_INTEGER_STAR: - assert (!IndexReg && "IndexReg already set!"); - State = IBES_INDEX_REGISTER; - IndexReg = Reg; - Scale = TmpInteger; - break; - } - } - void onDispExpr() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_DISP_EXPR; - break; - } - } - void onInteger(int64_t TmpInt) { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_START: - State = IBES_INTEGER; - TmpInteger = TmpInt; - break; - case IBES_MINUS: - State = IBES_INTEGER; - TmpInteger = TmpInt; - break; - case IBES_REGISTER_STAR: - assert (!IndexReg && "IndexReg already set!"); - State = IBES_INDEX_REGISTER; - IndexReg = TmpReg; - Scale = TmpInt; - break; +X86Operand * +X86AsmParser::CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, + unsigned BaseReg, unsigned IndexReg, + unsigned Scale, SMLoc Start, SMLoc End, + unsigned Size, StringRef Identifier, + InlineAsmIdentifierInfo &Info){ + if (isa(Disp)) { + // If this is not a VarDecl then assume it is a FuncDecl or some other label + // reference. We need an 'r' constraint here, so we need to create register + // operand to ensure proper matching. Just pick a GPR based on the size of + // a pointer. + if (!Info.IsVarDecl) { + unsigned RegNo = + is64BitMode() ? X86::RBX : (is32BitMode() ? X86::EBX : X86::BX); + return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true, + SMLoc(), Identifier, Info.OpDecl); } - } - void onStar() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_INTEGER: - State = IBES_INTEGER_STAR; - break; - case IBES_REGISTER: - State = IBES_REGISTER_STAR; - break; + if (!Size) { + Size = Info.Type * 8; // Size is in terms of bits in this context. + if (Size) + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_SizeDirective, Start, + /*Len=*/0, Size)); } } - void onLBrac() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_RBRAC: - State = IBES_START; - isPlus = true; - break; - } - } - void onRBrac() { - switch (State) { - default: - State = IBES_ERROR; - break; - case IBES_DISP_EXPR: - State = IBES_RBRAC; - break; - case IBES_INTEGER: - State = IBES_RBRAC; - if (isPlus) - Disp += TmpInteger; - else - Disp -= TmpInteger; - break; - case IBES_REGISTER: - State = IBES_RBRAC; - // If we already have a BaseReg, then assume this is the IndexReg with a - // scale of 1. - if (!BaseReg) { - BaseReg = TmpReg; - } else { - assert (!IndexReg && "BaseReg/IndexReg already set!"); - IndexReg = TmpReg; - Scale = 1; + + // When parsing inline assembly we set the base register to a non-zero value + // if we don't know the actual value at this time. This is necessary to + // get the matching correct in some cases. + BaseReg = BaseReg ? BaseReg : 1; + return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start, + End, Size, Identifier, Info.OpDecl); +} + +static void +RewriteIntelBracExpression(SmallVectorImpl *AsmRewrites, + StringRef SymName, int64_t ImmDisp, + int64_t FinalImmDisp, SMLoc &BracLoc, + SMLoc &StartInBrac, SMLoc &End) { + // Remove the '[' and ']' from the IR string. 
+ AsmRewrites->push_back(AsmRewrite(AOK_Skip, BracLoc, 1)); + AsmRewrites->push_back(AsmRewrite(AOK_Skip, End, 1)); + + // If ImmDisp is non-zero, then we parsed a displacement before the + // bracketed expression (i.e., ImmDisp [ BaseReg + Scale*IndexReg + Disp]) + // If ImmDisp doesn't match the displacement computed by the state machine + // then we have an additional displacement in the bracketed expression. + if (ImmDisp != FinalImmDisp) { + if (ImmDisp) { + // We have an immediate displacement before the bracketed expression. + // Adjust this to match the final immediate displacement. + bool Found = false; + for (SmallVectorImpl::iterator I = AsmRewrites->begin(), + E = AsmRewrites->end(); I != E; ++I) { + if ((*I).Loc.getPointer() > BracLoc.getPointer()) + continue; + if ((*I).Kind == AOK_ImmPrefix || (*I).Kind == AOK_Imm) { + assert (!Found && "ImmDisp already rewritten."); + (*I).Kind = AOK_Imm; + (*I).Len = BracLoc.getPointer() - (*I).Loc.getPointer(); + (*I).Val = FinalImmDisp; + Found = true; + break; + } } - break; - case IBES_INDEX_REGISTER: - State = IBES_RBRAC; - break; + assert (Found && "Unable to rewrite ImmDisp."); + (void)Found; + } else { + // We have a symbolic and an immediate displacement, but no displacement + // before the bracketed expression. Put the immediate displacement + // before the bracketed expression. + AsmRewrites->push_back(AsmRewrite(AOK_Imm, BracLoc, 0, FinalImmDisp)); } } -}; + // Remove all the ImmPrefix rewrites within the brackets. + for (SmallVectorImpl::iterator I = AsmRewrites->begin(), + E = AsmRewrites->end(); I != E; ++I) { + if ((*I).Loc.getPointer() < StartInBrac.getPointer()) + continue; + if ((*I).Kind == AOK_ImmPrefix) + (*I).Kind = AOK_Delete; + } + const char *SymLocPtr = SymName.data(); + // Skip everything before the symbol. + if (unsigned Len = SymLocPtr - StartInBrac.getPointer()) { + assert(Len > 0 && "Expected a non-negative length."); + AsmRewrites->push_back(AsmRewrite(AOK_Skip, StartInBrac, Len)); + } + // Skip everything after the symbol. + if (unsigned Len = End.getPointer() - (SymLocPtr + SymName.size())) { + SMLoc Loc = SMLoc::getFromPointer(SymLocPtr + SymName.size()); + assert(Len > 0 && "Expected a non-negative length."); + AsmRewrites->push_back(AsmRewrite(AOK_Skip, Loc, Len)); + } +} -X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, - unsigned Size) { +bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) { const AsmToken &Tok = Parser.getTok(); - SMLoc Start = Tok.getLoc(), End = Tok.getEndLoc(); - - // Eat '[' - if (getLexer().isNot(AsmToken::LBrac)) - return ErrorOperand(Start, "Expected '[' token!"); - Parser.Lex(); - - unsigned TmpReg = 0; - - // Try to handle '[' 'symbol' ']' - if (getLexer().is(AsmToken::Identifier)) { - if (ParseRegister(TmpReg, Start, End)) { - const MCExpr *Disp; - if (getParser().parseExpression(Disp, End)) - return 0; - - if (getLexer().isNot(AsmToken::RBrac)) - return ErrorOperand(Parser.getTok().getLoc(), "Expected ']' token!"); - // Adjust the EndLoc due to the ']'. - End = SMLoc::getFromPointer(Parser.getTok().getEndLoc().getPointer()-1); - Parser.Lex(); - return X86Operand::CreateMem(Disp, Start, End, Size); - } - } - // Parse [ BaseReg + Scale*IndexReg + Disp ]. bool Done = false; - IntelBracExprStateMachine SM(Parser); - - // If we parsed a register, then the end loc has already been set and - // the identifier has already been lexed. We also need to update the - // state. 
- if (TmpReg) - SM.onRegister(TmpReg); - - const MCExpr *Disp = 0; while (!Done) { bool UpdateLocLex = true; @@ -941,6 +1451,10 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, // identifier. Don't try an parse it as a register. if (Tok.getString().startswith(".")) break; + + // If we're parsing an immediate expression, we don't expect a '['. + if (SM.getStopOnLBrac() && getLexer().getKind() == AsmToken::LBrac) + break; switch (getLexer().getKind()) { default: { @@ -948,149 +1462,271 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, Done = true; break; } - return ErrorOperand(Tok.getLoc(), "Unexpected token!"); + return Error(Tok.getLoc(), "unknown token in expression"); + } + case AsmToken::EndOfStatement: { + Done = true; + break; } case AsmToken::Identifier: { - // This could be a register or a displacement expression. - if(!ParseRegister(TmpReg, Start, End)) { + // This could be a register or a symbolic displacement. + unsigned TmpReg; + const MCExpr *Val; + SMLoc IdentLoc = Tok.getLoc(); + StringRef Identifier = Tok.getString(); + if(!ParseRegister(TmpReg, IdentLoc, End)) { SM.onRegister(TmpReg); UpdateLocLex = false; break; - } else if (!getParser().parseExpression(Disp, End)) { - SM.onDispExpr(); + } else { + if (!isParsingInlineAsm()) { + if (getParser().parsePrimaryExpr(Val, End)) + return Error(Tok.getLoc(), "Unexpected identifier!"); + } else { + InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo(); + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) + return true; + } + SM.onIdentifierExpr(Val, Identifier); UpdateLocLex = false; break; } - return ErrorOperand(Tok.getLoc(), "Unexpected identifier!"); + return Error(Tok.getLoc(), "Unexpected identifier!"); } case AsmToken::Integer: { - int64_t Val = Tok.getIntVal(); - SM.onInteger(Val); + if (isParsingInlineAsm() && SM.getAddImmPrefix()) + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, + Tok.getLoc())); + // Look for 'b' or 'f' following an Integer as a directional label + SMLoc Loc = getTok().getLoc(); + int64_t IntVal = getTok().getIntVal(); + End = consumeToken(); + UpdateLocLex = false; + if (getLexer().getKind() == AsmToken::Identifier) { + StringRef IDVal = getTok().getString(); + if (IDVal == "f" || IDVal == "b") { + MCSymbol *Sym = + getContext().GetDirectionalLocalSymbol(IntVal, + IDVal == "f" ? 1 : 0); + MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; + const MCExpr *Val = + MCSymbolRefExpr::Create(Sym, Variant, getContext()); + if (IDVal == "b" && Sym->isUndefined()) + return Error(Loc, "invalid reference to undefined symbol"); + StringRef Identifier = Sym->getName(); + SM.onIdentifierExpr(Val, Identifier); + End = consumeToken(); + } else { + SM.onInteger(IntVal); + } + } else { + SM.onInteger(IntVal); + } break; } case AsmToken::Plus: SM.onPlus(); break; case AsmToken::Minus: SM.onMinus(); break; case AsmToken::Star: SM.onStar(); break; + case AsmToken::Slash: SM.onDivide(); break; + case AsmToken::Pipe: SM.onOr(); break; + case AsmToken::Amp: SM.onAnd(); break; case AsmToken::LBrac: SM.onLBrac(); break; case AsmToken::RBrac: SM.onRBrac(); break; + case AsmToken::LParen: SM.onLParen(); break; + case AsmToken::RParen: SM.onRParen(); break; } - if (!Done && UpdateLocLex) { - End = Tok.getLoc(); - Parser.Lex(); // Consume the token. 
- } + if (SM.hadError()) + return Error(Tok.getLoc(), "unknown token in expression"); + + if (!Done && UpdateLocLex) + End = consumeToken(); } + return false; +} - if (!Disp) - Disp = MCConstantExpr::Create(SM.getDisp(), getContext()); +X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, + int64_t ImmDisp, + unsigned Size) { + const AsmToken &Tok = Parser.getTok(); + SMLoc BracLoc = Tok.getLoc(), End = Tok.getEndLoc(); + if (getLexer().isNot(AsmToken::LBrac)) + return ErrorOperand(BracLoc, "Expected '[' token!"); + Parser.Lex(); // Eat '[' + + SMLoc StartInBrac = Tok.getLoc(); + // Parse [ Symbol + ImmDisp ] and [ BaseReg + Scale*IndexReg + ImmDisp ]. We + // may have already parsed an immediate displacement before the bracketed + // expression. + IntelExprStateMachine SM(ImmDisp, /*StopOnLBrac=*/false, /*AddImmPrefix=*/true); + if (ParseIntelExpression(SM, End)) + return 0; + + const MCExpr *Disp; + if (const MCExpr *Sym = SM.getSym()) { + // A symbolic displacement. + Disp = Sym; + if (isParsingInlineAsm()) + RewriteIntelBracExpression(InstInfo->AsmRewrites, SM.getSymName(), + ImmDisp, SM.getImm(), BracLoc, StartInBrac, + End); + } else { + // An immediate displacement only. + Disp = MCConstantExpr::Create(SM.getImm(), getContext()); + } // Parse the dot operator (e.g., [ebx].foo.bar). if (Tok.getString().startswith(".")) { - SmallString<64> Err; const MCExpr *NewDisp; - if (ParseIntelDotOperator(Disp, &NewDisp, Err)) - return ErrorOperand(Tok.getLoc(), Err); + if (ParseIntelDotOperator(Disp, NewDisp)) + return 0; - End = Parser.getTok().getEndLoc(); + End = Tok.getEndLoc(); Parser.Lex(); // Eat the field. Disp = NewDisp; } int BaseReg = SM.getBaseReg(); int IndexReg = SM.getIndexReg(); - - // handle [-42] - if (!BaseReg && !IndexReg) { - if (!SegReg) - return X86Operand::CreateMem(Disp, Start, End); - else - return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size); + int Scale = SM.getScale(); + if (!isParsingInlineAsm()) { + // handle [-42] + if (!BaseReg && !IndexReg) { + if (!SegReg) + return X86Operand::CreateMem(Disp, Start, End, Size); + else + return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size); + } + return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start, + End, Size); } - int Scale = SM.getScale(); - return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, - Start, End, Size); + InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo(); + return CreateMemForInlineAsm(SegReg, Disp, BaseReg, IndexReg, Scale, Start, + End, Size, SM.getSymName(), Info); } -/// ParseIntelMemOperand - Parse intel style memory operand. -X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) { +// Inline assembly may use variable names with namespace alias qualifiers. 
+bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val, + StringRef &Identifier, + InlineAsmIdentifierInfo &Info, + bool IsUnevaluatedOperand, SMLoc &End) { + assert (isParsingInlineAsm() && "Expected to be parsing inline assembly."); + Val = 0; + + StringRef LineBuf(Identifier.data()); + SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand); + const AsmToken &Tok = Parser.getTok(); - SMLoc End; - unsigned Size = getIntelMemOperandSize(Tok.getString()); - if (Size) { - Parser.Lex(); - assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") && - "Unexpected token!"); - Parser.Lex(); + // Advance the token stream until the end of the current token is + // after the end of what the frontend claimed. + const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size(); + while (true) { + End = Tok.getEndLoc(); + getLexer().Lex(); + + assert(End.getPointer() <= EndPtr && "frontend claimed part of a token?"); + if (End.getPointer() == EndPtr) break; + } + + // Create the symbol reference. + Identifier = LineBuf; + MCSymbol *Sym = getContext().GetOrCreateSymbol(Identifier); + MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; + Val = MCSymbolRefExpr::Create(Sym, Variant, getParser().getContext()); + return false; +} + +/// \brief Parse intel style segment override. +X86Operand *X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, + SMLoc Start, + unsigned Size) { + assert(SegReg != 0 && "Tried to parse a segment override without a segment!"); + const AsmToken &Tok = Parser.getTok(); // Eat colon. + if (Tok.isNot(AsmToken::Colon)) + return ErrorOperand(Tok.getLoc(), "Expected ':' token!"); + Parser.Lex(); // Eat ':' + + int64_t ImmDisp = 0; + if (getLexer().is(AsmToken::Integer)) { + ImmDisp = Tok.getIntVal(); + AsmToken ImmDispToken = Parser.Lex(); // Eat the integer. + + if (isParsingInlineAsm()) + InstInfo->AsmRewrites->push_back( + AsmRewrite(AOK_ImmPrefix, ImmDispToken.getLoc())); + + if (getLexer().isNot(AsmToken::LBrac)) { + // An immediate following a 'segment register', 'colon' token sequence can + // be followed by a bracketed expression. If it isn't we know we have our + // final segment override. + const MCExpr *Disp = MCConstantExpr::Create(ImmDisp, getContext()); + return X86Operand::CreateMem(SegReg, Disp, /*BaseReg=*/0, /*IndexReg=*/0, + /*Scale=*/1, Start, ImmDispToken.getEndLoc(), + Size); + } } if (getLexer().is(AsmToken::LBrac)) - return ParseIntelBracExpression(SegReg, Size); + return ParseIntelBracExpression(SegReg, Start, ImmDisp, Size); - if (!ParseRegister(SegReg, Start, End)) { - // Handel SegReg : [ ... 
] - if (getLexer().isNot(AsmToken::Colon)) - return ErrorOperand(Start, "Expected ':' token!"); - Parser.Lex(); // Eat : - if (getLexer().isNot(AsmToken::LBrac)) - return ErrorOperand(Start, "Expected '[' token!"); - return ParseIntelBracExpression(SegReg, Size); + const MCExpr *Val; + SMLoc End; + if (!isParsingInlineAsm()) { + if (getParser().parsePrimaryExpr(Val, End)) + return ErrorOperand(Tok.getLoc(), "unknown token in expression"); + + return X86Operand::CreateMem(Val, Start, End, Size); } - const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext()); - if (getParser().parseExpression(Disp, End)) + InlineAsmIdentifierInfo Info; + StringRef Identifier = Tok.getString(); + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) return 0; + return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0, + /*Scale=*/1, Start, End, Size, Identifier, Info); +} - bool NeedSizeDir = false; - bool IsVarDecl = false; - if (isParsingInlineAsm()) { - if (const MCSymbolRefExpr *SymRef = dyn_cast(Disp)) { - const MCSymbol &Sym = SymRef->getSymbol(); - // FIXME: The SemaLookup will fail if the name is anything other then an - // identifier. - // FIXME: Pass a valid SMLoc. - unsigned tLength, tSize, tType; - SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, tLength, - tSize, tType, IsVarDecl); - if (!Size) - Size = tType * 8; // Size is in terms of bits in this context. - NeedSizeDir = Size > 0; - } - } - if (!isParsingInlineAsm()) - return X86Operand::CreateMem(Disp, Start, End, Size); - else { - // If this is not a VarDecl then assume it is a FuncDecl or some other label - // reference. We need an 'r' constraint here, so we need to create register - // operand to ensure proper matching. Just pick a GPR based on the size of - // a pointer. - if (!IsVarDecl) { - unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; - return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true); - } +/// ParseIntelMemOperand - Parse intel style memory operand. +X86Operand *X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, SMLoc Start, + unsigned Size) { + const AsmToken &Tok = Parser.getTok(); + SMLoc End; + + // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ]. + if (getLexer().is(AsmToken::LBrac)) + return ParseIntelBracExpression(/*SegReg=*/0, Start, ImmDisp, Size); + + const MCExpr *Val; + if (!isParsingInlineAsm()) { + if (getParser().parsePrimaryExpr(Val, End)) + return ErrorOperand(Tok.getLoc(), "unknown token in expression"); - // When parsing inline assembly we set the base register to a non-zero value - // as we don't know the actual value at this time. This is necessary to - // get the matching correct in some cases. - return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0, - /*Scale*/1, Start, End, Size, NeedSizeDir); + return X86Operand::CreateMem(Val, Start, End, Size); } + + InlineAsmIdentifierInfo Info; + StringRef Identifier = Tok.getString(); + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) + return 0; + return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0, /*IndexReg=*/0, + /*Scale=*/1, Start, End, Size, Identifier, Info); } /// Parse the '.' operator. 
bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, - const MCExpr **NewDisp, - SmallString<64> &Err) { - AsmToken Tok = *&Parser.getTok(); - uint64_t OrigDispVal, DotDispVal; + const MCExpr *&NewDisp) { + const AsmToken &Tok = Parser.getTok(); + int64_t OrigDispVal, DotDispVal; // FIXME: Handle non-constant expressions. - if (const MCConstantExpr *OrigDisp = dyn_cast(Disp)) { + if (const MCConstantExpr *OrigDisp = dyn_cast(Disp)) OrigDispVal = OrigDisp->getValue(); - } else { - Err = "Non-constant offsets are not supported!"; - return true; - } + else + return Error(Tok.getLoc(), "Non-constant offsets are not supported!"); // Drop the '.'. StringRef DotDispStr = Tok.getString().drop_front(1); @@ -1100,23 +1736,15 @@ bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, APInt DotDisp; DotDispStr.getAsInteger(10, DotDisp); DotDispVal = DotDisp.getZExtValue(); - } else if (Tok.is(AsmToken::Identifier)) { - // We should only see an identifier when parsing the original inline asm. - // The front-end should rewrite this in terms of immediates. - assert (isParsingInlineAsm() && "Unexpected field name!"); - + } else if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) { unsigned DotDisp; std::pair BaseMember = DotDispStr.split('.'); if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second, - DotDisp)) { - Err = "Unable to lookup field reference!"; - return true; - } + DotDisp)) + return Error(Tok.getLoc(), "Unable to lookup field reference!"); DotDispVal = DotDisp; - } else { - Err = "Unexpected token type!"; - return true; - } + } else + return Error(Tok.getLoc(), "Unexpected token type!"); if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) { SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data()); @@ -1126,22 +1754,24 @@ bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, Val)); } - *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext()); + NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext()); return false; } /// Parse the 'offset' operator. This operator is used to specify the /// location rather then the content of a variable. -X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) { - SMLoc OffsetOfLoc = Start; +X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() { + const AsmToken &Tok = Parser.getTok(); + SMLoc OffsetOfLoc = Tok.getLoc(); Parser.Lex(); // Eat offset. - Start = Parser.getTok().getLoc(); - assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier"); - SMLoc End; const MCExpr *Val; - if (getParser().parseExpression(Val, End)) - return ErrorOperand(Start, "Unable to parse expression!"); + InlineAsmIdentifierInfo Info; + SMLoc Start = Tok.getLoc(), End; + StringRef Identifier = Tok.getString(); + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) + return 0; // Don't emit the offset operator. InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7)); @@ -1149,9 +1779,10 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) { // The offset operator will have an 'r' constraint, thus we need to create // register operand to ensure proper matching. Just pick a GPR based on // the size of a pointer. - unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; + unsigned RegNo = + is64BitMode() ? X86::RBX : (is32BitMode() ? 
X86::EBX : X86::BX); return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true, - OffsetOfLoc); + OffsetOfLoc, Identifier, Info.OpDecl); } enum IntelOperatorKind { @@ -1166,34 +1797,28 @@ enum IntelOperatorKind { /// variable. A variable's size is the product of its LENGTH and TYPE. The /// TYPE operator returns the size of a C or C++ type or variable. If the /// variable is an array, TYPE returns the size of a single element. -X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) { - SMLoc TypeLoc = Start; - Parser.Lex(); // Eat offset. - Start = Parser.getTok().getLoc(); - assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier"); - - SMLoc End; - const MCExpr *Val; - if (getParser().parseExpression(Val, End)) +X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) { + const AsmToken &Tok = Parser.getTok(); + SMLoc TypeLoc = Tok.getLoc(); + Parser.Lex(); // Eat operator. + + const MCExpr *Val = 0; + InlineAsmIdentifierInfo Info; + SMLoc Start = Tok.getLoc(), End; + StringRef Identifier = Tok.getString(); + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/true, End)) return 0; - unsigned Length = 0, Size = 0, Type = 0; - if (const MCSymbolRefExpr *SymRef = dyn_cast(Val)) { - const MCSymbol &Sym = SymRef->getSymbol(); - // FIXME: The SemaLookup will fail if the name is anything other then an - // identifier. - // FIXME: Pass a valid SMLoc. - bool IsVarDecl; - if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Length, - Size, Type, IsVarDecl)) - return ErrorOperand(Start, "Unable to lookup expr!"); - } - unsigned CVal; + if (!Info.OpDecl) + return ErrorOperand(Start, "unable to lookup expression"); + + unsigned CVal = 0; switch(OpKind) { default: llvm_unreachable("Unexpected operand kind!"); - case IOK_LENGTH: CVal = Length; break; - case IOK_SIZE: CVal = Size; break; - case IOK_TYPE: CVal = Type; break; + case IOK_LENGTH: CVal = Info.Length; break; + case IOK_SIZE: CVal = Info.Size; break; + case IOK_TYPE: CVal = Info.Type; break; } // Rewrite the type operator and the C or C++ type or variable in terms of an @@ -1202,48 +1827,89 @@ X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) { InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, CVal)); const MCExpr *Imm = MCConstantExpr::Create(CVal, getContext()); - return X86Operand::CreateImm(Imm, Start, End, /*NeedAsmRewrite*/false); + return X86Operand::CreateImm(Imm, Start, End); } X86Operand *X86AsmParser::ParseIntelOperand() { - SMLoc Start = Parser.getTok().getLoc(), End; - StringRef AsmTokStr = Parser.getTok().getString(); + const AsmToken &Tok = Parser.getTok(); + SMLoc Start, End; // Offset, length, type and size operators. if (isParsingInlineAsm()) { + StringRef AsmTokStr = Tok.getString(); if (AsmTokStr == "offset" || AsmTokStr == "OFFSET") - return ParseIntelOffsetOfOperator(Start); + return ParseIntelOffsetOfOperator(); if (AsmTokStr == "length" || AsmTokStr == "LENGTH") - return ParseIntelOperator(Start, IOK_LENGTH); + return ParseIntelOperator(IOK_LENGTH); if (AsmTokStr == "size" || AsmTokStr == "SIZE") - return ParseIntelOperator(Start, IOK_SIZE); + return ParseIntelOperator(IOK_SIZE); if (AsmTokStr == "type" || AsmTokStr == "TYPE") - return ParseIntelOperator(Start, IOK_TYPE); + return ParseIntelOperator(IOK_TYPE); } + unsigned Size = getIntelMemOperandSize(Tok.getString()); + if (Size) { + Parser.Lex(); // Eat operand size (e.g., byte, word). 
+    if (Tok.getString() != "PTR" && Tok.getString() != "ptr")
+      return ErrorOperand(Start, "Expected 'PTR' or 'ptr' token!");
+    Parser.Lex(); // Eat ptr.
+  }
+  Start = Tok.getLoc();
+
   // Immediate.
-  if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) ||
-      getLexer().is(AsmToken::Minus)) {
-    const MCExpr *Val;
-    if (!getParser().parseExpression(Val, End)) {
-      return X86Operand::CreateImm(Val, Start, End);
+  if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Minus) ||
+      getLexer().is(AsmToken::LParen)) {
+    AsmToken StartTok = Tok;
+    IntelExprStateMachine SM(/*Imm=*/0, /*StopOnLBrac=*/true,
+                             /*AddImmPrefix=*/false);
+    if (ParseIntelExpression(SM, End))
+      return 0;
+
+    int64_t Imm = SM.getImm();
+    if (isParsingInlineAsm()) {
+      unsigned Len = Tok.getLoc().getPointer() - Start.getPointer();
+      if (StartTok.getString().size() == Len)
+        // Just add a prefix if this wasn't a complex immediate expression.
+        InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, Start));
+      else
+        // Otherwise, rewrite the complex expression as a single immediate.
+        InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, Start, Len, Imm));
+    }
+
+    if (getLexer().isNot(AsmToken::LBrac)) {
+      // If a directional label (ie. 1f or 2b) was parsed above from
+      // ParseIntelExpression() then SM.getSym() was set to a pointer to
+      // to the MCExpr with the directional local symbol and this is a
+      // memory operand not an immediate operand.
+      if (SM.getSym())
+        return X86Operand::CreateMem(SM.getSym(), Start, End, Size);
+
+      const MCExpr *ImmExpr = MCConstantExpr::Create(Imm, getContext());
+      return X86Operand::CreateImm(ImmExpr, Start, End);
     }
+
+    // Only positive immediates are valid.
+    if (Imm < 0)
+      return ErrorOperand(Start, "expected a positive immediate displacement "
+                          "before bracketed expr.");
+
+    // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ].
+    return ParseIntelMemOperand(Imm, Start, Size);
   }
 
   // Register.
   unsigned RegNo = 0;
   if (!ParseRegister(RegNo, Start, End)) {
     // If this is a segment register followed by a ':', then this is the start
-    // of a memory reference, otherwise this is a normal register reference.
+    // of a segment override, otherwise this is a normal register reference.
     if (getLexer().isNot(AsmToken::Colon))
       return X86Operand::CreateReg(RegNo, Start, End);
 
-    getParser().Lex(); // Eat the colon.
-    return ParseIntelMemOperand(RegNo, Start);
+    return ParseIntelSegmentOverride(/*SegReg=*/RegNo, Start, Size);
   }
 
   // Memory operand.
-  return ParseIntelMemOperand(0, Start);
+  return ParseIntelMemOperand(/*Disp=*/0, Start, Size);
 }
 
 X86Operand *X86AsmParser::ParseATTOperand() {
@@ -1267,7 +1933,6 @@ X86Operand *X86AsmParser::ParseATTOperand() {
     if (getLexer().isNot(AsmToken::Colon))
       return X86Operand::CreateReg(RegNo, Start, End);
 
-    getParser().Lex(); // Eat the colon.
     return ParseMemOperand(RegNo, Start);
   }
 
@@ -1340,10 +2005,11 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
   // If we reached here, then we just ate the ( of the memory operand. Process
   // the rest of the memory operand.
   unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
-  SMLoc IndexLoc;
+  SMLoc IndexLoc, BaseLoc;
 
   if (getLexer().is(AsmToken::Percent)) {
     SMLoc StartLoc, EndLoc;
+    BaseLoc = Parser.getTok().getLoc();
     if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0;
     if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
       Error(StartLoc, "eiz and riz can only be used as index registers",
@@ -1386,6 +2052,11 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
       }
 
       // Validate the scale amount.
+      if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
+          ScaleVal != 1) {
+        Error(Loc, "scale factor in 16-bit address must be 1");
+        return 0;
+      }
       if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
         Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
         return 0;
@@ -1416,6 +2087,21 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
   SMLoc MemEnd = Parser.getTok().getEndLoc();
   Parser.Lex(); // Eat the ')'.
 
+  // Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed,
+  // and then only in non-64-bit modes. Except for DX, which is a special case
+  // because an unofficial form of in/out instructions uses it.
+  if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
+      (is64BitMode() || (BaseReg != X86::BX && BaseReg != X86::BP &&
+                         BaseReg != X86::SI && BaseReg != X86::DI)) &&
+      BaseReg != X86::DX) {
+    Error(BaseLoc, "invalid 16-bit base register");
+    return 0;
+  }
+  if (BaseReg == 0 &&
+      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
+    Error(IndexLoc, "16-bit memory operand may not include only index register");
+    return 0;
+  }
   // If we have both a base register and an index register make sure they are
   // both 64-bit or 32-bit registers.
   // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
@@ -1424,16 +2110,30 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
        IndexReg != X86::RIZ) {
-      Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
+      Error(BaseLoc, "base register is 64-bit, but index register is not");
      return 0;
    }
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
        IndexReg != X86::EIZ){
-      Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
+      Error(BaseLoc, "base register is 32-bit, but index register is not");
      return 0;
    }
+    if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
+      if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
+          X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
+        Error(BaseLoc, "base register is 16-bit, but index register is not");
+        return 0;
+      }
+      if (((BaseReg == X86::BX || BaseReg == X86::BP) &&
+           IndexReg != X86::SI && IndexReg != X86::DI) ||
+          ((BaseReg == X86::SI || BaseReg == X86::DI) &&
+           IndexReg != X86::BX && IndexReg != X86::BP)) {
+        Error(BaseLoc, "invalid 16-bit base/index register combination");
+        return 0;
+      }
+    }
   }
 
   return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
@@ -1530,11 +2230,8 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
   if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
 
     // Parse '*' modifier.
-    if (getLexer().is(AsmToken::Star)) {
-      SMLoc Loc = Parser.getTok().getLoc();
-      Operands.push_back(X86Operand::CreateToken("*", Loc));
-      Parser.Lex(); // Eat the star.
-    }
+    if (getLexer().is(AsmToken::Star))
+      Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
 
     // Read the first operand.
     if (X86Operand *Op = ParseOperand())
@@ -1556,6 +2253,42 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
     }
   }
 
+  if (STI.getFeatureBits() & X86::FeatureAVX512) {
+    // Parse mask register {%k1}
+    if (getLexer().is(AsmToken::LCurly)) {
+      Operands.push_back(X86Operand::CreateToken("{", consumeToken()));
+      if (X86Operand *Op = ParseOperand()) {
+        Operands.push_back(Op);
+        if (!getLexer().is(AsmToken::RCurly)) {
+          SMLoc Loc = getLexer().getLoc();
+          Parser.eatToEndOfStatement();
+          return Error(Loc, "Expected } at this point");
+        }
+        Operands.push_back(X86Operand::CreateToken("}", consumeToken()));
+      } else {
+        Parser.eatToEndOfStatement();
+        return true;
+      }
+    }
+    // TODO: add parsing of broadcasts {1to8}, {1to16}
+    // Parse "zeroing non-masked" semantic {z}
+    if (getLexer().is(AsmToken::LCurly)) {
+      Operands.push_back(X86Operand::CreateToken("{z}", consumeToken()));
+      if (!getLexer().is(AsmToken::Identifier) || getLexer().getTok().getIdentifier() != "z") {
+        SMLoc Loc = getLexer().getLoc();
+        Parser.eatToEndOfStatement();
+        return Error(Loc, "Expected z at this point");
+      }
+      Parser.Lex();  // Eat the z
+      if (!getLexer().is(AsmToken::RCurly)) {
+        SMLoc Loc = getLexer().getLoc();
+        Parser.eatToEndOfStatement();
+        return Error(Loc, "Expected } at this point");
+      }
+      Parser.Lex();  // Eat the }
+    }
+  }
+
   if (getLexer().isNot(AsmToken::EndOfStatement)) {
     SMLoc Loc = getLexer().getLoc();
     Parser.eatToEndOfStatement();
@@ -1638,64 +2371,48 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
       delete &Op2;
     }
   }
-  // Transform "lods[bwl] %ds:(%esi),{%al,%ax,%eax,%rax}" into "lods[bwl]"
-  if (Name.startswith("lods") && Operands.size() == 3 &&
+  // Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
+  // values of $SIREG according to the mode. It would be nice if this
+  // could be achieved with InstAlias in the tables.
+  if (Name.startswith("lods") && Operands.size() == 1 &&
      (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
-      Name == "lodsl" || (is64BitMode() && Name == "lodsq"))) {
-    X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
-    X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
-    if (isSrcOp(*Op1) && Op2->isReg()) {
-      const char *ins;
-      unsigned reg = Op2->getReg();
-      bool isLods = Name == "lods";
-      if (reg == X86::AL && (isLods || Name == "lodsb"))
-        ins = "lodsb";
-      else if (reg == X86::AX && (isLods || Name == "lodsw"))
-        ins = "lodsw";
-      else if (reg == X86::EAX && (isLods || Name == "lodsl"))
-        ins = "lodsl";
-      else if (reg == X86::RAX && (isLods || Name == "lodsq"))
-        ins = "lodsq";
-      else
-        ins = NULL;
-      if (ins != NULL) {
-        Operands.pop_back();
-        Operands.pop_back();
-        delete Op1;
-        delete Op2;
-        if (Name != ins)
-          static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
-      }
-    }
-  }
-  // Transform "stos[bwl] {%al,%ax,%eax,%rax},%es:(%edi)" into "stos[bwl]"
-  if (Name.startswith("stos") && Operands.size() == 3 &&
+      Name == "lodsl" || Name == "lodsd" || Name == "lodsq"))
+    Operands.push_back(DefaultMemSIOperand(NameLoc));
+
+  // Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
+  // values of $DIREG according to the mode. It would be nice if this
+  // could be achieved with InstAlias in the tables.
+  if (Name.startswith("stos") && Operands.size() == 1 &&
      (Name == "stos" || Name == "stosb" || Name == "stosw" ||
-      Name == "stosl" || (is64BitMode() && Name == "stosq"))) {
-    X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
-    X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
-    if (isDstOp(*Op2) && Op1->isReg()) {
-      const char *ins;
-      unsigned reg = Op1->getReg();
-      bool isStos = Name == "stos";
-      if (reg == X86::AL && (isStos || Name == "stosb"))
-        ins = "stosb";
-      else if (reg == X86::AX && (isStos || Name == "stosw"))
-        ins = "stosw";
-      else if (reg == X86::EAX && (isStos || Name == "stosl"))
-        ins = "stosl";
-      else if (reg == X86::RAX && (isStos || Name == "stosq"))
-        ins = "stosq";
-      else
-        ins = NULL;
-      if (ins != NULL) {
-        Operands.pop_back();
-        Operands.pop_back();
-        delete Op1;
-        delete Op2;
-        if (Name != ins)
-          static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
+      Name == "stosl" || Name == "stosd" || Name == "stosq"))
+    Operands.push_back(DefaultMemDIOperand(NameLoc));
+
+  // Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
+  // values of $DIREG according to the mode. It would be nice if this
+  // could be achieved with InstAlias in the tables.
+  if (Name.startswith("scas") && Operands.size() == 1 &&
+      (Name == "scas" || Name == "scasb" || Name == "scasw" ||
+       Name == "scasl" || Name == "scasd" || Name == "scasq"))
+    Operands.push_back(DefaultMemDIOperand(NameLoc));
+
+  // Add default SI and DI operands to "cmps[bwlq]".
+  if (Name.startswith("cmps") &&
+      (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
+       Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
+    if (Operands.size() == 1) {
+      if (isParsingIntelSyntax()) {
+        Operands.push_back(DefaultMemSIOperand(NameLoc));
+        Operands.push_back(DefaultMemDIOperand(NameLoc));
+      } else {
+        Operands.push_back(DefaultMemDIOperand(NameLoc));
+        Operands.push_back(DefaultMemSIOperand(NameLoc));
      }
+    } else if (Operands.size() == 3) {
+      X86Operand &Op = *(X86Operand*)Operands.begin()[1];
+      X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
+      if (!doSrcDstMatch(Op, Op2))
+        return Error(Op.getStartLoc(),
+                     "mismatching source and destination index registers");
    }
  }
 
@@ -1801,6 +2518,61 @@ processInstruction(MCInst &Inst,
   case X86::SUB16i16: return convert16i16to16ri8(Inst, X86::SUB16ri8);
   case X86::SUB32i32: return convert32i32to32ri8(Inst, X86::SUB32ri8);
   case X86::SUB64i32: return convert64i32to64ri8(Inst, X86::SUB64ri8);
+  case X86::ADC16i16: return convert16i16to16ri8(Inst, X86::ADC16ri8);
+  case X86::ADC32i32: return convert32i32to32ri8(Inst, X86::ADC32ri8);
+  case X86::ADC64i32: return convert64i32to64ri8(Inst, X86::ADC64ri8);
+  case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8);
+  case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8);
+  case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8);
+  case X86::VMOVAPDrr:
+  case X86::VMOVAPDYrr:
+  case X86::VMOVAPSrr:
+  case X86::VMOVAPSYrr:
+  case X86::VMOVDQArr:
+  case X86::VMOVDQAYrr:
+  case X86::VMOVDQUrr:
+  case X86::VMOVDQUYrr:
+  case X86::VMOVUPDrr:
+  case X86::VMOVUPDYrr:
+  case X86::VMOVUPSrr:
+  case X86::VMOVUPSYrr: {
+    if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
+        !X86II::isX86_64ExtendedReg(Inst.getOperand(1).getReg()))
+      return false;
+
+    unsigned NewOpc;
+    switch (Inst.getOpcode()) {
+    default: llvm_unreachable("Invalid opcode");
+    case X86::VMOVAPDrr:  NewOpc = X86::VMOVAPDrr_REV;  break;
+    case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
+    case X86::VMOVAPSrr:  NewOpc = X86::VMOVAPSrr_REV;  break;
+    case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
+    case X86::VMOVDQArr:  NewOpc = X86::VMOVDQArr_REV;  break;
+    case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
+    case X86::VMOVDQUrr:  NewOpc = X86::VMOVDQUrr_REV;  break;
+    case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
+    case X86::VMOVUPDrr:  NewOpc = X86::VMOVUPDrr_REV;  break;
+    case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
+    case X86::VMOVUPSrr:  NewOpc = X86::VMOVUPSrr_REV;  break;
+    case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
+    }
+    Inst.setOpcode(NewOpc);
+    return true;
+  }
+  case X86::VMOVSDrr:
+  case X86::VMOVSSrr: {
+    if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
+        !X86II::isX86_64ExtendedReg(Inst.getOperand(2).getReg()))
+      return false;
+    unsigned NewOpc;
+    switch (Inst.getOpcode()) {
+    default: llvm_unreachable("Invalid opcode");
+    case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
+    case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
+    }
+    Inst.setOpcode(NewOpc);
+    return true;
+  }
   }
 }
 
@@ -1813,7 +2585,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
   assert(!Operands.empty() && "Unexpect empty operand list!");
   X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
   assert(Op->isToken() && "Leading operand should always be a mnemonic!");
-  ArrayRef<SMRange> EmptyRanges = ArrayRef<SMRange>();
+  ArrayRef<SMRange> EmptyRanges = None;
 
   // First, handle aliases that expand to multiple instructions.
   // FIXME: This should be replaced with a real .td file alias mechanism.
@@ -1915,25 +2687,25 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
   unsigned Match1, Match2, Match3, Match4;
 
   Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
-                                isParsingIntelSyntax());
+                                MatchingInlineAsm, isParsingIntelSyntax());
   // If this returned as a missing feature failure, remember that.
   if (Match1 == Match_MissingFeature)
     ErrorInfoMissingFeature = ErrorInfoIgnore;
   Tmp[Base.size()] = Suffixes[1];
   Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
-                                isParsingIntelSyntax());
+                                MatchingInlineAsm, isParsingIntelSyntax());
   // If this returned as a missing feature failure, remember that.
   if (Match2 == Match_MissingFeature)
     ErrorInfoMissingFeature = ErrorInfoIgnore;
   Tmp[Base.size()] = Suffixes[2];
   Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
-                                isParsingIntelSyntax());
+                                MatchingInlineAsm, isParsingIntelSyntax());
   // If this returned as a missing feature failure, remember that.
   if (Match3 == Match_MissingFeature)
     ErrorInfoMissingFeature = ErrorInfoIgnore;
   Tmp[Base.size()] = Suffixes[3];
   Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
-                                isParsingIntelSyntax());
+                                MatchingInlineAsm, isParsingIntelSyntax());
   // If this returned as a missing feature failure, remember that.
   if (Match4 == Match_MissingFeature)
     ErrorInfoMissingFeature = ErrorInfoIgnore;
@@ -2057,11 +2829,9 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
   } else if (IDVal.startswith(".intel_syntax")) {
     getParser().setAssemblerDialect(1);
     if (getLexer().isNot(AsmToken::EndOfStatement)) {
-      if(Parser.getTok().getString() == "noprefix") {
-        // FIXME : Handle noprefix
+      // FIXME: Handle noprefix
+      if (Parser.getTok().getString() == "noprefix")
         Parser.Lex();
-      } else
-        return true;
     }
     return false;
   }
@@ -2075,7 +2845,7 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
     for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
-        return true;
+        return false;
 
      getParser().getStreamer().EmitValue(Value, Size);
 
@@ -2083,8 +2853,10 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
        break;
 
      // FIXME: Improve diagnostic.
-      if (getLexer().isNot(AsmToken::Comma))
-        return Error(L, "unexpected token in directive");
+      if (getLexer().isNot(AsmToken::Comma)) {
+        Error(L, "unexpected token in directive");
+        return false;
+      }
      Parser.Lex();
    }
  }
@@ -2094,22 +2866,29 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
 }
 
 /// ParseDirectiveCode
-///  ::= .code32 | .code64
+///  ::= .code16 | .code32 | .code64
 bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
-  if (IDVal == ".code32") {
+  if (IDVal == ".code16") {
    Parser.Lex();
-    if (is64BitMode()) {
-      SwitchMode();
+    if (!is16BitMode()) {
+      SwitchMode(X86::Mode16Bit);
+      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
+    }
+  } else if (IDVal == ".code32") {
+    Parser.Lex();
+    if (!is32BitMode()) {
+      SwitchMode(X86::Mode32Bit);
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
    }
  } else if (IDVal == ".code64") {
    Parser.Lex();
    if (!is64BitMode()) {
-      SwitchMode();
+      SwitchMode(X86::Mode64Bit);
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
    }
  } else {
-    return Error(L, "unexpected directive " + IDVal);
+    Error(L, "unknown directive " + IDVal);
+    return false;
  }
 
  return false;