X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FAsmParser%2FX86AsmParser.cpp;h=b30eeeb0e037f0b7a419f7d687d3adb180111893;hb=c50f986b4d8ecbf3437c67e987e26e268ef5f06a;hp=3d56acf917234c389c2bdbe1c76af393faa4b291;hpb=f9d2d2dc89f0c2d39f597038ee723fb9c9af91da;p=oota-llvm.git diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp index 3d56acf9172..b30eeeb0e03 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -8,6 +8,9 @@ //===----------------------------------------------------------------------===// #include "MCTargetDesc/X86BaseInfo.h" +#include "X86AsmInstrumentation.h" +#include "X86AsmParserCommon.h" +#include "X86Operand.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" @@ -17,6 +20,7 @@ #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCParser/MCAsmLexer.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include "llvm/MC/MCParser/MCParsedAsmOperand.h" @@ -28,19 +32,23 @@ #include "llvm/Support/SourceMgr.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" +#include using namespace llvm; namespace { -struct X86Operand; static const char OpPrecedence[] = { - 0, // IC_PLUS - 0, // IC_MINUS - 1, // IC_MULTIPLY - 1, // IC_DIVIDE - 2, // IC_RPAREN - 3, // IC_LPAREN + 0, // IC_OR + 1, // IC_AND + 2, // IC_LSHIFT + 2, // IC_RSHIFT + 3, // IC_PLUS + 3, // IC_MINUS + 4, // IC_MULTIPLY + 4, // IC_DIVIDE + 5, // IC_RPAREN + 6, // IC_LPAREN 0, // IC_IMM 0 // IC_REGISTER }; @@ -48,10 +56,22 @@ static const char OpPrecedence[] = { class X86AsmParser : public MCTargetAsmParser { MCSubtargetInfo &STI; MCAsmParser &Parser; + const MCInstrInfo &MII; ParseInstructionInfo *InstInfo; + std::unique_ptr Instrumentation; private: + SMLoc consumeToken() { + SMLoc Result = Parser.getTok().getLoc(); + Parser.Lex(); + return Result; + } + enum InfixCalculatorTok { - IC_PLUS = 0, + IC_OR = 0, + IC_AND, + IC_LSHIFT, + IC_RSHIFT, + IC_PLUS, IC_MINUS, IC_MULTIPLY, IC_DIVIDE, @@ -176,6 +196,30 @@ private: Val = Op1.second / Op2.second; OperandStack.push_back(std::make_pair(IC_IMM, Val)); break; + case IC_OR: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Or operation with an immediate and a register!"); + Val = Op1.second | Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_AND: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "And operation with an immediate and a register!"); + Val = Op1.second & Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_LSHIFT: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Left shift operation with an immediate and a register!"); + Val = Op1.second << Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; + case IC_RSHIFT: + assert (Op1.first == IC_IMM && Op2.first == IC_IMM && + "Right shift operation with an immediate and a register!"); + Val = Op1.second >> Op2.second; + OperandStack.push_back(std::make_pair(IC_IMM, Val)); + break; } } } @@ -185,6 +229,10 @@ private: }; enum IntelExprState { + IES_OR, + IES_AND, + IES_LSHIFT, + IES_RSHIFT, IES_PLUS, IES_MINUS, IES_MULTIPLY, @@ -211,7 +259,7 @@ private: public: IntelExprStateMachine(int64_t imm, bool stoponlbrac, bool addimmprefix) : State(IES_PLUS), PrevState(IES_ERROR), BaseReg(0), IndexReg(0), TmpReg(0), - Scale(1), Imm(imm), 
Sym(0), StopOnLBrac(stoponlbrac), + Scale(1), Imm(imm), Sym(nullptr), StopOnLBrac(stoponlbrac), AddImmPrefix(addimmprefix) { Info.clear(); } unsigned getBaseReg() { return BaseReg; } @@ -231,6 +279,66 @@ private: return Info; } + void onOr() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_OR; + IC.pushOperator(IC_OR); + break; + } + PrevState = CurrState; + } + void onAnd() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_AND; + IC.pushOperator(IC_AND); + break; + } + PrevState = CurrState; + } + void onLShift() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_LSHIFT; + IC.pushOperator(IC_LSHIFT); + break; + } + PrevState = CurrState; + } + void onRShift() { + IntelExprState CurrState = State; + switch (State) { + default: + State = IES_ERROR; + break; + case IES_INTEGER: + case IES_RPAREN: + case IES_REGISTER: + State = IES_RSHIFT; + IC.pushOperator(IC_RSHIFT); + break; + } + PrevState = CurrState; + } void onPlus() { IntelExprState CurrState = State; switch (State) { @@ -337,7 +445,7 @@ private: break; } } - void onInteger(int64_t TmpInt) { + bool onInteger(int64_t TmpInt, StringRef &ErrMsg) { IntelExprState CurrState = State; switch (State) { default: @@ -345,6 +453,10 @@ private: break; case IES_PLUS: case IES_MINUS: + case IES_OR: + case IES_AND: + case IES_LSHIFT: + case IES_RSHIFT: case IES_DIVIDE: case IES_MULTIPLY: case IES_LPAREN: @@ -354,9 +466,15 @@ private: assert (!IndexReg && "IndexReg already set!"); IndexReg = TmpReg; Scale = TmpInt; + if(Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) { + ErrMsg = "scale factor in address must be 1, 2, 4 or 8"; + return true; + } // Get the scale and replace the 'Register * Scale' with '0'. IC.popOperator(); } else if ((PrevState == IES_PLUS || PrevState == IES_MINUS || + PrevState == IES_OR || PrevState == IES_AND || + PrevState == IES_LSHIFT || PrevState == IES_RSHIFT || PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE || PrevState == IES_LPAREN || PrevState == IES_LBRAC) && CurrState == IES_MINUS) { @@ -369,6 +487,7 @@ private: break; } PrevState = CurrState; + return false; } void onStar() { PrevState = State; @@ -442,11 +561,17 @@ private: break; case IES_PLUS: case IES_MINUS: + case IES_OR: + case IES_AND: + case IES_LSHIFT: + case IES_RSHIFT: case IES_MULTIPLY: case IES_DIVIDE: case IES_LPAREN: // FIXME: We don't handle this type of unary minus, yet. 
if ((PrevState == IES_PLUS || PrevState == IES_MINUS || + PrevState == IES_OR || PrevState == IES_AND || + PrevState == IES_LSHIFT || PrevState == IES_RSHIFT || PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE || PrevState == IES_LPAREN || PrevState == IES_LBRAC) && CurrState == IES_MINUS) { @@ -486,61 +611,91 @@ private: return Parser.Error(L, Msg, Ranges); } - X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) { + bool ErrorAndEatStatement(SMLoc L, const Twine &Msg, + ArrayRef Ranges = None, + bool MatchingInlineAsm = false) { + Parser.eatToEndOfStatement(); + return Error(L, Msg, Ranges, MatchingInlineAsm); + } + + std::nullptr_t ErrorOperand(SMLoc Loc, StringRef Msg) { Error(Loc, Msg); - return 0; - } - - X86Operand *ParseOperand(); - X86Operand *ParseATTOperand(); - X86Operand *ParseIntelOperand(); - X86Operand *ParseIntelOffsetOfOperator(); - X86Operand *ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp); - X86Operand *ParseIntelOperator(unsigned OpKind); - X86Operand *ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start, unsigned Size); - X86Operand *ParseIntelMemOperand(int64_t ImmDisp, SMLoc StartLoc, - unsigned Size); - X86Operand *ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End); - X86Operand *ParseIntelBracExpression(unsigned SegReg, SMLoc Start, - int64_t ImmDisp, unsigned Size); - X86Operand *ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier, - InlineAsmIdentifierInfo &Info, - bool IsUnevaluatedOperand, SMLoc &End); - - X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc); - - X86Operand *CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, - unsigned BaseReg, unsigned IndexReg, - unsigned Scale, SMLoc Start, SMLoc End, - unsigned Size, StringRef Identifier, - InlineAsmIdentifierInfo &Info); + return nullptr; + } + + std::unique_ptr DefaultMemSIOperand(SMLoc Loc); + std::unique_ptr DefaultMemDIOperand(SMLoc Loc); + std::unique_ptr ParseOperand(); + std::unique_ptr ParseATTOperand(); + std::unique_ptr ParseIntelOperand(); + std::unique_ptr ParseIntelOffsetOfOperator(); + bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp); + std::unique_ptr ParseIntelOperator(unsigned OpKind); + std::unique_ptr + ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start, unsigned Size); + std::unique_ptr + ParseIntelMemOperand(int64_t ImmDisp, SMLoc StartLoc, unsigned Size); + bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End); + std::unique_ptr ParseIntelBracExpression(unsigned SegReg, + SMLoc Start, + int64_t ImmDisp, + unsigned Size); + bool ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier, + InlineAsmIdentifierInfo &Info, + bool IsUnevaluatedOperand, SMLoc &End); + + std::unique_ptr ParseMemOperand(unsigned SegReg, SMLoc StartLoc); + + std::unique_ptr + CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, + unsigned IndexReg, unsigned Scale, SMLoc Start, + SMLoc End, unsigned Size, StringRef Identifier, + InlineAsmIdentifierInfo &Info); bool ParseDirectiveWord(unsigned Size, SMLoc L); bool ParseDirectiveCode(StringRef IDVal, SMLoc L); - bool processInstruction(MCInst &Inst, - const SmallVectorImpl &Ops); + bool processInstruction(MCInst &Inst, const OperandVector &Ops); + + /// Wrapper around MCStreamer::EmitInstruction(). Possibly adds + /// instrumentation around Inst. 
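+  /// (Instrumentation is provided by the X86AsmInstrumentation object that
+  /// the constructor creates via CreateX86AsmInstrumentation, e.g. for
+  /// AddressSanitizer-style checks around memory-accessing instructions.)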
+ void EmitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out); bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, - SmallVectorImpl &Operands, - MCStreamer &Out, unsigned &ErrorInfo, - bool MatchingInlineAsm); + OperandVector &Operands, MCStreamer &Out, + unsigned &ErrorInfo, + bool MatchingInlineAsm) override; - /// isSrcOp - Returns true if operand is either (%rsi) or %ds:%(rsi) - /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode. - bool isSrcOp(X86Operand &Op); + /// doSrcDstMatch - Returns true if operands are matching in their + /// word size (%si and %di, %esi and %edi, etc.). Order depends on + /// the parsing mode (Intel vs. AT&T). + bool doSrcDstMatch(X86Operand &Op1, X86Operand &Op2); - /// isDstOp - Returns true if operand is either (%rdi) or %es:(%rdi) - /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode. - bool isDstOp(X86Operand &Op); + /// Parses AVX512 specific operand primitives: masked registers ({%k}, {z}) + /// and memory broadcasting ({1to}) primitives, updating Operands vector if required. + /// \return \c true if no parsing errors occurred, \c false otherwise. + bool HandleAVX512Operand(OperandVector &Operands, + const MCParsedAsmOperand &Op); bool is64BitMode() const { // FIXME: Can tablegen auto-generate this? return (STI.getFeatureBits() & X86::Mode64Bit) != 0; } - void SwitchMode() { - unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(X86::Mode64Bit)); + bool is32BitMode() const { + // FIXME: Can tablegen auto-generate this? + return (STI.getFeatureBits() & X86::Mode32Bit) != 0; + } + bool is16BitMode() const { + // FIXME: Can tablegen auto-generate this? + return (STI.getFeatureBits() & X86::Mode16Bit) != 0; + } + void SwitchMode(uint64_t mode) { + uint64_t oldMode = STI.getFeatureBits() & + (X86::Mode64Bit | X86::Mode32Bit | X86::Mode16Bit); + unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(oldMode | mode)); setAvailableFeatures(FB); + assert(mode == (STI.getFeatureBits() & + (X86::Mode64Bit | X86::Mode32Bit | X86::Mode16Bit))); } bool isParsingIntelSyntax() { @@ -556,19 +711,24 @@ private: /// } public: - X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser) - : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) { + X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser, + const MCInstrInfo &mii, + const MCTargetOptions &Options) + : MCTargetAsmParser(), STI(sti), Parser(parser), MII(mii), + InstInfo(nullptr) { // Initialize the set of available features. 
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); + Instrumentation.reset( + CreateX86AsmInstrumentation(Options, Parser.getContext(), STI)); } - virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); - virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, - SMLoc NameLoc, - SmallVectorImpl &Operands); + bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; + + bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) override; - virtual bool ParseDirective(AsmToken DirectiveID); + bool ParseDirective(AsmToken DirectiveID) override; }; } // end anonymous namespace @@ -579,433 +739,63 @@ static unsigned MatchRegisterName(StringRef Name); /// } -static bool isImmSExti16i8Value(uint64_t Value) { - return (( Value <= 0x000000000000007FULL)|| - (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)|| - (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL)); -} - -static bool isImmSExti32i8Value(uint64_t Value) { - return (( Value <= 0x000000000000007FULL)|| - (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)|| - (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL)); -} - -static bool isImmZExtu32u8Value(uint64_t Value) { - return (Value <= 0x00000000000000FFULL); -} - -static bool isImmSExti64i8Value(uint64_t Value) { - return (( Value <= 0x000000000000007FULL)|| - (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL)); -} - -static bool isImmSExti64i32Value(uint64_t Value) { - return (( Value <= 0x000000007FFFFFFFULL)|| - (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL)); -} -namespace { - -/// X86Operand - Instances of this class represent a parsed X86 machine -/// instruction. -struct X86Operand : public MCParsedAsmOperand { - enum KindTy { - Token, - Register, - Immediate, - Memory - } Kind; - - SMLoc StartLoc, EndLoc; - SMLoc OffsetOfLoc; - StringRef SymName; - void *OpDecl; - bool AddressOf; - - struct TokOp { - const char *Data; - unsigned Length; - }; - - struct RegOp { - unsigned RegNo; - }; - - struct ImmOp { - const MCExpr *Val; - }; - - struct MemOp { - unsigned SegReg; - const MCExpr *Disp; - unsigned BaseReg; - unsigned IndexReg; - unsigned Scale; - unsigned Size; - }; - - union { - struct TokOp Tok; - struct RegOp Reg; - struct ImmOp Imm; - struct MemOp Mem; - }; - - X86Operand(KindTy K, SMLoc Start, SMLoc End) - : Kind(K), StartLoc(Start), EndLoc(End) {} - - StringRef getSymName() { return SymName; } - void *getOpDecl() { return OpDecl; } - - /// getStartLoc - Get the location of the first token of this operand. - SMLoc getStartLoc() const { return StartLoc; } - /// getEndLoc - Get the location of the last token of this operand. - SMLoc getEndLoc() const { return EndLoc; } - /// getLocRange - Get the range between the first and last token of this - /// operand. - SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } - /// getOffsetOfLoc - Get the location of the offset operator. 
- SMLoc getOffsetOfLoc() const { return OffsetOfLoc; } - - virtual void print(raw_ostream &OS) const {} - - StringRef getToken() const { - assert(Kind == Token && "Invalid access!"); - return StringRef(Tok.Data, Tok.Length); - } - void setTokenValue(StringRef Value) { - assert(Kind == Token && "Invalid access!"); - Tok.Data = Value.data(); - Tok.Length = Value.size(); - } - - unsigned getReg() const { - assert(Kind == Register && "Invalid access!"); - return Reg.RegNo; - } - - const MCExpr *getImm() const { - assert(Kind == Immediate && "Invalid access!"); - return Imm.Val; - } - - const MCExpr *getMemDisp() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.Disp; - } - unsigned getMemSegReg() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.SegReg; - } - unsigned getMemBaseReg() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.BaseReg; - } - unsigned getMemIndexReg() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.IndexReg; - } - unsigned getMemScale() const { - assert(Kind == Memory && "Invalid access!"); - return Mem.Scale; - } - - bool isToken() const {return Kind == Token; } - - bool isImm() const { return Kind == Immediate; } - - bool isImmSExti16i8() const { - if (!isImm()) - return false; - - // If this isn't a constant expr, just assume it fits and let relaxation - // handle it. - const MCConstantExpr *CE = dyn_cast(getImm()); - if (!CE) - return true; - - // Otherwise, check the value is in a range that makes sense for this - // extension. - return isImmSExti16i8Value(CE->getValue()); - } - bool isImmSExti32i8() const { - if (!isImm()) - return false; - - // If this isn't a constant expr, just assume it fits and let relaxation - // handle it. - const MCConstantExpr *CE = dyn_cast(getImm()); - if (!CE) - return true; - - // Otherwise, check the value is in a range that makes sense for this - // extension. - return isImmSExti32i8Value(CE->getValue()); - } - bool isImmZExtu32u8() const { - if (!isImm()) - return false; - - // If this isn't a constant expr, just assume it fits and let relaxation - // handle it. - const MCConstantExpr *CE = dyn_cast(getImm()); - if (!CE) - return true; - - // Otherwise, check the value is in a range that makes sense for this - // extension. - return isImmZExtu32u8Value(CE->getValue()); - } - bool isImmSExti64i8() const { - if (!isImm()) - return false; - - // If this isn't a constant expr, just assume it fits and let relaxation - // handle it. - const MCConstantExpr *CE = dyn_cast(getImm()); - if (!CE) +static bool CheckBaseRegAndIndexReg(unsigned BaseReg, unsigned IndexReg, + StringRef &ErrMsg) { + // If we have both a base register and an index register make sure they are + // both 64-bit or 32-bit registers. + // To support VSIB, IndexReg can be 128-bit or 256-bit registers. + if (BaseReg != 0 && IndexReg != 0) { + if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) && + (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) || + X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) && + IndexReg != X86::RIZ) { + ErrMsg = "base register is 64-bit, but index register is not"; return true; - - // Otherwise, check the value is in a range that makes sense for this - // extension. - return isImmSExti64i8Value(CE->getValue()); - } - bool isImmSExti64i32() const { - if (!isImm()) - return false; - - // If this isn't a constant expr, just assume it fits and let relaxation - // handle it. 
- const MCConstantExpr *CE = dyn_cast(getImm()); - if (!CE) + } + if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) && + (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) || + X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) && + IndexReg != X86::EIZ){ + ErrMsg = "base register is 32-bit, but index register is not"; return true; - - // Otherwise, check the value is in a range that makes sense for this - // extension. - return isImmSExti64i32Value(CE->getValue()); - } - - bool isOffsetOf() const { - return OffsetOfLoc.getPointer(); - } - - bool needAddressOf() const { - return AddressOf; - } - - bool isMem() const { return Kind == Memory; } - bool isMem8() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 8); - } - bool isMem16() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 16); - } - bool isMem32() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 32); - } - bool isMem64() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 64); - } - bool isMem80() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 80); - } - bool isMem128() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 128); - } - bool isMem256() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 256); - } - bool isMem512() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 512); - } - - bool isMemVX32() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 32) && - getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; - } - bool isMemVY32() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 32) && - getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; - } - bool isMemVX64() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 64) && - getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; - } - bool isMemVY64() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 64) && - getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; - } - bool isMemVZ32() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 32) && - getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; - } - bool isMemVZ64() const { - return Kind == Memory && (!Mem.Size || Mem.Size == 64) && - getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; - } - - bool isAbsMem() const { - return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && - !getMemIndexReg() && getMemScale() == 1; - } - - bool isMemOffs8() const { - return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && - !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 8); - } - bool isMemOffs16() const { - return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && - !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 16); - } - bool isMemOffs32() const { - return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && - !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 32); - } - bool isMemOffs64() const { - return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && - !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 64); - } - - bool isReg() const { return Kind == Register; } - - void addExpr(MCInst &Inst, const MCExpr *Expr) const { - // Add as immediates when possible. 
- if (const MCConstantExpr *CE = dyn_cast(Expr)) - Inst.addOperand(MCOperand::CreateImm(CE->getValue())); - else - Inst.addOperand(MCOperand::CreateExpr(Expr)); - } - - void addRegOperands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - Inst.addOperand(MCOperand::CreateReg(getReg())); - } - - void addImmOperands(MCInst &Inst, unsigned N) const { - assert(N == 1 && "Invalid number of operands!"); - addExpr(Inst, getImm()); - } - - void addMemOperands(MCInst &Inst, unsigned N) const { - assert((N == 5) && "Invalid number of operands!"); - Inst.addOperand(MCOperand::CreateReg(getMemBaseReg())); - Inst.addOperand(MCOperand::CreateImm(getMemScale())); - Inst.addOperand(MCOperand::CreateReg(getMemIndexReg())); - addExpr(Inst, getMemDisp()); - Inst.addOperand(MCOperand::CreateReg(getMemSegReg())); - } - - void addAbsMemOperands(MCInst &Inst, unsigned N) const { - assert((N == 1) && "Invalid number of operands!"); - // Add as immediates when possible. - if (const MCConstantExpr *CE = dyn_cast(getMemDisp())) - Inst.addOperand(MCOperand::CreateImm(CE->getValue())); - else - Inst.addOperand(MCOperand::CreateExpr(getMemDisp())); - } - - void addMemOffsOperands(MCInst &Inst, unsigned N) const { - assert((N == 1) && "Invalid number of operands!"); - // Add as immediates when possible. - if (const MCConstantExpr *CE = dyn_cast(getMemDisp())) - Inst.addOperand(MCOperand::CreateImm(CE->getValue())); - else - Inst.addOperand(MCOperand::CreateExpr(getMemDisp())); - } - - static X86Operand *CreateToken(StringRef Str, SMLoc Loc) { - SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size()); - X86Operand *Res = new X86Operand(Token, Loc, EndLoc); - Res->Tok.Data = Str.data(); - Res->Tok.Length = Str.size(); - return Res; - } - - static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc, - bool AddressOf = false, - SMLoc OffsetOfLoc = SMLoc(), - StringRef SymName = StringRef(), - void *OpDecl = 0) { - X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc); - Res->Reg.RegNo = RegNo; - Res->AddressOf = AddressOf; - Res->OffsetOfLoc = OffsetOfLoc; - Res->SymName = SymName; - Res->OpDecl = OpDecl; - return Res; - } - - static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){ - X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc); - Res->Imm.Val = Val; - return Res; - } - - /// Create an absolute memory operand. - static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, - unsigned Size = 0, StringRef SymName = StringRef(), - void *OpDecl = 0) { - X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc); - Res->Mem.SegReg = 0; - Res->Mem.Disp = Disp; - Res->Mem.BaseReg = 0; - Res->Mem.IndexReg = 0; - Res->Mem.Scale = 1; - Res->Mem.Size = Size; - Res->SymName = SymName; - Res->OpDecl = OpDecl; - Res->AddressOf = false; - return Res; - } - - /// Create a generalized memory operand. - static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp, - unsigned BaseReg, unsigned IndexReg, - unsigned Scale, SMLoc StartLoc, SMLoc EndLoc, - unsigned Size = 0, - StringRef SymName = StringRef(), - void *OpDecl = 0) { - // We should never just have a displacement, that should be parsed as an - // absolute memory operand. - assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!"); - - // The scale should always be one of {1,2,4,8}. 
- assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) && - "Invalid scale!"); - X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc); - Res->Mem.SegReg = SegReg; - Res->Mem.Disp = Disp; - Res->Mem.BaseReg = BaseReg; - Res->Mem.IndexReg = IndexReg; - Res->Mem.Scale = Scale; - Res->Mem.Size = Size; - Res->SymName = SymName; - Res->OpDecl = OpDecl; - Res->AddressOf = false; - return Res; + } + if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) { + if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) || + X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) { + ErrMsg = "base register is 16-bit, but index register is not"; + return true; + } + if (((BaseReg == X86::BX || BaseReg == X86::BP) && + IndexReg != X86::SI && IndexReg != X86::DI) || + ((BaseReg == X86::SI || BaseReg == X86::DI) && + IndexReg != X86::BX && IndexReg != X86::BP)) { + ErrMsg = "invalid 16-bit base/index register combination"; + return true; + } + } } -}; - -} // end anonymous namespace. - -bool X86AsmParser::isSrcOp(X86Operand &Op) { - unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI; - - return (Op.isMem() && - (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::DS) && - isa(Op.Mem.Disp) && - cast(Op.Mem.Disp)->getValue() == 0 && - Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0); + return false; } -bool X86AsmParser::isDstOp(X86Operand &Op) { - unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI; +bool X86AsmParser::doSrcDstMatch(X86Operand &Op1, X86Operand &Op2) +{ + // Return true and let a normal complaint about bogus operands happen. + if (!Op1.isMem() || !Op2.isMem()) + return true; - return Op.isMem() && - (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) && - isa(Op.Mem.Disp) && - cast(Op.Mem.Disp)->getValue() == 0 && - Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0; + // Actually these might be the other way round if Intel syntax is + // being used. It doesn't matter. + unsigned diReg = Op1.Mem.BaseReg; + unsigned siReg = Op2.Mem.BaseReg; + + if (X86MCRegisterClasses[X86::GR16RegClassID].contains(siReg)) + return X86MCRegisterClasses[X86::GR16RegClassID].contains(diReg); + if (X86MCRegisterClasses[X86::GR32RegClassID].contains(siReg)) + return X86MCRegisterClasses[X86::GR32RegClassID].contains(diReg); + if (X86MCRegisterClasses[X86::GR64RegClassID].contains(siReg)) + return X86MCRegisterClasses[X86::GR64RegClassID].contains(diReg); + // Again, return true and let another error happen. + return true; } bool X86AsmParser::ParseRegister(unsigned &RegNo, @@ -1035,7 +825,7 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo, RegNo = MatchRegisterName(Tok.getString().lower()); if (!is64BitMode()) { - // FIXME: This should be done using Requires and + // FIXME: This should be done using Requires and // Requires so "eiz" usage in 64-bit instructions can be also // checked. // FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a @@ -1117,7 +907,23 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo, return false; } -X86Operand *X86AsmParser::ParseOperand() { +std::unique_ptr X86AsmParser::DefaultMemSIOperand(SMLoc Loc) { + unsigned basereg = + is64BitMode() ? X86::RSI : (is32BitMode() ? X86::ESI : X86::SI); + const MCExpr *Disp = MCConstantExpr::Create(0, getContext()); + return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg, + /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0); +} + +std::unique_ptr X86AsmParser::DefaultMemDIOperand(SMLoc Loc) { + unsigned basereg = + is64BitMode() ? X86::RDI : (is32BitMode() ? 
X86::EDI : X86::DI); + const MCExpr *Disp = MCConstantExpr::Create(0, getContext()); + return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg, + /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0); +} + +std::unique_ptr X86AsmParser::ParseOperand() { if (isParsingIntelSyntax()) return ParseIntelOperand(); return ParseATTOperand(); @@ -1133,26 +939,34 @@ static unsigned getIntelMemOperandSize(StringRef OpStr) { .Cases("XWORD", "xword", 80) .Cases("XMMWORD", "xmmword", 128) .Cases("YMMWORD", "ymmword", 256) + .Cases("ZMMWORD", "zmmword", 512) + .Cases("OPAQUE", "opaque", -1U) // needs to be non-zero, but doesn't matter .Default(0); return Size; } -X86Operand * -X86AsmParser::CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, - unsigned BaseReg, unsigned IndexReg, - unsigned Scale, SMLoc Start, SMLoc End, - unsigned Size, StringRef Identifier, - InlineAsmIdentifierInfo &Info){ - if (isa(Disp)) { - // If this is not a VarDecl then assume it is a FuncDecl or some other label - // reference. We need an 'r' constraint here, so we need to create register - // operand to ensure proper matching. Just pick a GPR based on the size of - // a pointer. - if (!Info.IsVarDecl) { - unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; - return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true, - SMLoc(), Identifier, Info.OpDecl); - } +std::unique_ptr X86AsmParser::CreateMemForInlineAsm( + unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg, + unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier, + InlineAsmIdentifierInfo &Info) { + // If this is not a VarDecl then assume it is a FuncDecl or some other label + // reference. We need an 'r' constraint here, so we need to create register + // operand to ensure proper matching. Just pick a GPR based on the size of + // a pointer. + if (isa(Disp) && !Info.IsVarDecl) { + unsigned RegNo = + is64BitMode() ? X86::RBX : (is32BitMode() ? X86::EBX : X86::BX); + return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true, + SMLoc(), Identifier, Info.OpDecl); + } + + // We either have a direct symbol reference, or an offset from a symbol. The + // parser always puts the symbol on the LHS, so look there for size + // calculation purposes. + const MCBinaryExpr *BinOp = dyn_cast(Disp); + bool IsSymRef = + isa(BinOp ? BinOp->getLHS() : Disp); + if (IsSymRef) { if (!Size) { Size = Info.Type * 8; // Size is in terms of bits in this context. 
if (Size) @@ -1231,8 +1045,7 @@ RewriteIntelBracExpression(SmallVectorImpl *AsmRewrites, } } -X86Operand * -X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) { +bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) { const AsmToken &Tok = Parser.getTok(); bool Done = false; @@ -1254,7 +1067,7 @@ X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) { Done = true; break; } - return ErrorOperand(Tok.getLoc(), "Unexpected token!"); + return Error(Tok.getLoc(), "unknown token in expression"); } case AsmToken::EndOfStatement: { Done = true; @@ -1273,48 +1086,84 @@ X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) { } else { if (!isParsingInlineAsm()) { if (getParser().parsePrimaryExpr(Val, End)) - return ErrorOperand(Tok.getLoc(), "Unexpected identifier!"); + return Error(Tok.getLoc(), "Unexpected identifier!"); } else { - InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, - /*Unevaluated*/ false, End)) - return Err; + // This is a dot operator, not an adjacent identifier. + if (Identifier.find('.') != StringRef::npos) { + return false; + } else { + InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo(); + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) + return true; + } } SM.onIdentifierExpr(Val, Identifier); UpdateLocLex = false; break; } - return ErrorOperand(Tok.getLoc(), "Unexpected identifier!"); + return Error(Tok.getLoc(), "Unexpected identifier!"); } - case AsmToken::Integer: + case AsmToken::Integer: { + StringRef ErrMsg; if (isParsingInlineAsm() && SM.getAddImmPrefix()) InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, Tok.getLoc())); - SM.onInteger(Tok.getIntVal()); + // Look for 'b' or 'f' following an Integer as a directional label + SMLoc Loc = getTok().getLoc(); + int64_t IntVal = getTok().getIntVal(); + End = consumeToken(); + UpdateLocLex = false; + if (getLexer().getKind() == AsmToken::Identifier) { + StringRef IDVal = getTok().getString(); + if (IDVal == "f" || IDVal == "b") { + MCSymbol *Sym = + getContext().GetDirectionalLocalSymbol(IntVal, IDVal == "b"); + MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; + const MCExpr *Val = + MCSymbolRefExpr::Create(Sym, Variant, getContext()); + if (IDVal == "b" && Sym->isUndefined()) + return Error(Loc, "invalid reference to undefined symbol"); + StringRef Identifier = Sym->getName(); + SM.onIdentifierExpr(Val, Identifier); + End = consumeToken(); + } else { + if (SM.onInteger(IntVal, ErrMsg)) + return Error(Loc, ErrMsg); + } + } else { + if (SM.onInteger(IntVal, ErrMsg)) + return Error(Loc, ErrMsg); + } break; + } case AsmToken::Plus: SM.onPlus(); break; case AsmToken::Minus: SM.onMinus(); break; case AsmToken::Star: SM.onStar(); break; case AsmToken::Slash: SM.onDivide(); break; + case AsmToken::Pipe: SM.onOr(); break; + case AsmToken::Amp: SM.onAnd(); break; + case AsmToken::LessLess: + SM.onLShift(); break; + case AsmToken::GreaterGreater: + SM.onRShift(); break; case AsmToken::LBrac: SM.onLBrac(); break; case AsmToken::RBrac: SM.onRBrac(); break; case AsmToken::LParen: SM.onLParen(); break; case AsmToken::RParen: SM.onRParen(); break; } if (SM.hadError()) - return ErrorOperand(Tok.getLoc(), "Unexpected token!"); + return Error(Tok.getLoc(), "unknown token in expression"); - if (!Done && UpdateLocLex) { - End = Tok.getLoc(); - Parser.Lex(); // Consume the token. 
- } + if (!Done && UpdateLocLex) + End = consumeToken(); } - return 0; + return false; } -X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, - int64_t ImmDisp, - unsigned Size) { +std::unique_ptr +X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, + int64_t ImmDisp, unsigned Size) { const AsmToken &Tok = Parser.getTok(); SMLoc BracLoc = Tok.getLoc(), End = Tok.getEndLoc(); if (getLexer().isNot(AsmToken::LBrac)) @@ -1326,10 +1175,10 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, // may have already parsed an immediate displacement before the bracketed // expression. IntelExprStateMachine SM(ImmDisp, /*StopOnLBrac=*/false, /*AddImmPrefix=*/true); - if (X86Operand *Err = ParseIntelExpression(SM, End)) - return Err; + if (ParseIntelExpression(SM, End)) + return nullptr; - const MCExpr *Disp; + const MCExpr *Disp = nullptr; if (const MCExpr *Sym = SM.getSym()) { // A symbolic displacement. Disp = Sym; @@ -1337,16 +1186,23 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, RewriteIntelBracExpression(InstInfo->AsmRewrites, SM.getSymName(), ImmDisp, SM.getImm(), BracLoc, StartInBrac, End); - } else { - // An immediate displacement only. - Disp = MCConstantExpr::Create(SM.getImm(), getContext()); } - // Parse the dot operator (e.g., [ebx].foo.bar). - if (Tok.getString().startswith(".")) { + if (SM.getImm() || !Disp) { + const MCExpr *Imm = MCConstantExpr::Create(SM.getImm(), getContext()); + if (Disp) + Disp = MCBinaryExpr::CreateAdd(Disp, Imm, getContext()); + else + Disp = Imm; // An immediate displacement only. + } + + // Parse struct field access. Intel requires a dot, but MSVC doesn't. MSVC + // will in fact do global lookup the field name inside all global typedefs, + // but we don't emulate that. + if (Tok.getString().find('.') != StringRef::npos) { const MCExpr *NewDisp; - if (X86Operand *Err = ParseIntelDotOperator(Disp, NewDisp)) - return Err; + if (ParseIntelDotOperator(Disp, NewDisp)) + return nullptr; End = Tok.getEndLoc(); Parser.Lex(); // Eat the field. @@ -1364,6 +1220,11 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, else return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size); } + StringRef ErrMsg; + if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) { + Error(StartInBrac, ErrMsg); + return nullptr; + } return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start, End, Size); } @@ -1374,13 +1235,12 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, } // Inline assembly may use variable names with namespace alias qualifiers. 
-X86Operand *X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val, - StringRef &Identifier, - InlineAsmIdentifierInfo &Info, - bool IsUnevaluatedOperand, - SMLoc &End) { +bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val, + StringRef &Identifier, + InlineAsmIdentifierInfo &Info, + bool IsUnevaluatedOperand, SMLoc &End) { assert (isParsingInlineAsm() && "Expected to be parsing inline assembly."); - Val = 0; + Val = nullptr; StringRef LineBuf(Identifier.data()); SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand); @@ -1403,13 +1263,13 @@ X86Operand *X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val, MCSymbol *Sym = getContext().GetOrCreateSymbol(Identifier); MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; Val = MCSymbolRefExpr::Create(Sym, Variant, getParser().getContext()); - return 0; + return false; } /// \brief Parse intel style segment override. -X86Operand *X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, - SMLoc Start, - unsigned Size) { +std::unique_ptr +X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start, + unsigned Size) { assert(SegReg != 0 && "Tried to parse a segment override without a segment!"); const AsmToken &Tok = Parser.getTok(); // Eat colon. if (Tok.isNot(AsmToken::Colon)) @@ -1443,49 +1303,82 @@ X86Operand *X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, SMLoc End; if (!isParsingInlineAsm()) { if (getParser().parsePrimaryExpr(Val, End)) - return ErrorOperand(Tok.getLoc(), "Unexpected token!"); + return ErrorOperand(Tok.getLoc(), "unknown token in expression"); return X86Operand::CreateMem(Val, Start, End, Size); } InlineAsmIdentifierInfo Info; StringRef Identifier = Tok.getString(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, - /*Unevaluated*/ false, End)) - return Err; + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) + return nullptr; return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0, /*Scale=*/1, Start, End, Size, Identifier, Info); } /// ParseIntelMemOperand - Parse intel style memory operand. -X86Operand *X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, SMLoc Start, - unsigned Size) { +std::unique_ptr X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, + SMLoc Start, + unsigned Size) { const AsmToken &Tok = Parser.getTok(); SMLoc End; // Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ]. 
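  // e.g. an Intel operand written as '8[ebx + ecx*4]' arrives here with
  // ImmDisp == 8 and the '[' still unconsumed.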
if (getLexer().is(AsmToken::LBrac)) return ParseIntelBracExpression(/*SegReg=*/0, Start, ImmDisp, Size); + assert(ImmDisp == 0); const MCExpr *Val; if (!isParsingInlineAsm()) { if (getParser().parsePrimaryExpr(Val, End)) - return ErrorOperand(Tok.getLoc(), "Unexpected token!"); + return ErrorOperand(Tok.getLoc(), "unknown token in expression"); return X86Operand::CreateMem(Val, Start, End, Size); } InlineAsmIdentifierInfo Info; StringRef Identifier = Tok.getString(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, - /*Unevaluated*/ false, End)) - return Err; - return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0, /*IndexReg=*/0, - /*Scale=*/1, Start, End, Size, Identifier, Info); + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) + return nullptr; + + if (!getLexer().is(AsmToken::LBrac)) + return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0, /*IndexReg=*/0, + /*Scale=*/1, Start, End, Size, Identifier, Info); + + Parser.Lex(); // Eat '[' + + // Parse Identifier [ ImmDisp ] + IntelExprStateMachine SM(/*ImmDisp=*/0, /*StopOnLBrac=*/true, + /*AddImmPrefix=*/false); + if (ParseIntelExpression(SM, End)) + return nullptr; + + if (SM.getSym()) { + Error(Start, "cannot use more than one symbol in memory operand"); + return nullptr; + } + if (SM.getBaseReg()) { + Error(Start, "cannot use base register with variable reference"); + return nullptr; + } + if (SM.getIndexReg()) { + Error(Start, "cannot use index register with variable reference"); + return nullptr; + } + + const MCExpr *Disp = MCConstantExpr::Create(SM.getImm(), getContext()); + // BaseReg is non-zero to avoid assertions. In the context of inline asm, + // we're pointing to a local variable in memory, so the base register is + // really the frame or stack pointer. + return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/1, /*IndexReg=*/0, + /*Scale=*/1, Start, End, Size, Identifier, + Info.OpDecl); } /// Parse the '.' operator. -X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, +bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp) { const AsmToken &Tok = Parser.getTok(); int64_t OrigDispVal, DotDispVal; @@ -1494,10 +1387,12 @@ X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, if (const MCConstantExpr *OrigDisp = dyn_cast(Disp)) OrigDispVal = OrigDisp->getValue(); else - return ErrorOperand(Tok.getLoc(), "Non-constant offsets are not supported!"); + return Error(Tok.getLoc(), "Non-constant offsets are not supported!"); - // Drop the '.'. - StringRef DotDispStr = Tok.getString().drop_front(1); + // Drop the optional '.'. + StringRef DotDispStr = Tok.getString(); + if (DotDispStr.startswith(".")) + DotDispStr = DotDispStr.drop_front(1); // .Imm gets lexed as a real. 
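  // e.g. a numeric field reference such as '[ebx].4' reaches this point as
  // the real literal '.4'.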
if (Tok.is(AsmToken::Real)) { @@ -1509,10 +1404,10 @@ X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, std::pair BaseMember = DotDispStr.split('.'); if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second, DotDisp)) - return ErrorOperand(Tok.getLoc(), "Unable to lookup field reference!"); + return Error(Tok.getLoc(), "Unable to lookup field reference!"); DotDispVal = DotDisp; } else - return ErrorOperand(Tok.getLoc(), "Unexpected token type!"); + return Error(Tok.getLoc(), "Unexpected token type!"); if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) { SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data()); @@ -1523,12 +1418,12 @@ X86Operand *X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, } NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext()); - return 0; + return false; } /// Parse the 'offset' operator. This operator is used to specify the /// location rather then the content of a variable. -X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() { +std::unique_ptr X86AsmParser::ParseIntelOffsetOfOperator() { const AsmToken &Tok = Parser.getTok(); SMLoc OffsetOfLoc = Tok.getLoc(); Parser.Lex(); // Eat offset. @@ -1537,9 +1432,9 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() { InlineAsmIdentifierInfo Info; SMLoc Start = Tok.getLoc(), End; StringRef Identifier = Tok.getString(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, - /*Unevaluated*/ false, End)) - return Err; + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/false, End)) + return nullptr; // Don't emit the offset operator. InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7)); @@ -1547,7 +1442,8 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() { // The offset operator will have an 'r' constraint, thus we need to create // register operand to ensure proper matching. Just pick a GPR based on // the size of a pointer. - unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; + unsigned RegNo = + is64BitMode() ? X86::RBX : (is32BitMode() ? X86::EBX : X86::BX); return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true, OffsetOfLoc, Identifier, Info.OpDecl); } @@ -1564,18 +1460,21 @@ enum IntelOperatorKind { /// variable. A variable's size is the product of its LENGTH and TYPE. The /// TYPE operator returns the size of a C or C++ type or variable. If the /// variable is an array, TYPE returns the size of a single element. -X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) { +std::unique_ptr X86AsmParser::ParseIntelOperator(unsigned OpKind) { const AsmToken &Tok = Parser.getTok(); SMLoc TypeLoc = Tok.getLoc(); Parser.Lex(); // Eat operator. 
- const MCExpr *Val = 0; + const MCExpr *Val = nullptr; InlineAsmIdentifierInfo Info; SMLoc Start = Tok.getLoc(), End; StringRef Identifier = Tok.getString(); - if (X86Operand *Err = ParseIntelIdentifier(Val, Identifier, Info, - /*Unevaluated*/ true, End)) - return Err; + if (ParseIntelIdentifier(Val, Identifier, Info, + /*Unevaluated=*/true, End)) + return nullptr; + + if (!Info.OpDecl) + return ErrorOperand(Start, "unable to lookup expression"); unsigned CVal = 0; switch(OpKind) { @@ -1594,7 +1493,7 @@ X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) { return X86Operand::CreateImm(Imm, Start, End); } -X86Operand *X86AsmParser::ParseIntelOperand() { +std::unique_ptr X86AsmParser::ParseIntelOperand() { const AsmToken &Tok = Parser.getTok(); SMLoc Start, End; @@ -1626,8 +1525,8 @@ X86Operand *X86AsmParser::ParseIntelOperand() { AsmToken StartTok = Tok; IntelExprStateMachine SM(/*Imm=*/0, /*StopOnLBrac=*/true, /*AddImmPrefix=*/false); - if (X86Operand *Err = ParseIntelExpression(SM, End)) - return Err; + if (ParseIntelExpression(SM, End)) + return nullptr; int64_t Imm = SM.getImm(); if (isParsingInlineAsm()) { @@ -1641,6 +1540,13 @@ X86Operand *X86AsmParser::ParseIntelOperand() { } if (getLexer().isNot(AsmToken::LBrac)) { + // If a directional label (ie. 1f or 2b) was parsed above from + // ParseIntelExpression() then SM.getSym() was set to a pointer to + // to the MCExpr with the directional local symbol and this is a + // memory operand not an immediate operand. + if (SM.getSym()) + return X86Operand::CreateMem(SM.getSym(), Start, End, Size); + const MCExpr *ImmExpr = MCConstantExpr::Create(Imm, getContext()); return X86Operand::CreateImm(ImmExpr, Start, End); } @@ -1669,7 +1575,7 @@ X86Operand *X86AsmParser::ParseIntelOperand() { return ParseIntelMemOperand(/*Disp=*/0, Start, Size); } -X86Operand *X86AsmParser::ParseATTOperand() { +std::unique_ptr X86AsmParser::ParseATTOperand() { switch (getLexer().getKind()) { default: // Parse a memory operand with no segment register. @@ -1678,11 +1584,11 @@ X86Operand *X86AsmParser::ParseATTOperand() { // Read the register. unsigned RegNo; SMLoc Start, End; - if (ParseRegister(RegNo, Start, End)) return 0; + if (ParseRegister(RegNo, Start, End)) return nullptr; if (RegNo == X86::EIZ || RegNo == X86::RIZ) { Error(Start, "%eiz and %riz can only be used as index registers", SMRange(Start, End)); - return 0; + return nullptr; } // If this is a segment register followed by a ':', then this is the start @@ -1699,15 +1605,82 @@ X86Operand *X86AsmParser::ParseATTOperand() { Parser.Lex(); const MCExpr *Val; if (getParser().parseExpression(Val, End)) - return 0; + return nullptr; return X86Operand::CreateImm(Val, Start, End); } } } +bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands, + const MCParsedAsmOperand &Op) { + if(STI.getFeatureBits() & X86::FeatureAVX512) { + if (getLexer().is(AsmToken::LCurly)) { + // Eat "{" and mark the current place. + const SMLoc consumedToken = consumeToken(); + // Distinguish {1to} from {%k}. + if(getLexer().is(AsmToken::Integer)) { + // Parse memory broadcasting ({1to}). + if (getLexer().getTok().getIntVal() != 1) + return !ErrorAndEatStatement(getLexer().getLoc(), + "Expected 1to at this point"); + Parser.Lex(); // Eat "1" of 1to8 + if (!getLexer().is(AsmToken::Identifier) || + !getLexer().getTok().getIdentifier().startswith("to")) + return !ErrorAndEatStatement(getLexer().getLoc(), + "Expected 1to at this point"); + // Recognize only reasonable suffixes. 
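+          // e.g. AT&T syntax 'vaddpd (%rax){1to8}, %zmm1, %zmm2'; only the
+          // {1to8} and {1to16} forms are accepted below.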
+ const char *BroadcastPrimitive = + StringSwitch(getLexer().getTok().getIdentifier()) + .Case("to8", "{1to8}") + .Case("to16", "{1to16}") + .Default(nullptr); + if (!BroadcastPrimitive) + return !ErrorAndEatStatement(getLexer().getLoc(), + "Invalid memory broadcast primitive."); + Parser.Lex(); // Eat "toN" of 1toN + if (!getLexer().is(AsmToken::RCurly)) + return !ErrorAndEatStatement(getLexer().getLoc(), + "Expected } at this point"); + Parser.Lex(); // Eat "}" + Operands.push_back(X86Operand::CreateToken(BroadcastPrimitive, + consumedToken)); + // No AVX512 specific primitives can pass + // after memory broadcasting, so return. + return true; + } else { + // Parse mask register {%k1} + Operands.push_back(X86Operand::CreateToken("{", consumedToken)); + if (std::unique_ptr Op = ParseOperand()) { + Operands.push_back(std::move(Op)); + if (!getLexer().is(AsmToken::RCurly)) + return !ErrorAndEatStatement(getLexer().getLoc(), + "Expected } at this point"); + Operands.push_back(X86Operand::CreateToken("}", consumeToken())); + + // Parse "zeroing non-masked" semantic {z} + if (getLexer().is(AsmToken::LCurly)) { + Operands.push_back(X86Operand::CreateToken("{z}", consumeToken())); + if (!getLexer().is(AsmToken::Identifier) || + getLexer().getTok().getIdentifier() != "z") + return !ErrorAndEatStatement(getLexer().getLoc(), + "Expected z at this point"); + Parser.Lex(); // Eat the z + if (!getLexer().is(AsmToken::RCurly)) + return !ErrorAndEatStatement(getLexer().getLoc(), + "Expected } at this point"); + Parser.Lex(); // Eat the } + } + } + } + } + } + return true; +} + /// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix /// has already been parsed if present. -X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { +std::unique_ptr X86AsmParser::ParseMemOperand(unsigned SegReg, + SMLoc MemStart) { // We have to disambiguate a parenthesized expression "(4+5)" from the start // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The @@ -1716,7 +1689,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext()); if (getLexer().isNot(AsmToken::LParen)) { SMLoc ExprEnd; - if (getParser().parseExpression(Disp, ExprEnd)) return 0; + if (getParser().parseExpression(Disp, ExprEnd)) return nullptr; // After parsing the base expression we could either have a parenthesized // memory address or not. If not, return now. If so, eat the (. @@ -1743,7 +1716,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // It must be an parenthesized expression, parse it now. if (getParser().parseParenExpression(Disp, ExprEnd)) - return 0; + return nullptr; // After parsing the base expression we could either have a parenthesized // memory address or not. If not, return now. If so, eat the (. @@ -1762,15 +1735,16 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // If we reached here, then we just ate the ( of the memory operand. Process // the rest of the memory operand. 
unsigned BaseReg = 0, IndexReg = 0, Scale = 1; - SMLoc IndexLoc; + SMLoc IndexLoc, BaseLoc; if (getLexer().is(AsmToken::Percent)) { SMLoc StartLoc, EndLoc; - if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0; + BaseLoc = Parser.getTok().getLoc(); + if (ParseRegister(BaseReg, StartLoc, EndLoc)) return nullptr; if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) { Error(StartLoc, "eiz and riz can only be used as index registers", SMRange(StartLoc, EndLoc)); - return 0; + return nullptr; } } @@ -1786,7 +1760,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this. if (getLexer().is(AsmToken::Percent)) { SMLoc L; - if (ParseRegister(IndexReg, L, L)) return 0; + if (ParseRegister(IndexReg, L, L)) return nullptr; if (getLexer().isNot(AsmToken::RParen)) { // Parse the scale amount: @@ -1794,7 +1768,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { if (getLexer().isNot(AsmToken::Comma)) { Error(Parser.getTok().getLoc(), "expected comma in scale expression"); - return 0; + return nullptr; } Parser.Lex(); // Eat the comma. @@ -1804,13 +1778,18 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { int64_t ScaleVal; if (getParser().parseAbsoluteExpression(ScaleVal)){ Error(Loc, "expected scale expression"); - return 0; + return nullptr; } // Validate the scale amount. + if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) && + ScaleVal != 1) { + Error(Loc, "scale factor in 16-bit address must be 1"); + return nullptr; + } if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){ Error(Loc, "scale factor in address must be 1, 2, 4 or 8"); - return 0; + return nullptr; } Scale = (unsigned)ScaleVal; } @@ -1822,7 +1801,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { int64_t Value; if (getParser().parseAbsoluteExpression(Value)) - return 0; + return nullptr; if (Value != 1) Warning(Loc, "scale factor without index register is ignored"); @@ -1833,38 +1812,39 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // Ok, we've eaten the memory operand, verify we have a ')' and eat it too. if (getLexer().isNot(AsmToken::RParen)) { Error(Parser.getTok().getLoc(), "unexpected token in memory operand"); - return 0; + return nullptr; } SMLoc MemEnd = Parser.getTok().getEndLoc(); Parser.Lex(); // Eat the ')'. - // If we have both a base register and an index register make sure they are - // both 64-bit or 32-bit registers. - // To support VSIB, IndexReg can be 128-bit or 256-bit registers. - if (BaseReg != 0 && IndexReg != 0) { - if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) && - (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) || - X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) && - IndexReg != X86::RIZ) { - Error(IndexLoc, "index register is 32-bit, but base register is 64-bit"); - return 0; - } - if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) && - (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) || - X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) && - IndexReg != X86::EIZ){ - Error(IndexLoc, "index register is 64-bit, but base register is 32-bit"); - return 0; - } + // Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed, + // and then only in non-64-bit modes. Except for DX, which is a special case + // because an unofficial form of in/out instructions uses it. 
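+  // e.g. '(%bx,%si)' is a legal 16-bit address, while '(%cx)' is rejected
+  // here with "invalid 16-bit base register".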
+ if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) && + (is64BitMode() || (BaseReg != X86::BX && BaseReg != X86::BP && + BaseReg != X86::SI && BaseReg != X86::DI)) && + BaseReg != X86::DX) { + Error(BaseLoc, "invalid 16-bit base register"); + return nullptr; + } + if (BaseReg == 0 && + X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) { + Error(IndexLoc, "16-bit memory operand may not include only index register"); + return nullptr; + } + + StringRef ErrMsg; + if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) { + Error(BaseLoc, ErrMsg); + return nullptr; } return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, MemStart, MemEnd); } -bool X86AsmParser:: -ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, - SmallVectorImpl &Operands) { +bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) { InstInfo = &Info; StringRef PatchedName = Name; @@ -1874,7 +1854,7 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, PatchedName = PatchedName.substr(0, Name.size()-1); // FIXME: Hack to recognize cmp{ss,sd,ps,pd}. - const MCExpr *ExtraImmOp = 0; + const MCExpr *ExtraImmOp = nullptr; if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) && (PatchedName.endswith("ss") || PatchedName.endswith("sd") || PatchedName.endswith("ps") || PatchedName.endswith("pd"))) { @@ -1952,84 +1932,35 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) { // Parse '*' modifier. - if (getLexer().is(AsmToken::Star)) { - SMLoc Loc = Parser.getTok().getLoc(); - Operands.push_back(X86Operand::CreateToken("*", Loc)); - Parser.Lex(); // Eat the star. - } - - // Read the first operand. - if (X86Operand *Op = ParseOperand()) - Operands.push_back(Op); - else { - Parser.eatToEndOfStatement(); - return true; - } - - while (getLexer().is(AsmToken::Comma)) { - Parser.Lex(); // Eat the comma. - - // Parse and remember the operand. - if (X86Operand *Op = ParseOperand()) - Operands.push_back(Op); - else { - Parser.eatToEndOfStatement(); - return true; - } - } - - if (STI.getFeatureBits() & X86::FeatureAVX512) { - // Parse mask register {%k1} - if (getLexer().is(AsmToken::LCurly)) { - SMLoc Loc = Parser.getTok().getLoc(); - Operands.push_back(X86Operand::CreateToken("{", Loc)); - Parser.Lex(); // Eat the { - if (X86Operand *Op = ParseOperand()) { - Operands.push_back(Op); - if (!getLexer().is(AsmToken::RCurly)) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "Expected } at this point"); - } - Loc = Parser.getTok().getLoc(); - Operands.push_back(X86Operand::CreateToken("}", Loc)); - Parser.Lex(); // Eat the } - } else { - Parser.eatToEndOfStatement(); + if (getLexer().is(AsmToken::Star)) + Operands.push_back(X86Operand::CreateToken("*", consumeToken())); + + // Read the operands. 
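+    // Operands are comma separated; after each one HandleAVX512Operand
+    // consumes any trailing {%k<n>}, {z} or {1to<n>} suffixes.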
+ while(1) { + if (std::unique_ptr Op = ParseOperand()) { + Operands.push_back(std::move(Op)); + if (!HandleAVX512Operand(Operands, *Operands.back())) return true; - } - } - // Parse "zeroing non-masked" semantic {z} - if (getLexer().is(AsmToken::LCurly)) { - SMLoc Loc = Parser.getTok().getLoc(); - Operands.push_back(X86Operand::CreateToken("{z}", Loc)); - Parser.Lex(); // Eat the { - if (!getLexer().is(AsmToken::Identifier) || getLexer().getTok().getIdentifier() != "z") { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "Expected z at this point"); - } - Parser.Lex(); // Eat the z - if (!getLexer().is(AsmToken::RCurly)) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "Expected } at this point"); - } - Parser.Lex(); // Eat the } + } else { + Parser.eatToEndOfStatement(); + return true; } - } + // check for comma and eat it + if (getLexer().is(AsmToken::Comma)) + Parser.Lex(); + else + break; + } - if (getLexer().isNot(AsmToken::EndOfStatement)) { - SMLoc Loc = getLexer().getLoc(); - Parser.eatToEndOfStatement(); - return Error(Loc, "unexpected token in argument list"); - } - } + if (getLexer().isNot(AsmToken::EndOfStatement)) + return ErrorAndEatStatement(getLexer().getLoc(), + "unexpected token in argument list"); + } - if (getLexer().is(AsmToken::EndOfStatement)) - Parser.Lex(); // Consume the EndOfStatement - else if (isPrefix && getLexer().is(AsmToken::Slash)) - Parser.Lex(); // Consume the prefix separator Slash + // Consume the EndOfStatement or the prefix separator Slash + if (getLexer().is(AsmToken::EndOfStatement) || + (isPrefix && getLexer().is(AsmToken::Slash))) + Parser.Lex(); if (ExtraImmOp && isParsingIntelSyntax()) Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc)); @@ -2039,126 +1970,122 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, // documented form in various unofficial manuals, so a lot of code uses it. if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") && Operands.size() == 3) { - X86Operand &Op = *(X86Operand*)Operands.back(); + X86Operand &Op = (X86Operand &)*Operands.back(); if (Op.isMem() && Op.Mem.SegReg == 0 && isa(Op.Mem.Disp) && cast(Op.Mem.Disp)->getValue() == 0 && Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) { SMLoc Loc = Op.getEndLoc(); Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc); - delete &Op; } } // Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al". 
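The out-form rewrite above turns a trailing "(%dx)" memory operand into the plain %dx register operand. The pattern it looks for is simple enough to restate standalone (hypothetical MemOp struct; the displacement is assumed to have been folded to a constant, as the isa/cast pair above requires):

    // Editorial sketch of the "(%dx)" test used by the out/in rewrites:
    // no segment override, zero displacement, DX as base, no index register.
    struct MemOp {
      unsigned SegReg;
      long long Disp;
      unsigned BaseReg;
      unsigned IndexReg;
    };

    static bool isPlainDXMem(const MemOp &M, unsigned DXRegNo) {
      return M.SegReg == 0 && M.Disp == 0 &&
             M.BaseReg == DXRegNo && M.IndexReg == 0;
    }

The matching rewrite for the in forms, announced by the comment above, follows.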
if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") && Operands.size() == 3) { - X86Operand &Op = *(X86Operand*)Operands.begin()[1]; + X86Operand &Op = (X86Operand &)*Operands[1]; if (Op.isMem() && Op.Mem.SegReg == 0 && isa(Op.Mem.Disp) && cast(Op.Mem.Disp)->getValue() == 0 && Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) { SMLoc Loc = Op.getEndLoc(); - Operands.begin()[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc); - delete &Op; - } - } - // Transform "ins[bwl] %dx, %es:(%edi)" into "ins[bwl]" - if (Name.startswith("ins") && Operands.size() == 3 && - (Name == "insb" || Name == "insw" || Name == "insl")) { - X86Operand &Op = *(X86Operand*)Operands.begin()[1]; - X86Operand &Op2 = *(X86Operand*)Operands.begin()[2]; - if (Op.isReg() && Op.getReg() == X86::DX && isDstOp(Op2)) { - Operands.pop_back(); - Operands.pop_back(); - delete &Op; - delete &Op2; - } - } - - // Transform "outs[bwl] %ds:(%esi), %dx" into "out[bwl]" - if (Name.startswith("outs") && Operands.size() == 3 && - (Name == "outsb" || Name == "outsw" || Name == "outsl")) { - X86Operand &Op = *(X86Operand*)Operands.begin()[1]; - X86Operand &Op2 = *(X86Operand*)Operands.begin()[2]; - if (isSrcOp(Op) && Op2.isReg() && Op2.getReg() == X86::DX) { - Operands.pop_back(); - Operands.pop_back(); - delete &Op; - delete &Op2; - } - } - - // Transform "movs[bwl] %ds:(%esi), %es:(%edi)" into "movs[bwl]" - if (Name.startswith("movs") && Operands.size() == 3 && - (Name == "movsb" || Name == "movsw" || Name == "movsl" || - (is64BitMode() && Name == "movsq"))) { - X86Operand &Op = *(X86Operand*)Operands.begin()[1]; - X86Operand &Op2 = *(X86Operand*)Operands.begin()[2]; - if (isSrcOp(Op) && isDstOp(Op2)) { - Operands.pop_back(); - Operands.pop_back(); - delete &Op; - delete &Op2; - } - } - // Transform "lods[bwl] %ds:(%esi),{%al,%ax,%eax,%rax}" into "lods[bwl]" - if (Name.startswith("lods") && Operands.size() == 3 && + Operands[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc); + } + } + + // Append default arguments to "ins[bwld]" + if (Name.startswith("ins") && Operands.size() == 1 && + (Name == "insb" || Name == "insw" || Name == "insl" || + Name == "insd" )) { + if (isParsingIntelSyntax()) { + Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc)); + Operands.push_back(DefaultMemDIOperand(NameLoc)); + } else { + Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc)); + Operands.push_back(DefaultMemDIOperand(NameLoc)); + } + } + + // Append default arguments to "outs[bwld]" + if (Name.startswith("outs") && Operands.size() == 1 && + (Name == "outsb" || Name == "outsw" || Name == "outsl" || + Name == "outsd" )) { + if (isParsingIntelSyntax()) { + Operands.push_back(DefaultMemSIOperand(NameLoc)); + Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc)); + } else { + Operands.push_back(DefaultMemSIOperand(NameLoc)); + Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc)); + } + } + + // Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate + // values of $SIREG according to the mode. It would be nice if this + // could be achieved with InstAlias in the tables. 
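The DefaultMemSIOperand/DefaultMemDIOperand helpers used above are not defined in this hunk, but the comments here ("for appropriate values of $SIREG according to the mode") indicate they pick the implicit string-op pointer register from the current code size. A minimal sketch of that choice, under that assumption and with illustrative names only:

    // Editorial sketch of the mode-dependent implicit index registers.
    enum class CodeMode { Bits16, Bits32, Bits64 };

    static const char *defaultSrcIndexReg(CodeMode M) {
      switch (M) {
      case CodeMode::Bits16: return "si";
      case CodeMode::Bits32: return "esi";
      case CodeMode::Bits64: return "rsi";
      }
      return "esi"; // unreachable; keeps compilers quiet
    }

    static const char *defaultDstIndexReg(CodeMode M) {
      switch (M) {
      case CodeMode::Bits16: return "di";
      case CodeMode::Bits32: return "edi";
      case CodeMode::Bits64: return "rdi";
      }
      return "edi";
    }

The lods defaulting described in the comment above follows, with stos and scas handled the same way.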
+ if (Name.startswith("lods") && Operands.size() == 1 && (Name == "lods" || Name == "lodsb" || Name == "lodsw" || - Name == "lodsl" || (is64BitMode() && Name == "lodsq"))) { - X86Operand *Op1 = static_cast(Operands[1]); - X86Operand *Op2 = static_cast(Operands[2]); - if (isSrcOp(*Op1) && Op2->isReg()) { - const char *ins; - unsigned reg = Op2->getReg(); - bool isLods = Name == "lods"; - if (reg == X86::AL && (isLods || Name == "lodsb")) - ins = "lodsb"; - else if (reg == X86::AX && (isLods || Name == "lodsw")) - ins = "lodsw"; - else if (reg == X86::EAX && (isLods || Name == "lodsl")) - ins = "lodsl"; - else if (reg == X86::RAX && (isLods || Name == "lodsq")) - ins = "lodsq"; - else - ins = NULL; - if (ins != NULL) { - Operands.pop_back(); - Operands.pop_back(); - delete Op1; - delete Op2; - if (Name != ins) - static_cast(Operands[0])->setTokenValue(ins); + Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) + Operands.push_back(DefaultMemSIOperand(NameLoc)); + + // Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate + // values of $DIREG according to the mode. It would be nice if this + // could be achieved with InstAlias in the tables. + if (Name.startswith("stos") && Operands.size() == 1 && + (Name == "stos" || Name == "stosb" || Name == "stosw" || + Name == "stosl" || Name == "stosd" || Name == "stosq")) + Operands.push_back(DefaultMemDIOperand(NameLoc)); + + // Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate + // values of $DIREG according to the mode. It would be nice if this + // could be achieved with InstAlias in the tables. + if (Name.startswith("scas") && Operands.size() == 1 && + (Name == "scas" || Name == "scasb" || Name == "scasw" || + Name == "scasl" || Name == "scasd" || Name == "scasq")) + Operands.push_back(DefaultMemDIOperand(NameLoc)); + + // Add default SI and DI operands to "cmps[bwlq]". 
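As an editorial summary of which implicit operands each string-instruction family receives when written with no operands, here is the same information as a small lookup table (names illustrative only, not part of the patch):

    #include <cstring>

    struct StringOpDefaults {
      const char *Stem;   // mnemonic family
      bool UsesSrcIndex;  // memory operand at the source index (SI/ESI/RSI)
      bool UsesDstIndex;  // memory operand at the destination index (DI/EDI/RDI)
    };

    static const StringOpDefaults StringOpTable[] = {
      {"lods", true,  false}, // load from the source index into the accumulator
      {"stos", false, true }, // store the accumulator to the destination index
      {"scas", false, true }, // compare the accumulator with the destination index
      {"cmps", true,  true }, // compare source against destination
      {"movs", true,  true }, // copy source to destination
    };

    static const StringOpDefaults *lookupStringOp(const char *Stem) {
      for (const StringOpDefaults &E : StringOpTable)
        if (std::strcmp(E.Stem, Stem) == 0)
          return &E;
      return nullptr;
    }

The cmps handling follows; when both index operands are written out explicitly, it also checks that the source and destination index registers match.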
+ if (Name.startswith("cmps") && + (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" || + Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) { + if (Operands.size() == 1) { + if (isParsingIntelSyntax()) { + Operands.push_back(DefaultMemSIOperand(NameLoc)); + Operands.push_back(DefaultMemDIOperand(NameLoc)); + } else { + Operands.push_back(DefaultMemDIOperand(NameLoc)); + Operands.push_back(DefaultMemSIOperand(NameLoc)); } + } else if (Operands.size() == 3) { + X86Operand &Op = (X86Operand &)*Operands[1]; + X86Operand &Op2 = (X86Operand &)*Operands[2]; + if (!doSrcDstMatch(Op, Op2)) + return Error(Op.getStartLoc(), + "mismatching source and destination index registers"); } } - // Transform "stos[bwl] {%al,%ax,%eax,%rax},%es:(%edi)" into "stos[bwl]" - if (Name.startswith("stos") && Operands.size() == 3 && - (Name == "stos" || Name == "stosb" || Name == "stosw" || - Name == "stosl" || (is64BitMode() && Name == "stosq"))) { - X86Operand *Op1 = static_cast(Operands[1]); - X86Operand *Op2 = static_cast(Operands[2]); - if (isDstOp(*Op2) && Op1->isReg()) { - const char *ins; - unsigned reg = Op1->getReg(); - bool isStos = Name == "stos"; - if (reg == X86::AL && (isStos || Name == "stosb")) - ins = "stosb"; - else if (reg == X86::AX && (isStos || Name == "stosw")) - ins = "stosw"; - else if (reg == X86::EAX && (isStos || Name == "stosl")) - ins = "stosl"; - else if (reg == X86::RAX && (isStos || Name == "stosq")) - ins = "stosq"; - else - ins = NULL; - if (ins != NULL) { - Operands.pop_back(); - Operands.pop_back(); - delete Op1; - delete Op2; - if (Name != ins) - static_cast(Operands[0])->setTokenValue(ins); + + // Add default SI and DI operands to "movs[bwlq]". + if ((Name.startswith("movs") && + (Name == "movs" || Name == "movsb" || Name == "movsw" || + Name == "movsl" || Name == "movsd" || Name == "movsq")) || + (Name.startswith("smov") && + (Name == "smov" || Name == "smovb" || Name == "smovw" || + Name == "smovl" || Name == "smovd" || Name == "smovq"))) { + if (Operands.size() == 1) { + if (Name == "movsd") + Operands.back() = X86Operand::CreateToken("movsl", NameLoc); + if (isParsingIntelSyntax()) { + Operands.push_back(DefaultMemDIOperand(NameLoc)); + Operands.push_back(DefaultMemSIOperand(NameLoc)); + } else { + Operands.push_back(DefaultMemSIOperand(NameLoc)); + Operands.push_back(DefaultMemDIOperand(NameLoc)); } + } else if (Operands.size() == 3) { + X86Operand &Op = (X86Operand &)*Operands[1]; + X86Operand &Op2 = (X86Operand &)*Operands[2]; + if (!doSrcDstMatch(Op, Op2)) + return Error(Op.getStartLoc(), + "mismatching source and destination index registers"); } } @@ -2171,31 +2098,26 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, Operands.size() == 3) { if (isParsingIntelSyntax()) { // Intel syntax - X86Operand *Op1 = static_cast(Operands[2]); - if (Op1->isImm() && isa(Op1->getImm()) && - cast(Op1->getImm())->getValue() == 1) { - delete Operands[2]; + X86Operand &Op1 = static_cast(*Operands[2]); + if (Op1.isImm() && isa(Op1.getImm()) && + cast(Op1.getImm())->getValue() == 1) Operands.pop_back(); - } } else { - X86Operand *Op1 = static_cast(Operands[1]); - if (Op1->isImm() && isa(Op1->getImm()) && - cast(Op1->getImm())->getValue() == 1) { - delete Operands[1]; + X86Operand &Op1 = static_cast(*Operands[1]); + if (Op1.isImm() && isa(Op1.getImm()) && + cast(Op1.getImm())->getValue() == 1) Operands.erase(Operands.begin() + 1); - } } } // Transforms "int $3" into "int3" as a size optimization. 
We can't write an // instalias with an immediate operand yet. if (Name == "int" && Operands.size() == 2) { - X86Operand *Op1 = static_cast(Operands[1]); - if (Op1->isImm() && isa(Op1->getImm()) && - cast(Op1->getImm())->getValue() == 3) { - delete Operands[1]; + X86Operand &Op1 = static_cast(*Operands[1]); + if (Op1.isImm() && isa(Op1.getImm()) && + cast(Op1.getImm())->getValue() == 3) { Operands.erase(Operands.begin() + 1); - static_cast(Operands[0])->setTokenValue("int3"); + static_cast(*Operands[0]).setTokenValue("int3"); } } @@ -2241,9 +2163,7 @@ static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode, return convertToSExti8(Inst, Opcode, X86::RAX, isCmp); } -bool X86AsmParser:: -processInstruction(MCInst &Inst, - const SmallVectorImpl &Ops) { +bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) { switch (Inst.getOpcode()) { default: return false; case X86::AND16i16: return convert16i16to16ri8(Inst, X86::AND16ri8); @@ -2270,47 +2190,101 @@ processInstruction(MCInst &Inst, case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8); case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8); case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8); + case X86::VMOVAPDrr: + case X86::VMOVAPDYrr: + case X86::VMOVAPSrr: + case X86::VMOVAPSYrr: + case X86::VMOVDQArr: + case X86::VMOVDQAYrr: + case X86::VMOVDQUrr: + case X86::VMOVDQUYrr: + case X86::VMOVUPDrr: + case X86::VMOVUPDYrr: + case X86::VMOVUPSrr: + case X86::VMOVUPSYrr: { + if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) || + !X86II::isX86_64ExtendedReg(Inst.getOperand(1).getReg())) + return false; + + unsigned NewOpc; + switch (Inst.getOpcode()) { + default: llvm_unreachable("Invalid opcode"); + case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break; + case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break; + case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break; + case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break; + case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break; + case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break; + case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break; + case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break; + case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break; + case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break; + case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break; + case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break; + } + Inst.setOpcode(NewOpc); + return true; + } + case X86::VMOVSDrr: + case X86::VMOVSSrr: { + if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) || + !X86II::isX86_64ExtendedReg(Inst.getOperand(2).getReg())) + return false; + unsigned NewOpc; + switch (Inst.getOpcode()) { + default: llvm_unreachable("Invalid opcode"); + case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break; + case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break; + } + Inst.setOpcode(NewOpc); + return true; + } } } static const char *getSubtargetFeatureName(unsigned Val); -bool X86AsmParser:: -MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, - SmallVectorImpl &Operands, - MCStreamer &Out, unsigned &ErrorInfo, - bool MatchingInlineAsm) { + +void X86AsmParser::EmitInstruction(MCInst &Inst, OperandVector &Operands, + MCStreamer &Out) { + Instrumentation->InstrumentInstruction(Inst, Operands, getContext(), MII, + Out); + Out.EmitInstruction(Inst, STI); +} + +bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, + OperandVector &Operands, + MCStreamer &Out, unsigned 
&ErrorInfo, + bool MatchingInlineAsm) { assert(!Operands.empty() && "Unexpect empty operand list!"); - X86Operand *Op = static_cast(Operands[0]); - assert(Op->isToken() && "Leading operand should always be a mnemonic!"); + X86Operand &Op = static_cast(*Operands[0]); + assert(Op.isToken() && "Leading operand should always be a mnemonic!"); ArrayRef EmptyRanges = None; // First, handle aliases that expand to multiple instructions. // FIXME: This should be replaced with a real .td file alias mechanism. // Also, MatchInstructionImpl should actually *do* the EmitInstruction // call. - if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" || - Op->getToken() == "fstsww" || Op->getToken() == "fstcww" || - Op->getToken() == "finit" || Op->getToken() == "fsave" || - Op->getToken() == "fstenv" || Op->getToken() == "fclex") { + if (Op.getToken() == "fstsw" || Op.getToken() == "fstcw" || + Op.getToken() == "fstsww" || Op.getToken() == "fstcww" || + Op.getToken() == "finit" || Op.getToken() == "fsave" || + Op.getToken() == "fstenv" || Op.getToken() == "fclex") { MCInst Inst; Inst.setOpcode(X86::WAIT); Inst.setLoc(IDLoc); if (!MatchingInlineAsm) - Out.EmitInstruction(Inst); - - const char *Repl = - StringSwitch(Op->getToken()) - .Case("finit", "fninit") - .Case("fsave", "fnsave") - .Case("fstcw", "fnstcw") - .Case("fstcww", "fnstcw") - .Case("fstenv", "fnstenv") - .Case("fstsw", "fnstsw") - .Case("fstsww", "fnstsw") - .Case("fclex", "fnclex") - .Default(0); + EmitInstruction(Inst, Operands, Out); + + const char *Repl = StringSwitch(Op.getToken()) + .Case("finit", "fninit") + .Case("fsave", "fnsave") + .Case("fstcw", "fnstcw") + .Case("fstcww", "fnstcw") + .Case("fstenv", "fnstenv") + .Case("fstsw", "fnstsw") + .Case("fstsww", "fnstsw") + .Case("fclex", "fnclex") + .Default(nullptr); assert(Repl && "Unknown wait-prefixed instruction"); - delete Operands[0]; Operands[0] = X86Operand::CreateToken(Repl, IDLoc); } @@ -2332,7 +2306,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, Inst.setLoc(IDLoc); if (!MatchingInlineAsm) - Out.EmitInstruction(Inst); + EmitInstruction(Inst, Operands, Out); Opcode = Inst.getOpcode(); return false; case Match_MissingFeature: { @@ -2363,11 +2337,11 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // following hack. // Change the operand to point to a temporary token. - StringRef Base = Op->getToken(); + StringRef Base = Op.getToken(); SmallString<16> Tmp; Tmp += Base; Tmp += ' '; - Op->setTokenValue(Tmp.str()); + Op.setTokenValue(Tmp.str()); // If this instruction starts with an 'f', then it is a floating point stack // instruction. These come in up to three forms for 32-bit, 64-bit, and @@ -2408,7 +2382,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, ErrorInfoMissingFeature = ErrorInfoIgnore; // Restore the old token. 
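The wait-prefixed x87 aliases handled above effectively assemble as a WAIT instruction followed by the corresponding no-wait ("fn...") mnemonic, so "fstsw %ax" becomes "wait" plus "fnstsw %ax". A standalone restatement of the mapping encoded in the StringSwitch above (sketch only):

    #include <cstring>

    // Editorial sketch: map a wait-prefixed x87 mnemonic to its no-wait form.
    static const char *noWaitForm(const char *Mnemonic) {
      struct Entry { const char *Wait; const char *NoWait; };
      static const Entry Map[] = {
        {"finit",  "fninit"},  {"fsave",  "fnsave"},
        {"fstcw",  "fnstcw"},  {"fstcww", "fnstcw"},
        {"fstenv", "fnstenv"}, {"fstsw",  "fnstsw"},
        {"fstsww", "fnstsw"},  {"fclex",  "fnclex"},
      };
      for (const Entry &E : Map)
        if (std::strcmp(Mnemonic, E.Wait) == 0)
          return E.NoWait;
      return nullptr; // not a wait-prefixed alias
    }

The matcher loop above probes suffixed forms through a temporary token; the restore announced by the comment above follows.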
- Op->setTokenValue(Base); + Op.setTokenValue(Base); // If exactly one matched, then we treat that as a successful match (and the // instruction will already have been filled in correctly, since the failing @@ -2419,7 +2393,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, if (NumSuccessfulMatches == 1) { Inst.setLoc(IDLoc); if (!MatchingInlineAsm) - Out.EmitInstruction(Inst); + EmitInstruction(Inst, Operands, Out); Opcode = Inst.getOpcode(); return false; } @@ -2458,8 +2432,8 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) && (Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) { if (!WasOriginallyInvalidOperand) { - ArrayRef Ranges = MatchingInlineAsm ? EmptyRanges : - Op->getLocRange(); + ArrayRef Ranges = + MatchingInlineAsm ? EmptyRanges : Op.getLocRange(); return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'", Ranges, MatchingInlineAsm); } @@ -2470,10 +2444,10 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return Error(IDLoc, "too few operands for instruction", EmptyRanges, MatchingInlineAsm); - X86Operand *Operand = (X86Operand*)Operands[ErrorInfo]; - if (Operand->getStartLoc().isValid()) { - SMRange OperandRange = Operand->getLocRange(); - return Error(Operand->getStartLoc(), "invalid operand for instruction", + X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo]; + if (Operand.getStartLoc().isValid()) { + SMRange OperandRange = Operand.getLocRange(); + return Error(Operand.getStartLoc(), "invalid operand for instruction", OperandRange, MatchingInlineAsm); } } @@ -2526,11 +2500,9 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) { } else if (IDVal.startswith(".intel_syntax")) { getParser().setAssemblerDialect(1); if (getLexer().isNot(AsmToken::EndOfStatement)) { - if(Parser.getTok().getString() == "noprefix") { - // FIXME : Handle noprefix + // FIXME: Handle noprefix + if (Parser.getTok().getString() == "noprefix") Parser.Lex(); - } else - return true; } return false; } @@ -2544,7 +2516,7 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) { for (;;) { const MCExpr *Value; if (getParser().parseExpression(Value)) - return true; + return false; getParser().getStreamer().EmitValue(Value, Size); @@ -2552,8 +2524,10 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) { break; // FIXME: Improve diagnostic. 
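ParseDirectiveWord above follows the usual directive shape: parse a value, emit it, and keep going only while a comma follows. A minimal sketch of that loop over plain integers (standing in for MCExpr values and the streamer; hypothetical helper, not the parser's API):

    #include <cstdint>
    #include <sstream>
    #include <vector>

    // Editorial sketch of a ".word"-style value list parser.
    static bool parseValueList(std::istringstream &In, std::vector<int64_t> &Out) {
      for (;;) {
        int64_t Value;
        if (!(In >> Value))
          return false;            // "expected expression" in the real parser
        Out.push_back(Value);      // EmitValue(Value, Size) in the real parser
        char Sep = 0;
        if (!(In >> Sep))
          return true;             // end of statement
        if (Sep != ',')
          return false;            // "unexpected token in directive"
      }
    }

The comma check carrying the diagnostic mentioned in the FIXME above follows.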
- if (getLexer().isNot(AsmToken::Comma)) - return Error(L, "unexpected token in directive"); + if (getLexer().isNot(AsmToken::Comma)) { + Error(L, "unexpected token in directive"); + return false; + } Parser.Lex(); } } @@ -2563,22 +2537,29 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) { } /// ParseDirectiveCode -/// ::= .code32 | .code64 +/// ::= .code16 | .code32 | .code64 bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) { - if (IDVal == ".code32") { + if (IDVal == ".code16") { Parser.Lex(); - if (is64BitMode()) { - SwitchMode(); + if (!is16BitMode()) { + SwitchMode(X86::Mode16Bit); + getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); + } + } else if (IDVal == ".code32") { + Parser.Lex(); + if (!is32BitMode()) { + SwitchMode(X86::Mode32Bit); getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); } } else if (IDVal == ".code64") { Parser.Lex(); if (!is64BitMode()) { - SwitchMode(); + SwitchMode(X86::Mode64Bit); getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64); } } else { - return Error(L, "unexpected directive " + IDVal); + Error(L, "unknown directive " + IDVal); + return false; } return false;
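The directive handling above now recognizes all three code-size directives; each one selects an assembler mode and, when the mode actually changes, notifies the streamer with the matching MCAF_Code16/32/64 flag. A small editorial sketch of the directive-to-mode mapping (the enum is a stand-in for the real mode feature bits):

    #include <string>

    enum class AsmMode { Bits16, Bits32, Bits64, Unknown };

    // Editorial sketch: which mode a .codeNN directive requests.
    static AsmMode modeForDirective(const std::string &Directive) {
      if (Directive == ".code16") return AsmMode::Bits16;
      if (Directive == ".code32") return AsmMode::Bits32;
      if (Directive == ".code64") return AsmMode::Bits64;
      return AsmMode::Unknown;   // "unknown directive" diagnostic above
    }

With this change a listing can mix modes, for example a .code16 stub followed by .code32 code, and the parser only switches modes (and emits the assembler flag) when the requested mode differs from the current one.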