X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrInfo.td;h=51174fc805ca74ff5699fc3c01f93b1792e4a5cd;hp=60e2f9b0b091abc31313300e21db867bbd362b6b;hb=e446a19cc1f0e6115635284e037ca7aee47f8578;hpb=1415ca1781051bc823aa8e670dc1d0fad05bd3de

diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 60e2f9b0b09..51174fc805c 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -32,7 +32,8 @@ def SDTX86Cmov    : SDTypeProfile<1, 4,
 // Unary and binary operator instructions that set EFLAGS as a side-effect.
 def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
-                                           [SDTCisInt<0>, SDTCisVT<1, i32>]>;
+                                           [SDTCisSameAs<0, 2>,
+                                            SDTCisInt<0>, SDTCisVT<1, i32>]>;
 
 def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
                                             [SDTCisSameAs<0, 2>,
@@ -105,8 +106,6 @@ def SDT_X86TLSCALL  : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
 def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1,
                                       [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
 
-def SDT_X86WIN_FTOL : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
-
 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
 
 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
@@ -155,27 +154,6 @@ def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
                         [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                          SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 
-def X86AtomAdd64  : SDNode<"X86ISD::ATOMADD64_DAG", SDTX86atomicBinary,
-                           [SDNPHasChain, SDNPMayStore,
-                            SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomSub64  : SDNode<"X86ISD::ATOMSUB64_DAG", SDTX86atomicBinary,
-                           [SDNPHasChain, SDNPMayStore,
-                            SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomOr64   : SDNode<"X86ISD::ATOMOR64_DAG", SDTX86atomicBinary,
-                           [SDNPHasChain, SDNPMayStore,
-                            SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomXor64  : SDNode<"X86ISD::ATOMXOR64_DAG", SDTX86atomicBinary,
-                           [SDNPHasChain, SDNPMayStore,
-                            SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomAnd64  : SDNode<"X86ISD::ATOMAND64_DAG", SDTX86atomicBinary,
-                           [SDNPHasChain, SDNPMayStore,
-                            SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomNand64 : SDNode<"X86ISD::ATOMNAND64_DAG", SDTX86atomicBinary,
-                           [SDNPHasChain, SDNPMayStore,
-                            SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomSwap64 : SDNode<"X86ISD::ATOMSWAP64_DAG", SDTX86atomicBinary,
-                           [SDNPHasChain, SDNPMayStore,
-                            SDNPMayLoad, SDNPMemOperand]>;
 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
                         [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -206,10 +184,18 @@ def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
 def X86rdtsc   : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
                         [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def X86rdtscp  : SDNode<"X86ISD::RDTSCP_DAG", SDTX86Void,
+                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def X86rdpmc   : SDNode<"X86ISD::RDPMC_DAG", SDTX86Void,
+                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
 
 def X86Wrapper    : SDNode<"X86ISD::Wrapper",    SDTX86Wrapper>;
 def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
 
+def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
+                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+                                                       SDTCisInt<1>]>>;
+
 def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                         [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
@@ -249,10 +235,6 @@ def X86xor_flag  : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
 def X86and_flag  : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
                           [SDNPCommutative]>;
 
-def X86blsi   : SDNode<"X86ISD::BLSI",   SDTIntUnaryOp>;
-def X86blsmsk : SDNode<"X86ISD::BLSMSK", SDTIntUnaryOp>;
-def X86blsr   : SDNode<"X86ISD::BLSR",   SDTIntUnaryOp>;
-def X86bzhi   : SDNode<"X86ISD::BZHI",   SDTIntShiftOp>;
 def X86bextr  : SDNode<"X86ISD::BEXTR",  SDTIntBinOp>;
 
 def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
@@ -266,9 +248,6 @@ def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
 def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
                         [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
 
-def X86WinFTOL : SDNode<"X86ISD::WIN_FTOL", SDT_X86WIN_FTOL,
-                        [SDNPHasChain, SDNPOutGlue]>;
-
 //===----------------------------------------------------------------------===//
 // X86 Operand Definitions.
 //
@@ -282,134 +261,99 @@ def ptr_rc_nosp : PointerLikeRegClass<1>;
 def X86MemAsmOperand : AsmOperandClass {
   let Name = "Mem";
 }
-def X86Mem8AsmOperand : AsmOperandClass {
-  let Name = "Mem8"; let RenderMethod = "addMemOperands";
-}
-def X86Mem16AsmOperand : AsmOperandClass {
-  let Name = "Mem16"; let RenderMethod = "addMemOperands";
-}
-def X86Mem32AsmOperand : AsmOperandClass {
-  let Name = "Mem32"; let RenderMethod = "addMemOperands";
-}
-def X86Mem64AsmOperand : AsmOperandClass {
-  let Name = "Mem64"; let RenderMethod = "addMemOperands";
-}
-def X86Mem80AsmOperand : AsmOperandClass {
-  let Name = "Mem80"; let RenderMethod = "addMemOperands";
-}
-def X86Mem128AsmOperand : AsmOperandClass {
-  let Name = "Mem128"; let RenderMethod = "addMemOperands";
-}
-def X86Mem256AsmOperand : AsmOperandClass {
-  let Name = "Mem256"; let RenderMethod = "addMemOperands";
-}
-def X86Mem512AsmOperand : AsmOperandClass {
-  let Name = "Mem512"; let RenderMethod = "addMemOperands";
-}
-
-// Gather mem operands
-def X86MemVX32Operand : AsmOperandClass {
-  let Name = "MemVX32"; let RenderMethod = "addMemOperands";
-}
-def X86MemVY32Operand : AsmOperandClass {
-  let Name = "MemVY32"; let RenderMethod = "addMemOperands";
-}
-def X86MemVZ32Operand : AsmOperandClass {
-  let Name = "MemVZ32"; let RenderMethod = "addMemOperands";
-}
-def X86MemVX64Operand : AsmOperandClass {
-  let Name = "MemVX64"; let RenderMethod = "addMemOperands";
-}
-def X86MemVY64Operand : AsmOperandClass {
-  let Name = "MemVY64"; let RenderMethod = "addMemOperands";
-}
-def X86MemVZ64Operand : AsmOperandClass {
-  let Name = "MemVZ64"; let RenderMethod = "addMemOperands";
+let RenderMethod = "addMemOperands" in {
+  def X86Mem8AsmOperand   : AsmOperandClass { let Name = "Mem8"; }
+  def X86Mem16AsmOperand  : AsmOperandClass { let Name = "Mem16"; }
+  def X86Mem32AsmOperand  : AsmOperandClass { let Name = "Mem32"; }
+  def X86Mem64AsmOperand  : AsmOperandClass { let Name = "Mem64"; }
+  def X86Mem80AsmOperand  : AsmOperandClass { let Name = "Mem80"; }
+  def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
+  def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
+  def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
+  // Gather mem operands
+  def X86MemVX32Operand : AsmOperandClass { let Name = "MemVX32"; }
+  def X86MemVY32Operand : AsmOperandClass { let Name = "MemVY32"; }
+  def X86MemVZ32Operand : AsmOperandClass { let Name = "MemVZ32"; }
+  def X86MemVX64Operand : AsmOperandClass { let Name = "MemVX64"; }
+  def X86MemVY64Operand : AsmOperandClass { let Name = "MemVY64"; }
+  def X86MemVZ64Operand : AsmOperandClass { let Name = "MemVZ64"; }
+  def X86MemVX32XOperand : AsmOperandClass { let Name = "MemVX32X"; }
+  def X86MemVY32XOperand : AsmOperandClass { let Name = "MemVY32X"; }
+  def X86MemVX64XOperand : AsmOperandClass { let Name = "MemVX64X"; }
+  def X86MemVY64XOperand : AsmOperandClass { let Name = "MemVY64X"; }
 }
 
 def X86AbsMemAsmOperand : AsmOperandClass {
   let Name = "AbsMem";
   let SuperClasses = [X86MemAsmOperand];
 }
-class
 X86MemOperand<string printMethod> : Operand<iPTR> {
+
+class X86MemOperand<string printMethod,
+          AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
   let PrintMethod = printMethod;
   let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
-  let ParserMatchClass = X86MemAsmOperand;
+  let ParserMatchClass = parserMatchClass;
+  let OperandType = "OPERAND_MEMORY";
+}
+
+// Gather mem operands
+class X86VMemOperand<RegisterClass RC, string printMethod,
+                     AsmOperandClass parserMatchClass>
+  : X86MemOperand<printMethod, parserMatchClass> {
+  let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, i8imm);
 }
 
-let OperandType = "OPERAND_MEMORY" in {
+def anymem : X86MemOperand<"printanymem">;
+
 def opaque32mem : X86MemOperand<"printopaquemem">;
 def opaque48mem : X86MemOperand<"printopaquemem">;
 def opaque80mem : X86MemOperand<"printopaquemem">;
 def opaque512mem : X86MemOperand<"printopaquemem">;
-def i8mem   : X86MemOperand<"printi8mem"> {
-  let ParserMatchClass = X86Mem8AsmOperand; }
-def i16mem  : X86MemOperand<"printi16mem"> {
-  let ParserMatchClass = X86Mem16AsmOperand; }
-def i32mem  : X86MemOperand<"printi32mem"> {
-  let ParserMatchClass = X86Mem32AsmOperand; }
-def i64mem  : X86MemOperand<"printi64mem"> {
-  let ParserMatchClass = X86Mem64AsmOperand; }
-def i128mem : X86MemOperand<"printi128mem"> {
-  let ParserMatchClass = X86Mem128AsmOperand; }
-def i256mem : X86MemOperand<"printi256mem"> {
-  let ParserMatchClass = X86Mem256AsmOperand; }
-def i512mem : X86MemOperand<"printi512mem"> {
-  let ParserMatchClass = X86Mem512AsmOperand; }
-def f32mem  : X86MemOperand<"printf32mem"> {
-  let ParserMatchClass = X86Mem32AsmOperand; }
-def f64mem  : X86MemOperand<"printf64mem"> {
-  let ParserMatchClass = X86Mem64AsmOperand; }
-def f80mem  : X86MemOperand<"printf80mem"> {
-  let ParserMatchClass = X86Mem80AsmOperand; }
-def f128mem : X86MemOperand<"printf128mem"> {
-  let ParserMatchClass = X86Mem128AsmOperand; }
-def f256mem : X86MemOperand<"printf256mem">{
-  let ParserMatchClass = X86Mem256AsmOperand; }
-def f512mem : X86MemOperand<"printf512mem">{
-  let ParserMatchClass = X86Mem512AsmOperand; }
-def v512mem : Operand<iPTR> {
-  let PrintMethod = "printf512mem";
-  let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
-  let ParserMatchClass = X86Mem512AsmOperand; }
+def i8mem   : X86MemOperand<"printi8mem",   X86Mem8AsmOperand>;
+def i16mem  : X86MemOperand<"printi16mem",  X86Mem16AsmOperand>;
+def i32mem  : X86MemOperand<"printi32mem",  X86Mem32AsmOperand>;
+def i64mem  : X86MemOperand<"printi64mem",  X86Mem64AsmOperand>;
+def i128mem : X86MemOperand<"printi128mem", X86Mem128AsmOperand>;
+def i256mem : X86MemOperand<"printi256mem", X86Mem256AsmOperand>;
+def i512mem : X86MemOperand<"printi512mem", X86Mem512AsmOperand>;
+def f32mem  : X86MemOperand<"printf32mem",  X86Mem32AsmOperand>;
+def f64mem  : X86MemOperand<"printf64mem",  X86Mem64AsmOperand>;
+def f80mem  : X86MemOperand<"printf80mem",  X86Mem80AsmOperand>;
+def f128mem : X86MemOperand<"printf128mem", X86Mem128AsmOperand>;
+def f256mem : X86MemOperand<"printf256mem", X86Mem256AsmOperand>;
+def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;
+
+def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;
 
 // Gather mem operands
-def vx32mem : X86MemOperand<"printi32mem">{
-  let MIOperandInfo = (ops ptr_rc, i8imm, VR128, i32imm, i8imm);
-  let ParserMatchClass = X86MemVX32Operand; }
-def vy32mem : X86MemOperand<"printi32mem">{
-  let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
-  let ParserMatchClass = X86MemVY32Operand; }
-def vx64mem : X86MemOperand<"printi64mem">{
-  let MIOperandInfo = (ops ptr_rc, i8imm, VR128, i32imm, i8imm);
-  let ParserMatchClass = X86MemVX64Operand; }
-def vy64mem : X86MemOperand<"printi64mem">{
-  let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
-  let ParserMatchClass = X86MemVY64Operand; }
-def vy64xmem : X86MemOperand<"printi64mem">{
-  let MIOperandInfo = (ops ptr_rc, i8imm, VR256X, i32imm, i8imm);
-  let ParserMatchClass = X86MemVY64Operand; }
-def vz32mem : X86MemOperand<"printi32mem">{
-  let MIOperandInfo = (ops ptr_rc, i16imm, VR512, i32imm, i8imm);
-  let ParserMatchClass = X86MemVZ32Operand; }
-def vz64mem : X86MemOperand<"printi64mem">{
-  let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
-  let ParserMatchClass = X86MemVZ64Operand; }
-}
-
-// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
-// plain GR64, so that it doesn't potentially require a REX prefix.
-def i8mem_NOREX : Operand<i64> {
+def vx32mem  : X86VMemOperand<VR128,  "printi32mem", X86MemVX32Operand>;
+def vy32mem  : X86VMemOperand<VR256,  "printi32mem", X86MemVY32Operand>;
+def vx64mem  : X86VMemOperand<VR128,  "printi64mem", X86MemVX64Operand>;
+def vy64mem  : X86VMemOperand<VR256,  "printi64mem", X86MemVY64Operand>;
+
+def vx32xmem : X86VMemOperand<VR128X, "printi32mem", X86MemVX32XOperand>;
+def vx64xmem : X86VMemOperand<VR128X, "printi64mem", X86MemVX64XOperand>;
+def vy32xmem : X86VMemOperand<VR256X, "printi32mem", X86MemVY32XOperand>;
+def vy64xmem : X86VMemOperand<VR256X, "printi64mem", X86MemVY64XOperand>;
+def vz32mem  : X86VMemOperand<VR512,  "printi32mem", X86MemVZ32Operand>;
+def vz64mem  : X86VMemOperand<VR512,  "printi64mem", X86MemVZ64Operand>;
+
+// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
+// of a plain GPR, so that it doesn't potentially require a REX prefix.
+def ptr_rc_norex : PointerLikeRegClass<2>;
+def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
+
+def i8mem_NOREX : Operand<iPTR> {
   let PrintMethod = "printi8mem";
-  let MIOperandInfo = (ops GR64_NOREX, i8imm, GR64_NOREX_NOSP, i32imm, i8imm);
+  let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm, i8imm);
   let ParserMatchClass = X86Mem8AsmOperand;
   let OperandType = "OPERAND_MEMORY";
 }
 
 // GPRs available for tailcall.
 // It represents GR32_TC, GR64_TC or GR64_TCW64.
-def ptr_rc_tailcall : PointerLikeRegClass<2>;
+def ptr_rc_tailcall : PointerLikeRegClass<4>;
 
 // Special i32mem for addresses of load folding tail calls.
These are not // allowed to use callee-saved registers since they must be scheduled @@ -445,134 +389,184 @@ def brtarget8 : Operand; } -def X86SrcIdx8Operand : AsmOperandClass { - let Name = "SrcIdx8"; - let RenderMethod = "addSrcIdxOperands"; - let SuperClasses = [X86Mem8AsmOperand]; -} -def X86SrcIdx16Operand : AsmOperandClass { - let Name = "SrcIdx16"; - let RenderMethod = "addSrcIdxOperands"; - let SuperClasses = [X86Mem16AsmOperand]; -} -def X86SrcIdx32Operand : AsmOperandClass { - let Name = "SrcIdx32"; - let RenderMethod = "addSrcIdxOperands"; - let SuperClasses = [X86Mem32AsmOperand]; -} -def X86SrcIdx64Operand : AsmOperandClass { - let Name = "SrcIdx64"; - let RenderMethod = "addSrcIdxOperands"; - let SuperClasses = [X86Mem64AsmOperand]; -} -def X86DstIdx8Operand : AsmOperandClass { - let Name = "DstIdx8"; - let RenderMethod = "addDstIdxOperands"; - let SuperClasses = [X86Mem8AsmOperand]; -} -def X86DstIdx16Operand : AsmOperandClass { - let Name = "DstIdx16"; - let RenderMethod = "addDstIdxOperands"; - let SuperClasses = [X86Mem16AsmOperand]; -} -def X86DstIdx32Operand : AsmOperandClass { - let Name = "DstIdx32"; - let RenderMethod = "addDstIdxOperands"; - let SuperClasses = [X86Mem32AsmOperand]; -} -def X86DstIdx64Operand : AsmOperandClass { - let Name = "DstIdx64"; - let RenderMethod = "addDstIdxOperands"; - let SuperClasses = [X86Mem64AsmOperand]; -} -def X86MemOffs8AsmOperand : AsmOperandClass { - let Name = "MemOffs8"; - let RenderMethod = "addMemOffsOperands"; - let SuperClasses = [X86Mem8AsmOperand]; -} -def X86MemOffs16AsmOperand : AsmOperandClass { - let Name = "MemOffs16"; - let RenderMethod = "addMemOffsOperands"; - let SuperClasses = [X86Mem16AsmOperand]; -} -def X86MemOffs32AsmOperand : AsmOperandClass { - let Name = "MemOffs32"; - let RenderMethod = "addMemOffsOperands"; - let SuperClasses = [X86Mem32AsmOperand]; -} -def X86MemOffs64AsmOperand : AsmOperandClass { - let Name = "MemOffs64"; - let RenderMethod = "addMemOffsOperands"; - let SuperClasses = [X86Mem64AsmOperand]; -} -let OperandType = "OPERAND_MEMORY" in { -def srcidx8 : Operand { - let ParserMatchClass = X86SrcIdx8Operand; - let MIOperandInfo = (ops ptr_rc, i8imm); - let PrintMethod = "printSrcIdx8"; } -def srcidx16 : Operand { - let ParserMatchClass = X86SrcIdx16Operand; - let MIOperandInfo = (ops ptr_rc, i8imm); - let PrintMethod = "printSrcIdx16"; } -def srcidx32 : Operand { - let ParserMatchClass = X86SrcIdx32Operand; - let MIOperandInfo = (ops ptr_rc, i8imm); - let PrintMethod = "printSrcIdx32"; } -def srcidx64 : Operand { - let ParserMatchClass = X86SrcIdx64Operand; +// Special parser to detect 16-bit mode to select 16-bit displacement. +def X86AbsMem16AsmOperand : AsmOperandClass { + let Name = "AbsMem16"; + let RenderMethod = "addAbsMemOperands"; + let SuperClasses = [X86AbsMemAsmOperand]; +} + +// Branch targets have OtherVT type and print as pc-relative values. 
+let OperandType = "OPERAND_PCREL", + PrintMethod = "printPCRelImm" in { +let ParserMatchClass = X86AbsMem16AsmOperand in + def brtarget16 : Operand; +let ParserMatchClass = X86AbsMemAsmOperand in + def brtarget32 : Operand; +} + +let RenderMethod = "addSrcIdxOperands" in { + def X86SrcIdx8Operand : AsmOperandClass { + let Name = "SrcIdx8"; + let SuperClasses = [X86Mem8AsmOperand]; + } + def X86SrcIdx16Operand : AsmOperandClass { + let Name = "SrcIdx16"; + let SuperClasses = [X86Mem16AsmOperand]; + } + def X86SrcIdx32Operand : AsmOperandClass { + let Name = "SrcIdx32"; + let SuperClasses = [X86Mem32AsmOperand]; + } + def X86SrcIdx64Operand : AsmOperandClass { + let Name = "SrcIdx64"; + let SuperClasses = [X86Mem64AsmOperand]; + } +} // RenderMethod = "addSrcIdxOperands" + +let RenderMethod = "addDstIdxOperands" in { + def X86DstIdx8Operand : AsmOperandClass { + let Name = "DstIdx8"; + let SuperClasses = [X86Mem8AsmOperand]; + } + def X86DstIdx16Operand : AsmOperandClass { + let Name = "DstIdx16"; + let SuperClasses = [X86Mem16AsmOperand]; + } + def X86DstIdx32Operand : AsmOperandClass { + let Name = "DstIdx32"; + let SuperClasses = [X86Mem32AsmOperand]; + } + def X86DstIdx64Operand : AsmOperandClass { + let Name = "DstIdx64"; + let SuperClasses = [X86Mem64AsmOperand]; + } +} // RenderMethod = "addDstIdxOperands" + +let RenderMethod = "addMemOffsOperands" in { + def X86MemOffs16_8AsmOperand : AsmOperandClass { + let Name = "MemOffs16_8"; + let SuperClasses = [X86Mem8AsmOperand]; + } + def X86MemOffs16_16AsmOperand : AsmOperandClass { + let Name = "MemOffs16_16"; + let SuperClasses = [X86Mem16AsmOperand]; + } + def X86MemOffs16_32AsmOperand : AsmOperandClass { + let Name = "MemOffs16_32"; + let SuperClasses = [X86Mem32AsmOperand]; + } + def X86MemOffs32_8AsmOperand : AsmOperandClass { + let Name = "MemOffs32_8"; + let SuperClasses = [X86Mem8AsmOperand]; + } + def X86MemOffs32_16AsmOperand : AsmOperandClass { + let Name = "MemOffs32_16"; + let SuperClasses = [X86Mem16AsmOperand]; + } + def X86MemOffs32_32AsmOperand : AsmOperandClass { + let Name = "MemOffs32_32"; + let SuperClasses = [X86Mem32AsmOperand]; + } + def X86MemOffs32_64AsmOperand : AsmOperandClass { + let Name = "MemOffs32_64"; + let SuperClasses = [X86Mem64AsmOperand]; + } + def X86MemOffs64_8AsmOperand : AsmOperandClass { + let Name = "MemOffs64_8"; + let SuperClasses = [X86Mem8AsmOperand]; + } + def X86MemOffs64_16AsmOperand : AsmOperandClass { + let Name = "MemOffs64_16"; + let SuperClasses = [X86Mem16AsmOperand]; + } + def X86MemOffs64_32AsmOperand : AsmOperandClass { + let Name = "MemOffs64_32"; + let SuperClasses = [X86Mem32AsmOperand]; + } + def X86MemOffs64_64AsmOperand : AsmOperandClass { + let Name = "MemOffs64_64"; + let SuperClasses = [X86Mem64AsmOperand]; + } +} // RenderMethod = "addMemOffsOperands" + +class X86SrcIdxOperand + : X86MemOperand { let MIOperandInfo = (ops ptr_rc, i8imm); - let PrintMethod = "printSrcIdx64"; } -def dstidx8 : Operand { - let ParserMatchClass = X86DstIdx8Operand; - let MIOperandInfo = (ops ptr_rc); - let PrintMethod = "printDstIdx8"; } -def dstidx16 : Operand { - let ParserMatchClass = X86DstIdx16Operand; - let MIOperandInfo = (ops ptr_rc); - let PrintMethod = "printDstIdx16"; } -def dstidx32 : Operand { - let ParserMatchClass = X86DstIdx32Operand; - let MIOperandInfo = (ops ptr_rc); - let PrintMethod = "printDstIdx32"; } -def dstidx64 : Operand { - let ParserMatchClass = X86DstIdx64Operand; +} + +class X86DstIdxOperand + : X86MemOperand { let MIOperandInfo = (ops ptr_rc); - let 
PrintMethod = "printDstIdx64"; } -def offset8 : Operand { - let ParserMatchClass = X86MemOffs8AsmOperand; - let MIOperandInfo = (ops i64imm, i8imm); - let PrintMethod = "printMemOffs8"; } -def offset16 : Operand { - let ParserMatchClass = X86MemOffs16AsmOperand; - let MIOperandInfo = (ops i64imm, i8imm); - let PrintMethod = "printMemOffs16"; } -def offset32 : Operand { - let ParserMatchClass = X86MemOffs32AsmOperand; - let MIOperandInfo = (ops i64imm, i8imm); - let PrintMethod = "printMemOffs32"; } -def offset64 : Operand { - let ParserMatchClass = X86MemOffs64AsmOperand; - let MIOperandInfo = (ops i64imm, i8imm); - let PrintMethod = "printMemOffs64"; } } +def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>; +def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>; +def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>; +def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>; +def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>; +def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>; +def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>; +def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>; + +class X86MemOffsOperand + : X86MemOperand { + let MIOperandInfo = (ops immOperand, i8imm); +} + +def offset16_8 : X86MemOffsOperand; +def offset16_16 : X86MemOffsOperand; +def offset16_32 : X86MemOffsOperand; +def offset32_8 : X86MemOffsOperand; +def offset32_16 : X86MemOffsOperand; +def offset32_32 : X86MemOffsOperand; +def offset32_64 : X86MemOffsOperand; +def offset64_8 : X86MemOffsOperand; +def offset64_16 : X86MemOffsOperand; +def offset64_32 : X86MemOffsOperand; +def offset64_64 : X86MemOffsOperand; def SSECC : Operand { - let PrintMethod = "printSSECC"; + let PrintMethod = "printSSEAVXCC"; let OperandType = "OPERAND_IMMEDIATE"; } +def i8immZExt3 : ImmLeaf= 0 && Imm < 8; +}]>; + def AVXCC : Operand { - let PrintMethod = "printAVXCC"; + let PrintMethod = "printSSEAVXCC"; let OperandType = "OPERAND_IMMEDIATE"; } -class ImmSExtAsmOperandClass : AsmOperandClass { - let SuperClasses = [ImmAsmOperand]; - let RenderMethod = "addImmOperands"; +def i8immZExt5 : ImmLeaf= 0 && Imm < 32; +}]>; + +def AVX512ICC : Operand { + let PrintMethod = "printSSEAVXCC"; + let OperandType = "OPERAND_IMMEDIATE"; +} + +def XOPCC : Operand { + let PrintMethod = "printXOPCC"; + let OperandType = "OPERAND_IMMEDIATE"; } -class ImmZExtAsmOperandClass : AsmOperandClass { +class ImmSExtAsmOperandClass : AsmOperandClass { let SuperClasses = [ImmAsmOperand]; let RenderMethod = "addImmOperands"; } @@ -584,11 +578,15 @@ def X86GR32orGR64AsmOperand : AsmOperandClass { def GR32orGR64 : RegisterOperand { let ParserMatchClass = X86GR32orGR64AsmOperand; } - +def AVX512RCOperand : AsmOperandClass { + let Name = "AVX512RC"; +} def AVX512RC : Operand { let PrintMethod = "printRoundingControl"; let OperandType = "OPERAND_IMMEDIATE"; + let ParserMatchClass = AVX512RCOperand; } + // Sign-extended immediate classes. We don't need to define the full lattice // here because there is no instruction with an ambiguity between ImmSExti64i32 // and ImmSExti32i8. 
@@ -616,12 +614,6 @@ def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass { let Name = "ImmSExti32i8"; } -// [0, 0x000000FF] -def ImmZExtu32u8AsmOperand : ImmZExtAsmOperandClass { - let Name = "ImmZExtu32u8"; -} - - // [0, 0x0000007F] | // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF] def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass { @@ -630,6 +622,14 @@ def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass { ImmSExti64i32AsmOperand]; } +// Unsigned immediate used by SSE/AVX instructions +// [0, 0xFF] +// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF] +def ImmUnsignedi8AsmOperand : AsmOperandClass { + let Name = "ImmUnsignedi8"; + let RenderMethod = "addImmOperands"; +} + // A couple of more descriptive operand definitions. // 16-bits but only 8 bits are significant. def i16i8imm : Operand { @@ -641,11 +641,6 @@ def i32i8imm : Operand { let ParserMatchClass = ImmSExti32i8AsmOperand; let OperandType = "OPERAND_IMMEDIATE"; } -// 32-bits but only 8 bits are significant, and those 8 bits are unsigned. -def u32u8imm : Operand { - let ParserMatchClass = ImmZExtu32u8AsmOperand; - let OperandType = "OPERAND_IMMEDIATE"; -} // 64-bits but only 32 bits are significant. def i64i32imm : Operand { @@ -653,6 +648,27 @@ def i64i32imm : Operand { let OperandType = "OPERAND_IMMEDIATE"; } +// 64-bits but only 8 bits are significant. +def i64i8imm : Operand { + let ParserMatchClass = ImmSExti64i8AsmOperand; + let OperandType = "OPERAND_IMMEDIATE"; +} + +// Unsigned 8-bit immediate used by SSE/AVX instructions. +def u8imm : Operand { + let PrintMethod = "printU8Imm"; + let ParserMatchClass = ImmUnsignedi8AsmOperand; + let OperandType = "OPERAND_IMMEDIATE"; +} + +// 32-bit immediate but only 8-bits are significant and they are unsigned. +// Used by some SSE/AVX instructions that use intrinsics. +def i32u8imm : Operand { + let PrintMethod = "printU8Imm"; + let ParserMatchClass = ImmUnsignedi8AsmOperand; + let OperandType = "OPERAND_IMMEDIATE"; +} + // 64-bits but only 32 bits are significant, and those bits are treated as being // pc relative. def i64i32imm_pcrel : Operand { @@ -661,21 +677,15 @@ def i64i32imm_pcrel : Operand { let OperandType = "OPERAND_PCREL"; } -// 64-bits but only 8 bits are significant. -def i64i8imm : Operand { - let ParserMatchClass = ImmSExti64i8AsmOperand; - let OperandType = "OPERAND_IMMEDIATE"; -} - def lea64_32mem : Operand { - let PrintMethod = "printi32mem"; + let PrintMethod = "printanymem"; let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm); let ParserMatchClass = X86MemAsmOperand; } // Memory operands that use 64-bit pointers in both ILP32 and LP64. def lea64mem : Operand { - let PrintMethod = "printi64mem"; + let PrintMethod = "printanymem"; let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm); let ParserMatchClass = X86MemAsmOperand; } @@ -712,6 +722,8 @@ def tls64addr : ComplexPattern; +def vectoraddr : ComplexPattern; + //===----------------------------------------------------------------------===// // X86 Instruction Predicate Definitions. 
def HasCMov : Predicate<"Subtarget->hasCMov()">; @@ -729,6 +741,7 @@ def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">; def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">; def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">; def HasSSE41 : Predicate<"Subtarget->hasSSE41()">; +def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">; def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">; def HasSSE42 : Predicate<"Subtarget->hasSSE42()">; def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">; @@ -740,10 +753,23 @@ def HasAVX512 : Predicate<"Subtarget->hasAVX512()">, AssemblerPredicate<"FeatureAVX512", "AVX-512 ISA">; def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">; def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">; -def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">; -def HasCDI : Predicate<"Subtarget->hasCDI()">; -def HasPFI : Predicate<"Subtarget->hasPFI()">; -def HasERI : Predicate<"Subtarget->hasERI()">; +def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">; +def HasCDI : Predicate<"Subtarget->hasCDI()">, + AssemblerPredicate<"FeatureCDI", "AVX-512 CD ISA">; +def HasPFI : Predicate<"Subtarget->hasPFI()">, + AssemblerPredicate<"FeaturePFI", "AVX-512 PF ISA">; +def HasERI : Predicate<"Subtarget->hasERI()">, + AssemblerPredicate<"FeatureERI", "AVX-512 ER ISA">; +def HasDQI : Predicate<"Subtarget->hasDQI()">, + AssemblerPredicate<"FeatureDQI", "AVX-512 DQ ISA">; +def NoDQI : Predicate<"!Subtarget->hasDQI()">; +def HasBWI : Predicate<"Subtarget->hasBWI()">, + AssemblerPredicate<"FeatureBWI", "AVX-512 BW ISA">; +def NoBWI : Predicate<"!Subtarget->hasBWI()">; +def HasVLX : Predicate<"Subtarget->hasVLX()">, + AssemblerPredicate<"FeatureVLX", "AVX-512 VL ISA">; +def NoVLX : Predicate<"!Subtarget->hasVLX()">; +def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">; def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">; def HasAES : Predicate<"Subtarget->hasAES()">; @@ -770,11 +796,14 @@ def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">; def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">; def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">; def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">; +def HasMPX : Predicate<"Subtarget->hasMPX()">; def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">; def Not64BitMode : Predicate<"!Subtarget->is64Bit()">, AssemblerPredicate<"!Mode64Bit", "Not 64-bit mode">; def In64BitMode : Predicate<"Subtarget->is64Bit()">, AssemblerPredicate<"Mode64Bit", "64-bit mode">; +def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">; +def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">; def In16BitMode : Predicate<"Subtarget->is16Bit()">, AssemblerPredicate<"Mode16Bit", "16-bit mode">; def Not16BitMode : Predicate<"!Subtarget->is16Bit()">, @@ -782,6 +811,9 @@ def Not16BitMode : Predicate<"!Subtarget->is16Bit()">, def In32BitMode : Predicate<"Subtarget->is32Bit()">, AssemblerPredicate<"Mode32Bit", "32-bit mode">; def IsWin64 : Predicate<"Subtarget->isTargetWin64()">; +def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">; +def IsPS4 : Predicate<"Subtarget->isTargetPS4()">; +def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">; def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">; def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">; def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">; @@ -797,6 +829,8 @@ def OptForSpeed : Predicate<"!OptForSize">; def FastBTMem : 
Predicate<"!Subtarget->isBTMemSlow()">; def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">; def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">; +def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">; +def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">; //===----------------------------------------------------------------------===// // X86 Instruction Format Definitions. @@ -827,21 +861,60 @@ def X86_COND_O : PatLeaf<(i8 13)>; def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE def X86_COND_S : PatLeaf<(i8 15)>; -let FastIselShouldIgnore = 1 in { // FastIsel should ignore all simm8 instrs. - def i16immSExt8 : ImmLeaf; - def i32immSExt8 : ImmLeaf; - def i64immSExt8 : ImmLeaf; -} +// Predicate used to help when pattern matching LZCNT/TZCNT. +def X86_COND_E_OR_NE : ImmLeaf; -def i64immSExt32 : ImmLeaf; + +def i16immSExt8 : ImmLeaf(Imm); }]>; +def i32immSExt8 : ImmLeaf(Imm); }]>; +def i64immSExt8 : ImmLeaf(Imm); }]>; + +// If we have multiple users of an immediate, it's much smaller to reuse +// the register, rather than encode the immediate in every instruction. +// This has the risk of increasing register pressure from stretched live +// ranges, however, the immediates should be trivial to rematerialize by +// the RA in the event of high register pressure. +// TODO : This is currently enabled for stores and binary ops. There are more +// cases for which this can be enabled, though this catches the bulk of the +// issues. +// TODO2 : This should really also be enabled under O2, but there's currently +// an issue with RA where we don't pull the constants into their users +// when we rematerialize them. I'll follow-up on enabling O2 after we fix that +// issue. +// TODO3 : This is currently limited to single basic blocks (DAG creation +// pulls block immediates to the top and merges them if necessary). +// Eventually, it would be nice to allow ConstantHoisting to merge constants +// globally for potentially added savings. +// +def imm8_su : PatLeaf<(i8 imm), [{ + return !shouldAvoidImmediateInstFormsForSize(N); +}]>; +def imm16_su : PatLeaf<(i16 imm), [{ + return !shouldAvoidImmediateInstFormsForSize(N); +}]>; +def imm32_su : PatLeaf<(i32 imm), [{ + return !shouldAvoidImmediateInstFormsForSize(N); +}]>; + +def i16immSExt8_su : PatLeaf<(i16immSExt8), [{ + return !shouldAvoidImmediateInstFormsForSize(N); +}]>; +def i32immSExt8_su : PatLeaf<(i32immSExt8), [{ + return !shouldAvoidImmediateInstFormsForSize(N); +}]>; + + +def i64immSExt32 : ImmLeaf(Imm); }]>; // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit // unsigned field. -def i64immZExt32 : ImmLeaf; +def i64immZExt32 : ImmLeaf(Imm); }]>; def i64immZExt32SExt8 : ImmLeaf(Imm) && isInt<8>(static_cast(Imm)); }]>; // Helper fragments for loads. 
@@ -929,12 +1002,12 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{ // // Nop -let neverHasSideEffects = 1, SchedRW = [WriteZero] in { +let hasSideEffects = 0, SchedRW = [WriteZero] in { def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", [], IIC_NOP>; - def NOOPW : I<0x1f, MRM0m, (outs), (ins i16mem:$zero), - "nop{w}\t$zero", [], IIC_NOP>, TB, OpSize; - def NOOPL : I<0x1f, MRM0m, (outs), (ins i32mem:$zero), - "nop{l}\t$zero", [], IIC_NOP>, TB; + def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero), + "nop{w}\t$zero", [], IIC_NOP>, TB, OpSize16; + def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero), + "nop{l}\t$zero", [], IIC_NOP>, TB, OpSize32; } @@ -943,12 +1016,12 @@ def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl), "enter\t$len, $lvl", [], IIC_ENTER>, Sched<[WriteMicrocoded]>; let SchedRW = [WriteALU] in { -let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in +let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", [], IIC_LEAVE>, Requires<[Not64BitMode]>; -let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in +let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", [], IIC_LEAVE>, Requires<[In64BitMode]>; @@ -958,111 +1031,120 @@ def LEAVE64 : I<0xC9, RawFrm, // Miscellaneous Instructions. // -let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in { +let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in { let mayLoad = 1, SchedRW = [WriteLoad] in { def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [], - IIC_POP_REG16>, OpSize; + IIC_POP_REG16>, OpSize16; def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", [], - IIC_POP_REG>, OpSize16, Requires<[Not64BitMode]>; + IIC_POP_REG>, OpSize32, Requires<[Not64BitMode]>; def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", [], - IIC_POP_REG>, OpSize; + IIC_POP_REG>, OpSize16; def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", [], - IIC_POP_MEM>, OpSize; + IIC_POP_MEM>, OpSize16; def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", [], - IIC_POP_REG>, OpSize16, Requires<[Not64BitMode]>; + IIC_POP_REG>, OpSize32, Requires<[Not64BitMode]>; def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", [], - IIC_POP_MEM>, Requires<[Not64BitMode]>; - -def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>, OpSize; -def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>, - OpSize16, Requires<[Not64BitMode]>; + IIC_POP_MEM>, OpSize32, Requires<[Not64BitMode]>; } // mayLoad, SchedRW let mayStore = 1, SchedRW = [WriteStore] in { def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[], - IIC_PUSH_REG>, OpSize; + IIC_PUSH_REG>, OpSize16; def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[], - IIC_PUSH_REG>, OpSize16, Requires<[Not64BitMode]>; + IIC_PUSH_REG>, OpSize32, Requires<[Not64BitMode]>; def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[], - IIC_PUSH_REG>, OpSize; -def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[], - IIC_PUSH_MEM>, OpSize; + IIC_PUSH_REG>, OpSize16; def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[], - IIC_PUSH_REG>, OpSize16, Requires<[Not64BitMode]>; -def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[], - IIC_PUSH_MEM>, 
OpSize16, Requires<[Not64BitMode]>; + IIC_PUSH_REG>, OpSize32, Requires<[Not64BitMode]>; def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm), - "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize, - Requires<[Not64BitMode]>; -def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm), - "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize16, - Requires<[Not64BitMode]>; + "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize16; def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm), - "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize, + "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize16; + +def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm), + "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize32, Requires<[Not64BitMode]>; def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm), - "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize16, + "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize32, Requires<[Not64BitMode]>; +} // mayStore, SchedRW + +let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in { +def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[], + IIC_PUSH_MEM>, OpSize16; +def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[], + IIC_PUSH_MEM>, OpSize32, Requires<[Not64BitMode]>; +} // mayLoad, mayStore, SchedRW + +} +let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, hasSideEffects=0, + SchedRW = [WriteLoad] in { +def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>, + OpSize16; +def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>, + OpSize32, Requires<[Not64BitMode]>; +} + +let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, hasSideEffects=0, + SchedRW = [WriteStore] in { def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", [], IIC_PUSH_F>, - OpSize; + OpSize16; def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", [], IIC_PUSH_F>, - OpSize16, Requires<[Not64BitMode]>; - -} // mayStore, SchedRW + OpSize32, Requires<[Not64BitMode]>; } -let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in { +let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in { let mayLoad = 1, SchedRW = [WriteLoad] in { def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", [], - IIC_POP_REG>, Requires<[In64BitMode]>; + IIC_POP_REG>, OpSize32, Requires<[In64BitMode]>; def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", [], - IIC_POP_REG>, Requires<[In64BitMode]>; + IIC_POP_REG>, OpSize32, Requires<[In64BitMode]>; def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", [], - IIC_POP_MEM>, Requires<[In64BitMode]>; + IIC_POP_MEM>, OpSize32, Requires<[In64BitMode]>; } // mayLoad, SchedRW let mayStore = 1, SchedRW = [WriteStore] in { def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", [], - IIC_PUSH_REG>, Requires<[In64BitMode]>; + IIC_PUSH_REG>, OpSize32, Requires<[In64BitMode]>; def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", [], - IIC_PUSH_REG>, Requires<[In64BitMode]>; -def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", [], - IIC_PUSH_MEM>, Requires<[In64BitMode]>; + IIC_PUSH_REG>, OpSize32, Requires<[In64BitMode]>; } // mayStore, SchedRW +let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in { +def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", [], + IIC_PUSH_MEM>, OpSize32, Requires<[In64BitMode]>; +} // mayLoad, mayStore, SchedRW } -let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1, +let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1, SchedRW = [WriteStore] in { def PUSH64i8 : 
Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm), "push{q}\t$imm", [], IIC_PUSH_IMM>, Requires<[In64BitMode]>; -def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm), - "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize, - Requires<[In64BitMode]>; def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm), - "push{q}\t$imm", [], IIC_PUSH_IMM>, Requires<[In64BitMode]>; + "push{q}\t$imm", [], IIC_PUSH_IMM>, OpSize32, + Requires<[In64BitMode]>; } -let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in +let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", [], IIC_POP_FD>, - Requires<[In64BitMode]>, Sched<[WriteLoad]>; -let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in + OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>; +let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, hasSideEffects=0 in def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>, - Requires<[In64BitMode]>, Sched<[WriteStore]>; + OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>; let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP], - mayLoad = 1, neverHasSideEffects = 1, SchedRW = [WriteLoad] in { + mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in { def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", [], IIC_POP_A>, - OpSize16, Requires<[Not64BitMode]>; + OpSize32, Requires<[Not64BitMode]>; def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", [], IIC_POP_A>, - OpSize, Requires<[Not64BitMode]>; + OpSize16, Requires<[Not64BitMode]>; } let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], - mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in { + mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in { def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", [], IIC_PUSH_A>, - OpSize16, Requires<[Not64BitMode]>; + OpSize32, Requires<[Not64BitMode]>; def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", [], IIC_PUSH_A>, - OpSize, Requires<[Not64BitMode]>; + OpSize16, Requires<[Not64BitMode]>; } let Constraints = "$src = $dst", SchedRW = [WriteALU] in { @@ -1070,7 +1152,7 @@ let Constraints = "$src = $dst", SchedRW = [WriteALU] in { def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "bswap{l}\t$dst", - [(set GR32:$dst, (bswap GR32:$src))], IIC_BSWAP>, OpSize16, TB; + [(set GR32:$dst, (bswap GR32:$src))], IIC_BSWAP>, OpSize32, TB; def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src), "bswap{q}\t$dst", @@ -1082,56 +1164,52 @@ let Defs = [EFLAGS] in { def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), "bsf{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))], - IIC_BIT_SCAN_REG>, TB, OpSize, Sched<[WriteShift]>; + IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>; def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "bsf{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))], - IIC_BIT_SCAN_MEM>, TB, OpSize, Sched<[WriteShiftLd]>; + IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>; def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "bsf{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))], - IIC_BIT_SCAN_REG>, TB, OpSize16, - Sched<[WriteShift]>; + IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>; def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "bsf{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))], - 
IIC_BIT_SCAN_MEM>, TB, OpSize16, Sched<[WriteShiftLd]>; + IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>; def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "bsf{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))], - IIC_BIT_SCAN_REG>, TB, Sched<[WriteShift]>; + IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>; def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "bsf{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))], - IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>; + IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>; def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), "bsr{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))], - IIC_BIT_SCAN_REG>, - TB, OpSize, Sched<[WriteShift]>; + IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>; def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "bsr{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))], - IIC_BIT_SCAN_MEM>, TB, - OpSize, Sched<[WriteShiftLd]>; + IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>; def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "bsr{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))], - IIC_BIT_SCAN_REG>, TB, OpSize16, - Sched<[WriteShift]>; + IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>; def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "bsr{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))], - IIC_BIT_SCAN_MEM>, TB, OpSize16, Sched<[WriteShiftLd]>; + IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>; def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "bsr{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BIT_SCAN_REG>, TB, - Sched<[WriteShift]>; + [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], + IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>; def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "bsr{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))], - IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>; + IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>; } // Defs = [EFLAGS] let SchedRW = [WriteMicrocoded] in { @@ -1140,9 +1218,9 @@ let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in { def MOVSB : I<0xA4, RawFrmDstSrc, (outs dstidx8:$dst), (ins srcidx8:$src), "movsb\t{$src, $dst|$dst, $src}", [], IIC_MOVS>; def MOVSW : I<0xA5, RawFrmDstSrc, (outs dstidx16:$dst), (ins srcidx16:$src), - "movsw\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize; + "movsw\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize16; def MOVSL : I<0xA5, RawFrmDstSrc, (outs dstidx32:$dst), (ins srcidx32:$src), - "movs{l|d}\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize16; + "movs{l|d}\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize32; def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs dstidx64:$dst), (ins srcidx64:$src), "movsq\t{$src, $dst|$dst, $src}", [], IIC_MOVS>; } @@ -1153,44 +1231,52 @@ def STOSB : I<0xAA, RawFrmDst, (outs dstidx8:$dst), (ins), "stosb\t{%al, $dst|$dst, al}", [], IIC_STOS>; let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in def STOSW : I<0xAB, RawFrmDst, (outs dstidx16:$dst), (ins), - "stosw\t{%ax, $dst|$dst, ax}", [], IIC_STOS>, OpSize; + "stosw\t{%ax, $dst|$dst, ax}", [], IIC_STOS>, OpSize16; let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in def STOSL : I<0xAB, RawFrmDst, (outs dstidx32:$dst), (ins), - "stos{l|d}\t{%eax, $dst|$dst, eax}", [], IIC_STOS>, OpSize16; 
+ "stos{l|d}\t{%eax, $dst|$dst, eax}", [], IIC_STOS>, OpSize32; let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in def STOSQ : RI<0xAB, RawFrmDst, (outs dstidx64:$dst), (ins), "stosq\t{%rax, $dst|$dst, rax}", [], IIC_STOS>; -def SCAS8 : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst), +// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI +let Defs = [EDI,EFLAGS], Uses = [AL,EDI,EFLAGS] in +def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst), "scasb\t{$dst, %al|al, $dst}", [], IIC_SCAS>; -def SCAS16 : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst), - "scasw\t{$dst, %ax|ax, $dst}", [], IIC_SCAS>, OpSize; -def SCAS32 : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst), - "scas{l|d}\t{$dst, %eax|eax, $dst}", [], IIC_SCAS>, OpSize16; -def SCAS64 : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst), - "scasq\t{$dst, %rax|rax, $dst}", [], IIC_SCAS>; - -def CMPS8 : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src), +let Defs = [EDI,EFLAGS], Uses = [AX,EDI,EFLAGS] in +def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst), + "scasw\t{$dst, %ax|ax, $dst}", [], IIC_SCAS>, OpSize16; +let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,EFLAGS] in +def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst), + "scas{l|d}\t{$dst, %eax|eax, $dst}", [], IIC_SCAS>, OpSize32; +let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,EFLAGS] in +def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst), + "scasq\t{$dst, %rax|rax, $dst}", [], IIC_SCAS>; + +// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI +let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,EFLAGS] in { +def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src), "cmpsb\t{$dst, $src|$src, $dst}", [], IIC_CMPS>; -def CMPS16 : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src), - "cmpsw\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize; -def CMPS32 : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src), - "cmps{l|d}\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize16; -def CMPS64 : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src), - "cmpsq\t{$dst, $src|$src, $dst}", [], IIC_CMPS>; +def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src), + "cmpsw\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize16; +def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src), + "cmps{l|d}\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize32; +def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src), + "cmpsq\t{$dst, $src|$src, $dst}", [], IIC_CMPS>; +} } // SchedRW //===----------------------------------------------------------------------===// // Move Instructions. 
// let SchedRW = [WriteMove] in { -let neverHasSideEffects = 1 in { +let hasSideEffects = 0 in { def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src), "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), - "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize; + "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16; def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), - "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16; + "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32; def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; } @@ -1201,29 +1287,41 @@ def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src), [(set GR8:$dst, imm:$src)], IIC_MOV>; def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src), "mov{w}\t{$src, $dst|$dst, $src}", - [(set GR16:$dst, imm:$src)], IIC_MOV>, OpSize; + [(set GR16:$dst, imm:$src)], IIC_MOV>, OpSize16; def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src), "mov{l}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, imm:$src)], IIC_MOV>, OpSize16; -def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src), - "movabs{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, imm:$src)], IIC_MOV>; + [(set GR32:$dst, imm:$src)], IIC_MOV>, OpSize32; def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, i64immSExt32:$src)], IIC_MOV>; } +let isReMaterializable = 1 in { +def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src), + "movabs{q}\t{$src, $dst|$dst, $src}", + [(set GR64:$dst, imm:$src)], IIC_MOV>; +} + +// Longer forms that use a ModR/M byte. Needed for disassembler +let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in { +def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src), + "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; +def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src), + "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16; +def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src), + "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32; +} } // SchedRW let SchedRW = [WriteStore] in { def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src), "mov{b}\t{$src, $dst|$dst, $src}", - [(store (i8 imm:$src), addr:$dst)], IIC_MOV_MEM>; + [(store (i8 imm8_su:$src), addr:$dst)], IIC_MOV_MEM>; def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src), "mov{w}\t{$src, $dst|$dst, $src}", - [(store (i16 imm:$src), addr:$dst)], IIC_MOV_MEM>, OpSize; + [(store (i16 imm16_su:$src), addr:$dst)], IIC_MOV_MEM>, OpSize16; def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src), "mov{l}\t{$src, $dst|$dst, $src}", - [(store (i32 imm:$src), addr:$dst)], IIC_MOV_MEM>, OpSize16; + [(store (i32 imm32_su:$src), addr:$dst)], IIC_MOV_MEM>, OpSize32; def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(store i64immSExt32:$src, addr:$dst)], IIC_MOV_MEM>; @@ -1231,83 +1329,102 @@ def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), let hasSideEffects = 0 in { -/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a -/// 32-bit offset from the segment base. These are only valid in x86-32 mode. +/// Memory offset versions of moves. 
The immediate is an address mode sized +/// offset from the segment base. let SchedRW = [WriteALU] in { let mayLoad = 1 in { -def MOV8o8a : Ii32 <0xA0, RawFrmMemOffs, (outs), (ins offset8:$src), - "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>, - Requires<[In32BitMode]>; -def MOV16o16a : Ii32 <0xA1, RawFrmMemOffs, (outs), (ins offset16:$src), - "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>, OpSize, - Requires<[In32BitMode]>; -def MOV32o32a : Ii32 <0xA1, RawFrmMemOffs, (outs), (ins offset32:$src), - "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>, - OpSize16, Requires<[In32BitMode]>; - -def MOV8o8a_16 : Ii16 <0xA0, RawFrmMemOffs, (outs), (ins offset8:$src), - "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>, - AdSize, Requires<[In16BitMode]>; -def MOV16o16a_16 : Ii16 <0xA1, RawFrmMemOffs, (outs), (ins offset16:$src), - "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>, OpSize, - AdSize, Requires<[In16BitMode]>; -def MOV32o32a_16 : Ii16 <0xA1, RawFrmMemOffs, (outs), (ins offset32:$src), - "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>, - AdSize, OpSize16, Requires<[In16BitMode]>; +let Defs = [AL] in +def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src), + "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>, + AdSize32; +let Defs = [AX] in +def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src), + "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>, + OpSize16, AdSize32; +let Defs = [EAX] in +def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src), + "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>, + OpSize32, AdSize32; +let Defs = [RAX] in +def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src), + "mov{q}\t{$src, %rax|rax, $src}", [], IIC_MOV_MEM>, + AdSize32; + +let Defs = [AL] in +def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src), + "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>, AdSize16; +let Defs = [AX] in +def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src), + "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>, + OpSize16, AdSize16; +let Defs = [EAX] in +def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src), + "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>, + AdSize16, OpSize32; } let mayStore = 1 in { -def MOV8ao8 : Ii32 <0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins), - "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, - Requires<[In32BitMode]>; -def MOV16ao16 : Ii32 <0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins), - "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>, OpSize, - Requires<[In32BitMode]>; -def MOV32ao32 : Ii32 <0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins), - "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>, - OpSize16, Requires<[In32BitMode]>; - -def MOV8ao8_16 : Ii16 <0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins), - "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, - AdSize, Requires<[In16BitMode]>; -def MOV16ao16_16 : Ii16 <0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins), - "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>, OpSize, - AdSize, Requires<[In16BitMode]>; -def MOV32ao32_16 : Ii16 <0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins), - "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>, - OpSize16, AdSize, Requires<[In16BitMode]>; +let Uses = [AL] in +def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs offset32_8:$dst), (ins), + "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize32; +let Uses = [AX] in +def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_16:$dst), (ins), + "mov{w}\t{%ax, $dst|$dst, ax}", 
[], IIC_MOV_MEM>, + OpSize16, AdSize32; +let Uses = [EAX] in +def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_32:$dst), (ins), + "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>, + OpSize32, AdSize32; +let Uses = [RAX] in +def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs offset32_64:$dst), (ins), + "mov{q}\t{%rax, $dst|$dst, rax}", [], IIC_MOV_MEM>, + AdSize32; + +let Uses = [AL] in +def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs offset16_8:$dst), (ins), + "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize16; +let Uses = [AX] in +def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_16:$dst), (ins), + "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>, + OpSize16, AdSize16; +let Uses = [EAX] in +def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_32:$dst), (ins), + "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>, + OpSize32, AdSize16; } } // These forms all have full 64-bit absolute addresses in their instructions // and use the movabs mnemonic to indicate this specific form. let mayLoad = 1 in { -def MOV64o8a : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset8:$src), - "movabs{b}\t{$src, %al|al, $src}", []>, - Requires<[In64BitMode]>; -def MOV64o16a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset16:$src), - "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize, - Requires<[In64BitMode]>; -def MOV64o32a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset32:$src), - "movabs{l}\t{$src, %eax|eax, $src}", []>, - Requires<[In64BitMode]>; -def MOV64o64a : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64:$src), - "movabs{q}\t{$src, %rax|rax, $src}", []>, - Requires<[In64BitMode]>; +let Defs = [AL] in +def MOV8ao64 : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src), + "movabs{b}\t{$src, %al|al, $src}", []>, AdSize64; +let Defs = [AX] in +def MOV16ao64 : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src), + "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize16, AdSize64; +let Defs = [EAX] in +def MOV32ao64 : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src), + "movabs{l}\t{$src, %eax|eax, $src}", []>, OpSize32, + AdSize64; +let Defs = [RAX] in +def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src), + "movabs{q}\t{$src, %rax|rax, $src}", []>, AdSize64; } let mayStore = 1 in { -def MOV64ao8 : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins), - "movabs{b}\t{%al, $dst|$dst, al}", []>, - Requires<[In64BitMode]>; -def MOV64ao16 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins), - "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize, - Requires<[In64BitMode]>; -def MOV64ao32 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins), - "movabs{l}\t{%eax, $dst|$dst, eax}", []>, - Requires<[In64BitMode]>; -def MOV64ao64 : RIi64<0xA3, RawFrmMemOffs, (outs offset64:$dst), (ins), - "movabs{q}\t{%rax, $dst|$dst, rax}", []>, - Requires<[In64BitMode]>; +let Uses = [AL] in +def MOV8o64a : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset64_8:$dst), (ins), + "movabs{b}\t{%al, $dst|$dst, al}", []>, AdSize64; +let Uses = [AX] in +def MOV16o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_16:$dst), (ins), + "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16, AdSize64; +let Uses = [EAX] in +def MOV32o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_32:$dst), (ins), + "movabs{l}\t{%eax, $dst|$dst, eax}", []>, OpSize32, + AdSize64; +let Uses = [RAX] in +def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs offset64_64:$dst), (ins), + "movabs{q}\t{%rax, $dst|$dst, rax}", []>, AdSize64; } } // hasSideEffects = 0 @@ 
-1316,9 +1433,9 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src), "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), - "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize; + "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16; def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), - "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16; + "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32; def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>; } @@ -1329,10 +1446,10 @@ def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src), [(set GR8:$dst, (loadi8 addr:$src))], IIC_MOV_MEM>; def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "mov{w}\t{$src, $dst|$dst, $src}", - [(set GR16:$dst, (loadi16 addr:$src))], IIC_MOV_MEM>, OpSize; + [(set GR16:$dst, (loadi16 addr:$src))], IIC_MOV_MEM>, OpSize16; def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "mov{l}\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (loadi32 addr:$src))], IIC_MOV_MEM>, OpSize16; + [(set GR32:$dst, (loadi32 addr:$src))], IIC_MOV_MEM>, OpSize32; def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (load addr:$src))], IIC_MOV_MEM>; @@ -1344,10 +1461,10 @@ def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src), [(store GR8:$src, addr:$dst)], IIC_MOV_MEM>; def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), "mov{w}\t{$src, $dst|$dst, $src}", - [(store GR16:$src, addr:$dst)], IIC_MOV_MEM>, OpSize; + [(store GR16:$src, addr:$dst)], IIC_MOV_MEM>, OpSize16; def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), "mov{l}\t{$src, $dst|$dst, $src}", - [(store GR32:$src, addr:$dst)], IIC_MOV_MEM>, OpSize16; + [(store GR32:$src, addr:$dst)], IIC_MOV_MEM>, OpSize32; def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(store GR64:$src, addr:$dst)], IIC_MOV_MEM>; @@ -1357,17 +1474,17 @@ def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), // that they can be used for copying and storing h registers, which can't be // encoded when a REX prefix is present. 
let isCodeGenOnly = 1 in { -let neverHasSideEffects = 1 in +let hasSideEffects = 0 in def MOV8rr_NOREX : I<0x88, MRMDestReg, (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src), "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>, Sched<[WriteMove]>; -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, hasSideEffects = 0 in def MOV8mr_NOREX : I<0x88, MRMDestMem, (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src), "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV_MEM>, Sched<[WriteStore]>; -let mayLoad = 1, neverHasSideEffects = 1, +let mayLoad = 1, hasSideEffects = 0, canFoldAsLoad = 1, isReMaterializable = 1 in def MOV8rm_NOREX : I<0x8A, MRMSrcMem, (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src), @@ -1381,7 +1498,7 @@ let SchedRW = [WriteALU] in { let Defs = [EFLAGS], Uses = [AH] in def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", [(set EFLAGS, (X86sahf AH))], IIC_AHF>; -let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in +let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", [], IIC_AHF>; // AH = flags } // SchedRW @@ -1394,11 +1511,11 @@ let SchedRW = [WriteALU] in { def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))], IIC_BT_RR>, - OpSize, TB; + OpSize16, TB; def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "bt{l}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))], IIC_BT_RR>, - OpSize16, TB; + OpSize32, TB; def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))], IIC_BT_RR>, TB; @@ -1415,13 +1532,13 @@ let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteALULd] in { // [(X86bt (loadi16 addr:$src1), GR16:$src2), // (implicit EFLAGS)] [], IIC_BT_MR - >, OpSize, TB, Requires<[FastBTMem]>; + >, OpSize16, TB, Requires<[FastBTMem]>; def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), "bt{l}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi32 addr:$src1), GR32:$src2), // (implicit EFLAGS)] [], IIC_BT_MR - >, OpSize16, TB, Requires<[FastBTMem]>; + >, OpSize32, TB, Requires<[FastBTMem]>; def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi64 addr:$src1), GR64:$src2), @@ -1434,11 +1551,11 @@ let SchedRW = [WriteALU] in { def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))], - IIC_BT_RI>, OpSize, TB; + IIC_BT_RI>, OpSize16, TB; def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2), "bt{l}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))], - IIC_BT_RI>, OpSize16, TB; + IIC_BT_RI>, OpSize32, TB; def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))], @@ -1452,11 +1569,11 @@ let SchedRW = [WriteALU] in { def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt (loadi16 addr:$src1), i16immSExt8:$src2)) - ], IIC_BT_MI>, OpSize, TB; + ], IIC_BT_MI>, OpSize16, TB; def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2), "bt{l}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt (loadi32 addr:$src1), i32immSExt8:$src2)) - ], 
IIC_BT_MI>, OpSize16, TB; + ], IIC_BT_MI>, OpSize32, TB; def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt (loadi64 addr:$src1), @@ -1467,10 +1584,10 @@ let hasSideEffects = 0 in { let SchedRW = [WriteALU] in { def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, - OpSize, TB; + OpSize16, TB; def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, - OpSize16, TB; + OpSize32, TB; def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; } // SchedRW @@ -1478,10 +1595,10 @@ def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, - OpSize, TB; + OpSize16, TB; def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, - OpSize16, TB; + OpSize32, TB; def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB; } @@ -1489,10 +1606,10 @@ def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), let SchedRW = [WriteALU] in { def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, - OpSize, TB; + OpSize16, TB; def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2), "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, - OpSize16, TB; + OpSize32, TB; def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; } // SchedRW @@ -1500,10 +1617,10 @@ def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, - OpSize, TB; + OpSize16, TB; def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2), "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, - OpSize16, TB; + OpSize32, TB; def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB; } @@ -1511,10 +1628,10 @@ def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), let SchedRW = [WriteALU] in { def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, - OpSize, TB; + OpSize16, TB; def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, - OpSize16, TB; + OpSize32, TB; def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; } // SchedRW @@ -1522,10 +1639,10 @@ def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, - OpSize, TB; + OpSize16, TB; def BTR32mr : I<0xB3, 
MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, - OpSize16, TB; + OpSize32, TB; def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB; } @@ -1533,10 +1650,10 @@ def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), let SchedRW = [WriteALU] in { def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, - OpSize, TB; + OpSize16, TB; def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2), "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, - OpSize16, TB; + OpSize32, TB; def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; } // SchedRW @@ -1544,10 +1661,10 @@ def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2), let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, - OpSize, TB; + OpSize16, TB; def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2), "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, - OpSize16, TB; + OpSize32, TB; def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB; } @@ -1555,21 +1672,21 @@ def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2), let SchedRW = [WriteALU] in { def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, - OpSize, TB; + OpSize16, TB; def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, - OpSize16, TB; + OpSize32, TB; def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), - "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; + "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB; } // SchedRW let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in { def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), - "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, - OpSize, TB; + "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, + OpSize16, TB; def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), - "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, - OpSize16, TB; + "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, + OpSize32, TB; def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB; } @@ -1577,10 +1694,10 @@ def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), let SchedRW = [WriteALU] in { def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2), "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, - OpSize, TB; + OpSize16, TB; def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2), "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, - OpSize16, TB; + OpSize32, TB; def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2), "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB; } // SchedRW @@ -1588,10 +1705,10 @@ def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2), let mayLoad = 1, mayStore = 1, SchedRW = 
[WriteALULd, WriteRMW] in { def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, - OpSize, TB; + OpSize16, TB; def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2), "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, - OpSize16, TB; + OpSize32, TB; def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB; } @@ -1621,14 +1738,14 @@ multiclass ATOMIC_SWAP opc8, bits<8> opc, string mnemonic, string frag, [(set GR16:$dst, (!cast(frag # "_16") addr:$ptr, GR16:$val))], - itin>, OpSize; + itin>, OpSize16; def NAME#32rm : I(frag # "_32") addr:$ptr, GR32:$val))], - itin>, OpSize16; + itin>, OpSize32; def NAME#64rm : RI; def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src), - "xchg{w}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>, OpSize; + "xchg{w}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>, + OpSize16; def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src), "xchg{l}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>, - OpSize16; + OpSize32; def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src), "xchg{q}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>; } // Swap between EAX and other registers. +let Uses = [AX], Defs = [AX] in def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src), - "xchg{w}\t{$src, %ax|ax, $src}", [], IIC_XCHG_REG>, OpSize; + "xchg{w}\t{$src, %ax|ax, $src}", [], IIC_XCHG_REG>, OpSize16; +let Uses = [EAX], Defs = [EAX] in def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src), "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>, - OpSize16, Requires<[Not64BitMode]>; + OpSize32, Requires<[Not64BitMode]>; +let Uses = [EAX], Defs = [EAX] in // Uses GR32_NOAX in 64-bit mode to prevent encoding using the 0x90 NOP encoding. // xchg %eax, %eax needs to clear upper 32-bits of RAX so is not a NOP. 
def XCHG32ar64 : I<0x90, AddRegFrm, (outs), (ins GR32_NOAX:$src), "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>, - Requires<[In64BitMode]>; + OpSize32, Requires<[In64BitMode]>; +let Uses = [RAX], Defs = [RAX] in def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src), "xchg{q}\t{$src, %rax|rax, $src}", [], IIC_XCHG_REG>; } // SchedRW @@ -1675,10 +1797,10 @@ def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB; def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), "xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB, - OpSize; + OpSize16; def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB, - OpSize16; + OpSize32; def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), "xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB; } // SchedRW @@ -1688,10 +1810,10 @@ def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB; def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), "xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB, - OpSize; + OpSize16; def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB, - OpSize16; + OpSize32; def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB; @@ -1703,10 +1825,10 @@ def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), IIC_CMPXCHG_REG8>, TB; def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), "cmpxchg{w}\t{$src, $dst|$dst, $src}", [], - IIC_CMPXCHG_REG>, TB, OpSize; + IIC_CMPXCHG_REG>, TB, OpSize16; def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), "cmpxchg{l}\t{$src, $dst|$dst, $src}", [], - IIC_CMPXCHG_REG>, TB, OpSize16; + IIC_CMPXCHG_REG>, TB, OpSize32; def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), "cmpxchg{q}\t{$src, $dst|$dst, $src}", [], IIC_CMPXCHG_REG>, TB; @@ -1719,10 +1841,10 @@ def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), IIC_CMPXCHG_MEM8>, TB; def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), "cmpxchg{w}\t{$src, $dst|$dst, $src}", [], - IIC_CMPXCHG_MEM>, TB, OpSize; + IIC_CMPXCHG_MEM>, TB, OpSize16; def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), "cmpxchg{l}\t{$src, $dst|$dst, $src}", [], - IIC_CMPXCHG_MEM>, TB, OpSize16; + IIC_CMPXCHG_MEM>, TB, OpSize32; def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "cmpxchg{q}\t{$src, $dst|$dst, $src}", [], IIC_CMPXCHG_MEM>, TB; @@ -1761,23 +1883,41 @@ def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>; // String manipulation instructions let SchedRW = [WriteMicrocoded] in { +// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI +let Defs = [AL,ESI], Uses = [ESI,EFLAGS] in def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src), "lodsb\t{$src, %al|al, $src}", [], IIC_LODS>; +let Defs = [AX,ESI], Uses = [ESI,EFLAGS] in def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src), - "lodsw\t{$src, %ax|ax, $src}", [], IIC_LODS>, OpSize; + "lodsw\t{$src, %ax|ax, $src}", [], IIC_LODS>, OpSize16; +let Defs = [EAX,ESI], Uses = [ESI,EFLAGS] in def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src), - "lods{l|d}\t{$src, %eax|eax, $src}", [], 
IIC_LODS>, OpSize16; + "lods{l|d}\t{$src, %eax|eax, $src}", [], IIC_LODS>, OpSize32; +let Defs = [RAX,ESI], Uses = [ESI,EFLAGS] in def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src), "lodsq\t{$src, %rax|rax, $src}", [], IIC_LODS>; } let SchedRW = [WriteSystem] in { +// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI +let Defs = [ESI], Uses = [DX,ESI,EFLAGS] in { def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src), "outsb\t{$src, %dx|dx, $src}", [], IIC_OUTS>; def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src), - "outsw\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize; + "outsw\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize16; def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src), - "outs{l|d}\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize16; + "outs{l|d}\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize32; +} + +// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI +let Defs = [EDI], Uses = [DX,EDI,EFLAGS] in { +def INSB : I<0x6C, RawFrmDst, (outs dstidx8:$dst), (ins), + "insb\t{%dx, $dst|$dst, dx}", [], IIC_INS>; +def INSW : I<0x6D, RawFrmDst, (outs dstidx16:$dst), (ins), + "insw\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize16; +def INSL : I<0x6D, RawFrmDst, (outs dstidx32:$dst), (ins), + "ins{l|d}\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize32; +} } // Flag instructions @@ -1832,10 +1972,10 @@ def DAS : I<0x2F, RawFrm, (outs), (ins), "das", [], IIC_DAS>, let SchedRW = [WriteSystem] in { // Check Array Index Against Bounds def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), - "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize, + "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize16, Requires<[Not64BitMode]>; def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), - "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize16, + "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize32, Requires<[Not64BitMode]>; // Adjust RPL Field of Segment Selector @@ -1855,29 +1995,29 @@ let Predicates = [HasMOVBE] in { def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "movbe{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, (bswap (loadi16 addr:$src)))], IIC_MOVBE>, - OpSize, T8; + OpSize16, T8PS; def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "movbe{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (bswap (loadi32 addr:$src)))], IIC_MOVBE>, - OpSize16, T8; + OpSize32, T8PS; def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "movbe{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (bswap (loadi64 addr:$src)))], IIC_MOVBE>, - T8; + T8PS; } let SchedRW = [WriteStore] in { def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), "movbe{w}\t{$src, $dst|$dst, $src}", [(store (bswap GR16:$src), addr:$dst)], IIC_MOVBE>, - OpSize, T8; + OpSize16, T8PS; def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), "movbe{l}\t{$src, $dst|$dst, $src}", [(store (bswap GR32:$src), addr:$dst)], IIC_MOVBE>, - OpSize16, T8; + OpSize32, T8PS; def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "movbe{q}\t{$src, $dst|$dst, $src}", [(store (bswap GR64:$src), addr:$dst)], IIC_MOVBE>, - T8; + T8PS; } } @@ -1887,10 +2027,10 @@ let Predicates = [HasMOVBE] in { let Predicates = [HasRDRAND], Defs = [EFLAGS] in { def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins), "rdrand{w}\t$dst", - [(set GR16:$dst, EFLAGS, (X86rdrand))]>, OpSize, TB; + [(set GR16:$dst, EFLAGS, (X86rdrand))]>, OpSize16, TB; def 
RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins), "rdrand{l}\t$dst", - [(set GR32:$dst, EFLAGS, (X86rdrand))]>, OpSize16, TB; + [(set GR32:$dst, EFLAGS, (X86rdrand))]>, OpSize32, TB; def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins), "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>, TB; @@ -1902,10 +2042,10 @@ let Predicates = [HasRDRAND], Defs = [EFLAGS] in { let Predicates = [HasRDSEED], Defs = [EFLAGS] in { def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst", - [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize, TB; + [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB; def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst", - [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB; + [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, TB; def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB; @@ -1918,20 +2058,20 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in { def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), "lzcnt{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>, XS, - OpSize; + OpSize16; def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "lzcnt{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, (ctlz (loadi16 addr:$src))), - (implicit EFLAGS)]>, XS, OpSize; + (implicit EFLAGS)]>, XS, OpSize16; def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "lzcnt{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>, XS, - OpSize16; + OpSize32; def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "lzcnt{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (ctlz (loadi32 addr:$src))), - (implicit EFLAGS)]>, XS, OpSize16; + (implicit EFLAGS)]>, XS, OpSize32; def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "lzcnt{q}\t{$src, $dst|$dst, $src}", @@ -1943,6 +2083,46 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in { (implicit EFLAGS)]>, XS; } +let Predicates = [HasLZCNT] in { + def : Pat<(X86cmov (ctlz GR16:$src), (i16 16), (X86_COND_E_OR_NE), + (X86cmp GR16:$src, (i16 0))), + (LZCNT16rr GR16:$src)>; + def : Pat<(X86cmov (ctlz GR32:$src), (i32 32), (X86_COND_E_OR_NE), + (X86cmp GR32:$src, (i32 0))), + (LZCNT32rr GR32:$src)>; + def : Pat<(X86cmov (ctlz GR64:$src), (i64 64), (X86_COND_E_OR_NE), + (X86cmp GR64:$src, (i64 0))), + (LZCNT64rr GR64:$src)>; + def : Pat<(X86cmov (i16 16), (ctlz GR16:$src), (X86_COND_E_OR_NE), + (X86cmp GR16:$src, (i16 0))), + (LZCNT16rr GR16:$src)>; + def : Pat<(X86cmov (i32 32), (ctlz GR32:$src), (X86_COND_E_OR_NE), + (X86cmp GR32:$src, (i32 0))), + (LZCNT32rr GR32:$src)>; + def : Pat<(X86cmov (i64 64), (ctlz GR64:$src), (X86_COND_E_OR_NE), + (X86cmp GR64:$src, (i64 0))), + (LZCNT64rr GR64:$src)>; + + def : Pat<(X86cmov (ctlz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE), + (X86cmp (loadi16 addr:$src), (i16 0))), + (LZCNT16rm addr:$src)>; + def : Pat<(X86cmov (ctlz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE), + (X86cmp (loadi32 addr:$src), (i32 0))), + (LZCNT32rm addr:$src)>; + def : Pat<(X86cmov (ctlz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE), + (X86cmp (loadi64 addr:$src), (i64 0))), + (LZCNT64rm addr:$src)>; + def : Pat<(X86cmov (i16 16), (ctlz (loadi16 addr:$src)), (X86_COND_E_OR_NE), + (X86cmp (loadi16 addr:$src), (i16 0))), + (LZCNT16rm addr:$src)>; + def : Pat<(X86cmov (i32 32), (ctlz (loadi32 addr:$src)), (X86_COND_E_OR_NE), + (X86cmp (loadi32 addr:$src), (i32 
0))), + (LZCNT32rm addr:$src)>; + def : Pat<(X86cmov (i64 64), (ctlz (loadi64 addr:$src)), (X86_COND_E_OR_NE), + (X86cmp (loadi64 addr:$src), (i64 0))), + (LZCNT64rm addr:$src)>; +} + //===----------------------------------------------------------------------===// // BMI Instructions // @@ -1950,20 +2130,20 @@ let Predicates = [HasBMI], Defs = [EFLAGS] in { def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), "tzcnt{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>, XS, - OpSize; + OpSize16; def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), "tzcnt{w}\t{$src, $dst|$dst, $src}", [(set GR16:$dst, (cttz (loadi16 addr:$src))), - (implicit EFLAGS)]>, XS, OpSize; + (implicit EFLAGS)]>, XS, OpSize16; def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "tzcnt{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>, XS, - OpSize16; + OpSize32; def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), "tzcnt{l}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (cttz (loadi32 addr:$src))), - (implicit EFLAGS)]>, XS, OpSize16; + (implicit EFLAGS)]>, XS, OpSize32; def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), "tzcnt{q}\t{$src, $dst|$dst, $src}", @@ -1976,43 +2156,101 @@ let Predicates = [HasBMI], Defs = [EFLAGS] in { } multiclass bmi_bls { + RegisterClass RC, X86MemOperand x86memop> { +let hasSideEffects = 0 in { def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src), !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), - [(set RC:$dst, (OpNode RC:$src)), (implicit EFLAGS)]>, T8, VEX_4V; + []>, T8PS, VEX_4V; + let mayLoad = 1 in def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src), !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), - [(set RC:$dst, (OpNode (ld_frag addr:$src))), (implicit EFLAGS)]>, - T8, VEX_4V; + []>, T8PS, VEX_4V; +} } let Predicates = [HasBMI], Defs = [EFLAGS] in { - defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem, - X86blsr, loadi32>; - defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem, - X86blsr, loadi64>, VEX_W; - defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem, - X86blsmsk, loadi32>; - defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem, - X86blsmsk, loadi64>, VEX_W; - defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem, - X86blsi, loadi32>; - defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem, - X86blsi, loadi64>, VEX_W; + defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem>; + defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem>, VEX_W; + defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem>; + defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem>, VEX_W; + defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem>; + defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem>, VEX_W; } +//===----------------------------------------------------------------------===// +// Pattern fragments to auto generate BMI instructions. 
+//===----------------------------------------------------------------------===// + +let Predicates = [HasBMI] in { + // FIXME: patterns for the load versions are not implemented + def : Pat<(and GR32:$src, (add GR32:$src, -1)), + (BLSR32rr GR32:$src)>; + def : Pat<(and GR64:$src, (add GR64:$src, -1)), + (BLSR64rr GR64:$src)>; + + def : Pat<(xor GR32:$src, (add GR32:$src, -1)), + (BLSMSK32rr GR32:$src)>; + def : Pat<(xor GR64:$src, (add GR64:$src, -1)), + (BLSMSK64rr GR64:$src)>; + + def : Pat<(and GR32:$src, (ineg GR32:$src)), + (BLSI32rr GR32:$src)>; + def : Pat<(and GR64:$src, (ineg GR64:$src)), + (BLSI64rr GR64:$src)>; +} + +let Predicates = [HasBMI] in { + def : Pat<(X86cmov (cttz GR16:$src), (i16 16), (X86_COND_E_OR_NE), + (X86cmp GR16:$src, (i16 0))), + (TZCNT16rr GR16:$src)>; + def : Pat<(X86cmov (cttz GR32:$src), (i32 32), (X86_COND_E_OR_NE), + (X86cmp GR32:$src, (i32 0))), + (TZCNT32rr GR32:$src)>; + def : Pat<(X86cmov (cttz GR64:$src), (i64 64), (X86_COND_E_OR_NE), + (X86cmp GR64:$src, (i64 0))), + (TZCNT64rr GR64:$src)>; + def : Pat<(X86cmov (i16 16), (cttz GR16:$src), (X86_COND_E_OR_NE), + (X86cmp GR16:$src, (i16 0))), + (TZCNT16rr GR16:$src)>; + def : Pat<(X86cmov (i32 32), (cttz GR32:$src), (X86_COND_E_OR_NE), + (X86cmp GR32:$src, (i32 0))), + (TZCNT32rr GR32:$src)>; + def : Pat<(X86cmov (i64 64), (cttz GR64:$src), (X86_COND_E_OR_NE), + (X86cmp GR64:$src, (i64 0))), + (TZCNT64rr GR64:$src)>; + + def : Pat<(X86cmov (cttz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE), + (X86cmp (loadi16 addr:$src), (i16 0))), + (TZCNT16rm addr:$src)>; + def : Pat<(X86cmov (cttz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE), + (X86cmp (loadi32 addr:$src), (i32 0))), + (TZCNT32rm addr:$src)>; + def : Pat<(X86cmov (cttz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE), + (X86cmp (loadi64 addr:$src), (i64 0))), + (TZCNT64rm addr:$src)>; + def : Pat<(X86cmov (i16 16), (cttz (loadi16 addr:$src)), (X86_COND_E_OR_NE), + (X86cmp (loadi16 addr:$src), (i16 0))), + (TZCNT16rm addr:$src)>; + def : Pat<(X86cmov (i32 32), (cttz (loadi32 addr:$src)), (X86_COND_E_OR_NE), + (X86cmp (loadi32 addr:$src), (i32 0))), + (TZCNT32rm addr:$src)>; + def : Pat<(X86cmov (i64 64), (cttz (loadi64 addr:$src)), (X86_COND_E_OR_NE), + (X86cmp (loadi64 addr:$src), (i64 0))), + (TZCNT64rm addr:$src)>; +} + + multiclass bmi_bextr_bzhi opc, string mnemonic, RegisterClass RC, X86MemOperand x86memop, Intrinsic Int, PatFrag ld_frag> { def rr : I, - T8, VEX_4VOp3; + T8PS, VEX_4VOp3; def rm : I, T8, VEX_4VOp3; + (implicit EFLAGS)]>, T8PS, VEX_4VOp3; } let Predicates = [HasBMI], Defs = [EFLAGS] in { @@ -2029,18 +2267,38 @@ let Predicates = [HasBMI2], Defs = [EFLAGS] in { int_x86_bmi_bzhi_64, loadi64>, VEX_W; } -def : Pat<(X86bzhi GR32:$src1, GR8:$src2), - (BZHI32rr GR32:$src1, - (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; -def : Pat<(X86bzhi (loadi32 addr:$src1), GR8:$src2), - (BZHI32rm addr:$src1, - (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; -def : Pat<(X86bzhi GR64:$src1, GR8:$src2), - (BZHI64rr GR64:$src1, - (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; -def : Pat<(X86bzhi (loadi64 addr:$src1), GR8:$src2), - (BZHI64rm addr:$src1, - (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; + +def CountTrailingOnes : SDNodeXFormgetZExtValue()), SDLoc(N)); +}]>; + +def BZHIMask : ImmLeaf(Imm) > 32); +}]>; + +let Predicates = [HasBMI2] in { + def : Pat<(and GR64:$src, BZHIMask:$mask), + (BZHI64rr GR64:$src, + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + (MOV8ri (CountTrailingOnes 
imm:$mask)), sub_8bit))>; + + def : Pat<(and GR32:$src, (add (shl 1, GR8:$lz), -1)), + (BZHI32rr GR32:$src, + (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>; + + def : Pat<(and (loadi32 addr:$src), (add (shl 1, GR8:$lz), -1)), + (BZHI32rm addr:$src, + (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>; + + def : Pat<(and GR64:$src, (add (shl 1, GR8:$lz), -1)), + (BZHI64rr GR64:$src, + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>; + + def : Pat<(and (loadi64 addr:$src), (add (shl 1, GR8:$lz), -1)), + (BZHI64rm addr:$src, + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>; +} // HasBMI2 let Predicates = [HasBMI] in { def : Pat<(X86bextr GR32:$src1, GR32:$src2), @@ -2205,6 +2463,16 @@ let Predicates = [HasTBM] in { (TZMSK64rr GR64:$src)>; } // HasTBM +//===----------------------------------------------------------------------===// +// Memory Instructions +// + +def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src), + "clflushopt\t$src", []>, PD; +def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src", []>, PD; +def PCOMMIT : I<0xAE, MRM_F8, (outs), (ins), "pcommit", []>, PD; + + //===----------------------------------------------------------------------===// // Subsystems. //===----------------------------------------------------------------------===// @@ -2233,10 +2501,14 @@ include "X86InstrAVX512.td" include "X86InstrMMX.td" include "X86Instr3DNow.td" +// MPX instructions +include "X86InstrMPX.td" + include "X86InstrVMX.td" include "X86InstrSVM.td" include "X86InstrTSX.td" +include "X86InstrSGX.td" // System instructions. include "X86InstrSystem.td" @@ -2300,14 +2572,15 @@ def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>; def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>; def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>; -def : MnemonicAlias<"repe", "rep", "att">; -def : MnemonicAlias<"repz", "rep", "att">; -def : MnemonicAlias<"repnz", "repne", "att">; +def : MnemonicAlias<"repe", "rep">; +def : MnemonicAlias<"repz", "rep">; +def : MnemonicAlias<"repnz", "repne">; def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>; def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>; def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>; +def : MnemonicAlias<"sal", "shl", "intel">; def : MnemonicAlias<"salb", "shlb", "att">; def : MnemonicAlias<"salw", "shlw", "att">; def : MnemonicAlias<"sall", "shll", "att">; @@ -2355,7 +2628,13 @@ def : MnemonicAlias<"fldcww", "fldcw", "att">; def : MnemonicAlias<"fnstcww", "fnstcw", "att">; def : MnemonicAlias<"fnstsww", "fnstsw", "att">; def : MnemonicAlias<"fucomip", "fucompi", "att">; -def : MnemonicAlias<"fwait", "wait", "att">; +def : MnemonicAlias<"fwait", "wait">; + +def : MnemonicAlias<"fxsaveq", "fxsave64", "att">; +def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">; +def : MnemonicAlias<"xsaveq", "xsave64", "att">; +def : MnemonicAlias<"xrstorq", "xrstor64", "att">; +def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">; class CondCodeAlias, Requir // scas aliases. Accept the destination being omitted because it's implicit // in the mnemonic, or the mnemonic suffix being omitted because it's implicit // in the destination. 
-def : InstAlias<"scasb $dst", (SCAS8 dstidx8:$dst), 0>;
-def : InstAlias<"scasw $dst", (SCAS16 dstidx16:$dst), 0>;
-def : InstAlias<"scas{l|d} $dst", (SCAS32 dstidx32:$dst), 0>;
-def : InstAlias<"scasq $dst", (SCAS64 dstidx64:$dst), 0>, Requires<[In64BitMode]>;
-def : InstAlias<"scas {$dst, %al|al, $dst}", (SCAS8 dstidx8:$dst), 0>;
-def : InstAlias<"scas {$dst, %ax|ax, $dst}", (SCAS16 dstidx16:$dst), 0>;
-def : InstAlias<"scas {$dst, %eax|eax, $dst}", (SCAS32 dstidx32:$dst), 0>;
-def : InstAlias<"scas {$dst, %rax|rax, $dst}", (SCAS64 dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"scasb $dst", (SCASB dstidx8:$dst), 0>;
+def : InstAlias<"scasw $dst", (SCASW dstidx16:$dst), 0>;
+def : InstAlias<"scas{l|d} $dst", (SCASL dstidx32:$dst), 0>;
+def : InstAlias<"scasq $dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"scas {$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
+def : InstAlias<"scas {$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
+def : InstAlias<"scas {$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
+def : InstAlias<"scas {$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
 // div and idiv aliases for explicit A register.
 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
@@ -2542,38 +2821,38 @@ def : InstAlias<"fnstsw" , (FNSTSW16r)>;
 // lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
 // this is compatible with what GAS does.
-def : InstAlias<"lcall $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
-def : InstAlias<"ljmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
-def : InstAlias<"lcall *$dst", (FARCALL32m opaque48mem:$dst)>, Requires<[Not16BitMode]>;
-def : InstAlias<"ljmp *$dst", (FARJMP32m opaque48mem:$dst)>, Requires<[Not16BitMode]>;
-def : InstAlias<"lcall $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
-def : InstAlias<"ljmp $seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
-def : InstAlias<"lcall *$dst", (FARCALL16m opaque32mem:$dst)>, Requires<[In16BitMode]>;
-def : InstAlias<"ljmp *$dst", (FARJMP16m opaque32mem:$dst)>, Requires<[In16BitMode]>;
-
-def : InstAlias<"call *$dst", (CALL64m i16mem:$dst)>, Requires<[In64BitMode]>;
-def : InstAlias<"jmp *$dst", (JMP64m i16mem:$dst)>, Requires<[In64BitMode]>;
-def : InstAlias<"call *$dst", (CALL32m i16mem:$dst)>, Requires<[In32BitMode]>;
-def : InstAlias<"jmp *$dst", (JMP32m i16mem:$dst)>, Requires<[In32BitMode]>;
-def : InstAlias<"call *$dst", (CALL16m i16mem:$dst)>, Requires<[In16BitMode]>;
-def : InstAlias<"jmp *$dst", (JMP16m i16mem:$dst)>, Requires<[In16BitMode]>;
+def : InstAlias<"lcall $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"ljmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"lcall {*}$dst", (FARCALL32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"ljmp {*}$dst", (FARJMP32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"lcall $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"ljmp $seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"lcall {*}$dst", (FARCALL16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"ljmp {*}$dst", (FARJMP16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
+
+def : InstAlias<"call {*}$dst", (CALL64m i64mem:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"jmp {*}$dst", (JMP64m i64mem:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"call {*}$dst", (CALL32m i32mem:$dst), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"jmp {*}$dst", (JMP32m i32mem:$dst), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"call {*}$dst", (CALL16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"jmp {*}$dst", (JMP16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
 // "imul <imm>, B" is an alias for "imul <imm>, B, B".
-def : InstAlias<"imulw $imm, $r", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm)>;
-def : InstAlias<"imulw $imm, $r", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm)>;
-def : InstAlias<"imull $imm, $r", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm)>;
-def : InstAlias<"imull $imm, $r", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm)>;
-def : InstAlias<"imulq $imm, $r",(IMUL64rri32 GR64:$r, GR64:$r,i64i32imm:$imm)>;
-def : InstAlias<"imulq $imm, $r", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm)>;
+def : InstAlias<"imul{w} {$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
+def : InstAlias<"imul{w} {$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
+def : InstAlias<"imul{l} {$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
+def : InstAlias<"imul{l} {$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
+def : InstAlias<"imul{q} {$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
+def : InstAlias<"imul{q} {$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
 // inb %dx -> inb %al, %dx
 def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
 def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
 def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
-def : InstAlias<"inb\t$port", (IN8ri i8imm:$port), 0>;
-def : InstAlias<"inw\t$port", (IN16ri i8imm:$port), 0>;
-def : InstAlias<"inl\t$port", (IN32ri i8imm:$port), 0>;
+def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
+def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
+def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
 // jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
@@ -2589,48 +2868,48 @@ def : InstAlias<"jmpl $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
 // Force mov without a suffix with a segment and mem to prefer the 'l' form of
 // the move. All segment/mem forms are equivalent, this has the shortest
 // encoding.
-def : InstAlias<"mov $mem, $seg", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem)>;
-def : InstAlias<"mov $seg, $mem", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg)>;
+def : InstAlias<"mov {$mem, $seg|$seg, $mem}", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem), 0>;
+def : InstAlias<"mov {$seg, $mem|$mem, $seg}", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg), 0>;
 // Match 'movq <imm64>, <reg>' as an alias for movabsq.
-def : InstAlias<"movq $imm, $reg", (MOV64ri GR64:$reg, i64imm:$imm)>;
+def : InstAlias<"movq {$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
 // Match 'movq GR64, MMX' as an alias for movd.
-def : InstAlias<"movq $src, $dst",
+def : InstAlias<"movq {$src, $dst|$dst, $src}",
                 (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
-def : InstAlias<"movq $src, $dst",
+def : InstAlias<"movq {$src, $dst|$dst, $src}",
                 (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
 // movsx aliases
-def : InstAlias<"movsx $src, $dst", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX32rr8 GR32:$dst, GR8:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX32rr16 GR32:$dst, GR16:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX64rr8 GR64:$dst, GR8:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX64rr16 GR64:$dst, GR16:$src), 0>;
-def : InstAlias<"movsx $src, $dst", (MOVSX64rr32 GR64:$dst, GR32:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0>;
+def : InstAlias<"movsx {$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0>;
 // movzx aliases
-def : InstAlias<"movzx $src, $dst", (MOVZX16rr8 GR16:$dst, GR8:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX32rr8 GR32:$dst, GR8:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX32rr16 GR32:$dst, GR16:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX64rr8_Q GR64:$dst, GR8:$src), 0>;
-def : InstAlias<"movzx $src, $dst", (MOVZX64rr16_Q GR64:$dst, GR16:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX64rr8_Q GR64:$dst, GR8:$src), 0>;
+def : InstAlias<"movzx {$src, $dst|$dst, $src}", (MOVZX64rr16_Q GR64:$dst, GR16:$src), 0>;
 // Note: No GR32->GR64 movzx form.
 // outb %dx -> outb %al, %dx
 def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
 def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
 def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
-def : InstAlias<"outb\t$port", (OUT8ir i8imm:$port), 0>;
-def : InstAlias<"outw\t$port", (OUT16ir i8imm:$port), 0>;
-def : InstAlias<"outl\t$port", (OUT32ir i8imm:$port), 0>;
+def : InstAlias<"outb\t$port", (OUT8ir u8imm:$port), 0>;
+def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
+def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;
 // 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
 // effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
 // errors, since its encoding is the most compact.
-def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem)>;
+def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
 // shld/shrd op,op -> shld op, op, CL
 def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
@@ -2676,19 +2955,60 @@ defm : ShiftRotateByOneAlias<"ror", "ROR">;
 FIXME */
 // test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
-def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}", (TEST8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}", (TEST16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}", (TEST32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}", (TEST64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}",
+                (TEST8rm GR8 :$val, i8mem :$mem), 0>;
+def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}",
+                (TEST16rm GR16:$val, i16mem:$mem), 0>;
+def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}",
+                (TEST32rm GR32:$val, i32mem:$mem), 0>;
+def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}",
                (TEST64rm GR64:$val, i64mem:$mem), 0>;
 // xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
-def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}", (XCHG8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}", (XCHG16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}", (XCHG32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}", (XCHG64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
+                (XCHG8rm GR8 :$val, i8mem :$mem), 0>;
+def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
+                (XCHG16rm GR16:$val, i16mem:$mem), 0>;
+def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
+                (XCHG32rm GR32:$val, i32mem:$mem), 0>;
+def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
+                (XCHG64rm GR64:$val, i64mem:$mem), 0>;
 // xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
-def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src)>;
-def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src)>, Requires<[Not64BitMode]>;
-def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar64 GR32_NOAX:$src)>, Requires<[In64BitMode]>;
-def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src)>;
+def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
                (XCHG32ar GR32:$src), 0>, Requires<[Not64BitMode]>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
                (XCHG32ar64 GR32_NOAX:$src), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
+
+// These aliases exist to get the parser to prioritize matching 8-bit
+// immediate encodings over matching the implicit ax/eax/rax encodings. By
+// explicitly mentioning the A register here, these entries will be ordered
+// first due to the more explicit immediate type.
+def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
+def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
+def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
+def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
+def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}", (OR16ri8 AX, i16i8imm:$imm), 0>;
+def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
+def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
+def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;
+
+def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
+def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
+def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
+def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
+def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}", (OR32ri8 EAX, i32i8imm:$imm), 0>;
+def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
+def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
+def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;
+
+def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
+def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
+def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
+def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
+def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}", (OR64ri8 RAX, i64i8imm:$imm), 0>;
+def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
+def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
+def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;