+// Unary negate: dst = 0 - dst (two's complement negation), in 8/16/32-bit
+// register (r) and memory (m) forms.  16-bit forms carry the OpSize prefix.
+def NEG8r  : I<0xF6, MRM3r>,         // R8 = -R8 = 0-R8
+             II<(ops R8:$dst, R8:$src), "neg $dst">;
+def NEG16r : I<0xF7, MRM3r>, OpSize, // R16 = -R16 = 0-R16
+             II<(ops R16:$dst, R16:$src), "neg $dst">;
+def NEG32r : I<0xF7, MRM3r>,         // R32 = -R32 = 0-R32
+             II<(ops R32:$dst, R32:$src), "neg $dst">;
+def NEG8m  : Im8 <"neg", 0xF6, MRM3m>;         // [mem8]  = -[mem8] = 0-[mem8]
+def NEG16m : Im16<"neg", 0xF7, MRM3m>, OpSize; // [mem16] = -[mem16] = 0-[mem16]
+def NEG32m : Im32<"neg", 0xF7, MRM3m>;         // [mem32] = -[mem32] = 0-[mem32]
+
+// Bitwise complement: dst = ~dst, register and memory forms.
+def NOT8r  : I<0xF6, MRM2r>,         // R8 = ~R8 = R8^-1
+             II<(ops R8:$dst, R8:$src), "not $dst">;
+def NOT16r : I<0xF7, MRM2r>, OpSize, // R16 = ~R16 = R16^-1
+             II<(ops R16:$dst, R16:$src), "not $dst">;
+def NOT32r : I<0xF7, MRM2r>,         // R32 = ~R32 = R32^-1
+             II<(ops R32:$dst, R32:$src), "not $dst">;
+def NOT8m  : Im8 <"not", 0xF6, MRM2m>;         // [mem8]  = ~[mem8]  = [mem8^-1]
+def NOT16m : Im16<"not", 0xF7, MRM2m>, OpSize; // [mem16] = ~[mem16] = [mem16^-1]
+def NOT32m : Im32<"not", 0xF7, MRM2m>;         // [mem32] = ~[mem32] = [mem32^-1]
+
+// Increment by one, register and memory forms.
+def INC8r  : I<0xFE, MRM0r>,         // ++R8
+             II<(ops R8:$dst, R8:$src), "inc $dst">;
+def INC16r : I<0xFF, MRM0r>, OpSize, // ++R16
+             II<(ops R16:$dst, R16:$src), "inc $dst">;
+def INC32r : I<0xFF, MRM0r>,         // ++R32
+             II<(ops R32:$dst, R32:$src), "inc $dst">;
+def INC8m  : Im8 <"inc", 0xFE, MRM0m>;         // ++[mem8]
+def INC16m : Im16<"inc", 0xFF, MRM0m>, OpSize; // ++[mem16]
+def INC32m : Im32<"inc", 0xFF, MRM0m>;         // ++[mem32]
+
+// Decrement by one, register and memory forms.
+def DEC8r  : I<0xFE, MRM1r>,         // --R8
+             II<(ops R8:$dst, R8:$src), "dec $dst">;
+def DEC16r : I<0xFF, MRM1r>, OpSize, // --R16
+             II<(ops R16:$dst, R16:$src), "dec $dst">;
+def DEC32r : I<0xFF, MRM1r>,         // --R32
+             II<(ops R32:$dst, R32:$src), "dec $dst">;
+def DEC8m  : Im8 <"dec", 0xFE, MRM1m>;         // --[mem8]
+def DEC16m : Im16<"dec", 0xFF, MRM1m>, OpSize; // --[mem16]
+def DEC32m : Im32<"dec", 0xFF, MRM1m>;         // --[mem32]
+
+// Logical operators...
+def AND8rr   : I<0x20, MRMDestReg>,          // R8  &= R8
+               II<(ops R8:$dst, R8:$src1, R8:$src2), "and $dst, $src2">;
+def AND16rr  : I<0x21, MRMDestReg>, OpSize,  // R16 &= R16
+               II<(ops R16:$dst, R16:$src1, R16:$src2), "and $dst, $src2">;
+def AND32rr  : I<0x21, MRMDestReg>,          // R32 &= R32
+               II<(ops R32:$dst, R32:$src1, R32:$src2), "and $dst, $src2">;
+def AND8mr   : Im8  <"and", 0x20, MRMDestMem>;            // [mem8]  &= R8
+def AND16mr  : Im16 <"and", 0x21, MRMDestMem>, OpSize;    // [mem16] &= R16
+def AND32mr  : Im32 <"and", 0x21, MRMDestMem>;            // [mem32] &= R32
+def AND8rm   : Im8  <"and", 0x22, MRMSrcMem >;            // R8  &= [mem8]
+def AND16rm  : Im16 <"and", 0x23, MRMSrcMem >, OpSize;    // R16 &= [mem16]
+def AND32rm  : Im32 <"and", 0x23, MRMSrcMem >;            // R32 &= [mem32]
+
+def AND8ri   : Ii8   <0x80, MRM4r, (ops R8:$dst, R8:$src1, i8imm:$src2), "and $dst, $src2">;            // R8  &= imm8
+def AND16ri  : Ii16  <0x81, MRM4r, (ops R16:$dst, R16:$src1, i16imm:$src2), "and $dst, $src2">, OpSize; // R16 &= imm16
+def AND32ri  : Ii32  <"and", 0x81, MRM4r    >;                            // R32 &= imm32
+def AND8mi   : Im8i8 <"and", 0x80, MRM4m    >;             // [mem8]  &= imm8
+def AND16mi  : Im16i16<"and", 0x81, MRM4m   >, OpSize;     // [mem16] &= imm16
+def AND32mi  : Im32i32<"and", 0x81, MRM4m   >;             // [mem32] &= imm32
+
+// Short-immediate forms (opcode 0x83): 16/32-bit AND with an 8-bit immediate.
+def AND16ri8 : Ii8   <0x83, MRM4r, (ops R16:$dst, R16:$src1, i8imm:$src2), "and $dst, $src2" >, OpSize; // R16 &= imm8
+def AND32ri8 : Ii8   <0x83, MRM4r, (ops R32:$dst, R32:$src1, i8imm:$src2), "and $dst, $src2">;          // R32 &= imm8
+def AND16mi8 : Im16i8<"and", 0x83, MRM4m    >, OpSize;     // [mem16] &= imm8
+def AND32mi8 : Im32i8<"and", 0x83, MRM4m    >;             // [mem32] &= imm8
+
+
+def OR8rr   : I<0x08, MRMDestReg>,          // R8  |= R8
+              II<(ops R8:$dst, R8:$src1, R8:$src2), "or $dst, $src2">;
+def OR16rr  : I<0x09, MRMDestReg>, OpSize,  // R16 |= R16
+              II<(ops R16:$dst, R16:$src1, R16:$src2), "or $dst, $src2">;
+def OR32rr  : I<0x09, MRMDestReg>,          // R32 |= R32
+              II<(ops R32:$dst, R32:$src1, R32:$src2), "or $dst, $src2">;
+def OR8mr   : Im8  <"or" , 0x08, MRMDestMem>;            // [mem8]  |= R8
+def OR16mr  : Im16 <"or" , 0x09, MRMDestMem>, OpSize;    // [mem16] |= R16
+def OR32mr  : Im32 <"or" , 0x09, MRMDestMem>;            // [mem32] |= R32
+def OR8rm   : Im8  <"or" , 0x0A, MRMSrcMem >;            // R8  |= [mem8]
+def OR16rm  : Im16 <"or" , 0x0B, MRMSrcMem >, OpSize;    // R16 |= [mem16]
+def OR32rm  : Im32 <"or" , 0x0B, MRMSrcMem >;            // R32 |= [mem32]
+
+def OR8ri   : Ii8   <0x80, MRM1r, (ops R8:$dst, R8:$src1, i8imm:$src2), "or $dst, $src2">;            // R8  |= imm8
+def OR16ri  : Ii16  <0x81, MRM1r, (ops R16:$dst, R16:$src1, i16imm:$src2), "or $dst, $src2">, OpSize; // R16 |= imm16
+def OR32ri  : Ii32  <"or" , 0x81, MRM1r    >;                            // R32 |= imm32
+def OR8mi   : Im8i8 <"or" , 0x80, MRM1m    >;             // [mem8]  |= imm8
+def OR16mi  : Im16i16<"or" , 0x81, MRM1m   >, OpSize;     // [mem16] |= imm16
+def OR32mi  : Im32i32<"or" , 0x81, MRM1m   >;             // [mem32] |= imm32
+
+// Short-immediate forms (opcode 0x83): 16/32-bit OR with an 8-bit immediate.
+// Note: OR16ri8 operates on 16-bit registers, so its operands are R16 (not
+// R8), matching AND16ri8/XOR16ri8 and its OpSize prefix.
+def OR16ri8  : Ii8   <0x83, MRM1r, (ops R16:$dst, R16:$src1, i8imm:$src2), "or $dst, $src2">, OpSize; // R16 |= imm8
+def OR32ri8  : Ii8   <0x83, MRM1r, (ops R32:$dst, R32:$src1, i8imm:$src2), "or $dst, $src2">;         // R32 |= imm8
+def OR16mi8  : Im16i8<"or" , 0x83, MRM1m    >, OpSize;     // [mem16] |= imm8
+def OR32mi8  : Im32i8<"or" , 0x83, MRM1m    >;             // [mem32] |= imm8
+
+
+def XOR8rr   : I<0x30, MRMDestReg>,          // R8  ^= R8
+               II<(ops R8:$dst, R8:$src1, R8:$src2), "xor $dst, $src2">;
+def XOR16rr  : I<0x31, MRMDestReg>, OpSize,  // R16 ^= R16
+               II<(ops R16:$dst, R16:$src1, R16:$src2), "xor $dst, $src2">;
+def XOR32rr  : I<0x31, MRMDestReg>,          // R32 ^= R32
+               II<(ops R32:$dst, R32:$src1, R32:$src2), "xor $dst, $src2">;
+def XOR8mr   : Im8  <"xor", 0x30, MRMDestMem>;            // [mem8]  ^= R8
+def XOR16mr  : Im16 <"xor", 0x31, MRMDestMem>, OpSize;    // [mem16] ^= R16
+def XOR32mr  : Im32 <"xor", 0x31, MRMDestMem>;            // [mem32] ^= R32
+def XOR8rm   : Im8  <"xor", 0x32, MRMSrcMem >;            // R8  ^= [mem8]
+def XOR16rm  : Im16 <"xor", 0x33, MRMSrcMem >, OpSize;    // R16 ^= [mem16]
+def XOR32rm  : Im32 <"xor", 0x33, MRMSrcMem >;            // R32 ^= [mem32]
+
+def XOR8ri   : Ii8   <0x80, MRM6r, (ops R8:$dst, R8:$src1, i8imm:$src2), "xor $dst, $src2">;            // R8  ^= imm8
+def XOR16ri  : Ii16  <0x81, MRM6r, (ops R16:$dst, R16:$src1, i16imm:$src2), "xor $dst, $src2">, OpSize; // R16 ^= imm16
+def XOR32ri  : Ii32  <"xor", 0x81, MRM6r    >;                            // R32 ^= imm32
+def XOR8mi   : Im8i8 <"xor", 0x80, MRM6m    >;             // [mem8]  ^= imm8
+def XOR16mi  : Im16i16<"xor", 0x81, MRM6m   >, OpSize;     // [mem16] ^= imm16
+def XOR32mi  : Im32i32<"xor", 0x81, MRM6m   >;             // [mem32] ^= imm32
+
+// Short-immediate forms (opcode 0x83): 16/32-bit XOR with an 8-bit immediate.
+def XOR16ri8 : Ii8   <0x83, MRM6r, (ops R16:$dst, R16:$src1, i8imm:$src2), "xor $dst, $src2">, OpSize; // R16 ^= imm8
+def XOR32ri8 : Ii8   <0x83, MRM6r, (ops R32:$dst, R32:$src1, i8imm:$src2), "xor $dst, $src2">;         // R32 ^= imm8
+def XOR16mi8 : Im16i8<"xor", 0x83, MRM6m    >, OpSize;     // [mem16] ^= imm8
+def XOR32mi8 : Im32i8<"xor", 0x83, MRM6m    >;             // [mem32] ^= imm8
+
+// Shift instructions
+// FIXME: provide shorter instructions when imm8 == 1
+// Variable shifts: the shift count is implicitly in CL (hence Uses = [CL]);
+// printImplicitUsesAfter emits the implicit %CL operand after the explicit ones.
+let Uses = [CL], printImplicitUsesAfter = 1 in {
+  def SHL8rCL  : I<0xD2, MRM4r>        ,         // R8  <<= cl
+                 II<(ops R8:$dst, R8:$src), "shl $dst, %CL">;
+  def SHL16rCL : I<0xD3, MRM4r>, OpSize,         // R16 <<= cl
+                 II<(ops R16:$dst, R16:$src), "shl $dst, %CL">;
+  def SHL32rCL : I<0xD3, MRM4r>        ,         // R32 <<= cl
+                 II<(ops R32:$dst, R32:$src), "shl $dst, %CL">;
+  def SHL8mCL  : Im8 <"shl", 0xD2, MRM4m >        ;  // [mem8]  <<= cl
+  def SHL16mCL : Im16 <"shl", 0xD3, MRM4m >, OpSize; // [mem16] <<= cl
+  def SHL32mCL : Im32 <"shl", 0xD3, MRM4m >        ;  // [mem32] <<= cl
+}