1 //===- X86InstrInfo.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 instruction set, defining the instructions, and
11 // properties of the instructions which are needed for code generation, machine
12 // code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // X86 specific DAG Nodes.
// SelectionDAG type profiles for the X86-specific DAG nodes defined below.
// Each profile states (num results, num operands) plus per-operand type
// constraints that the instruction selector enforces.
20 def SDTIntShiftDOp: SDTypeProfile<1, 3,
21 [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
22 SDTCisInt<0>, SDTCisInt<3>]>;
24 def SDTX86CmpTest : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
26 def SDTX86Cmov : SDTypeProfile<1, 4,
27 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
28 SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
30 def SDTX86BrCond : SDTypeProfile<0, 3,
31 [SDTCisVT<0, OtherVT>,
32 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// NOTE(review): the opening "[..." constraint line of SDTX86SetCC (orig line
// 35) appears to be missing from this copy — verify against upstream.
34 def SDTX86SetCC : SDTypeProfile<1, 2,
36 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// NOTE(review): SDTX86cas is missing its closing constraint line (orig 39).
38 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
40 def SDTX86cas8 : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
42 def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i16>]>;
44 def SDT_X86CallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
// NOTE(review): SDT_X86CallSeqEnd is missing its continuation line (orig 46).
45 def SDT_X86CallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
48 def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
50 def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
52 def SDTX86RdTsc : SDTypeProfile<0, 0, []>;
54 def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
56 def SDT_X86TLSADDR : SDTypeProfile<1, 1, [SDTCisPtrTy<0>, SDTCisInt<1>]>;
58 def SDT_X86TLSTP : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
60 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
62 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
// X86-specific SelectionDAG nodes.  Each def binds an X86ISD opcode name
// (declared elsewhere in the backend) to one of the type profiles above,
// optionally with SDNP* node properties (chain, flag glue, memory effects).
64 def X86bsf : SDNode<"X86ISD::BSF", SDTIntUnaryOp>;
65 def X86bsr : SDNode<"X86ISD::BSR", SDTIntUnaryOp>;
66 def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>;
67 def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>;
69 def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
71 def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
// NOTE(review): X86brcond's node-property list (orig line 73) is missing.
72 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
74 def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
// Locked compare-and-swap nodes; both property lists are cut short here.
// NOTE(review): the closing of each property list (orig 78 / 81) is missing.
76 def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
77 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
79 def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8,
80 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
83 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
84 [SDNPHasChain, SDNPOptInFlag]>;
86 def X86callseq_start :
87 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
88 [SDNPHasChain, SDNPOutFlag]>;
// NOTE(review): the "def X86callseq_end :" header line (orig 89) is missing
// from this copy; line 90 is its SDNode body.
90 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
91 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
93 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
94 [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
96 def X86tailcall: SDNode<"X86ISD::TAILCALL", SDT_X86Call,
97 [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
// REP string-operation nodes: chained, glued, and marked as storing.
99 def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
100 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore]>;
101 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
102 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
105 def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG",SDTX86RdTsc,
106 [SDNPHasChain, SDNPOutFlag, SDNPSideEffect]>;
// Address wrappers for global/constant-pool references (RIP-relative in 64-bit).
108 def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
109 def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
111 def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
112 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
113 def X86TLStp : SDNode<"X86ISD::THREAD_POINTER", SDT_X86TLSTP, []>;
// NOTE(review): X86ehret's node-property list (orig 116) is missing.
115 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
118 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
119 [SDNPHasChain, SDNPOptInFlag]>;
121 //===----------------------------------------------------------------------===//
122 // X86 Operand Definitions.
125 // *mem - Operand definitions for the funky X86 addressing mode operands.
// Common base for memory operands: four MI sub-operands (base register,
// scale immediate, index register, displacement immediate); only the asm
// print method differs per access width.
// NOTE(review): the closing "}" of this class (orig 130) is absent here.
127 class X86MemOperand<string printMethod> : Operand<iPTR> {
128 let PrintMethod = printMethod;
129 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
132 def i8mem : X86MemOperand<"printi8mem">;
133 def i16mem : X86MemOperand<"printi16mem">;
134 def i32mem : X86MemOperand<"printi32mem">;
135 def i64mem : X86MemOperand<"printi64mem">;
136 def i128mem : X86MemOperand<"printi128mem">;
137 def f32mem : X86MemOperand<"printf32mem">;
138 def f64mem : X86MemOperand<"printf64mem">;
139 def f80mem : X86MemOperand<"printf80mem">;
140 def f128mem : X86MemOperand<"printf128mem">;
// lea32mem: like i32mem but base/index are constrained to GR32.
// NOTE(review): the closing "}" of lea32mem, SSECC and piclabel (orig
// 145 / 149 / 153) are absent from this copy.
142 def lea32mem : Operand<i32> {
143 let PrintMethod = "printi32mem";
144 let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
// SSE comparison-predicate immediate, printed symbolically.
147 def SSECC : Operand<i8> {
148 let PrintMethod = "printSSECC";
// PIC base label operand (used by MOVPC32r below).
151 def piclabel: Operand<i32> {
152 let PrintMethod = "printPICLabel";
155 // A couple of more descriptive operand definitions.
156 // 16-bits but only 8 bits are significant.
157 def i16i8imm : Operand<i16>;
158 // 32-bits but only 8 bits are significant.
159 def i32i8imm : Operand<i32>;
161 // Branch targets have OtherVT type.
162 def brtarget : Operand<OtherVT>;
164 //===----------------------------------------------------------------------===//
165 // X86 Complex Pattern Definitions.
168 // Define X86 specific addressing mode.
// addr matches any addressable expression via the C++ SelectAddr routine;
// lea32addr additionally roots on the listed opcodes for LEA formation.
169 def addr : ComplexPattern<iPTR, 4, "SelectAddr", [], []>;
170 def lea32addr : ComplexPattern<i32, 4, "SelectLEAAddr",
171 [add, mul, shl, or, frameindex], []>;
173 //===----------------------------------------------------------------------===//
174 // X86 Instruction Predicate Definitions.
// Subtarget-feature, mode, and code-model predicates referenced from the
// Requires<[...]> lists on instruction definitions throughout this file.
175 def HasMMX : Predicate<"Subtarget->hasMMX()">;
176 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
177 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
178 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
179 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
180 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
181 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
// FPStackf32/f64: select x87 stack forms when the matching SSE level is absent.
182 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
183 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
184 def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
185 def In64BitMode : Predicate<"Subtarget->is64Bit()">;
186 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
187 def NotSmallCode : Predicate<"TM.getCodeModel() != CodeModel::Small">;
188 def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
189 def OptForSpeed : Predicate<"!OptForSize">;
191 //===----------------------------------------------------------------------===//
192 // X86 Instruction Format Definitions.
195 include "X86InstrFormats.td"
197 //===----------------------------------------------------------------------===//
198 // Pattern fragments...
201 // X86 specific condition code. These correspond to CondCode in
202 // X86InstrInfo.h. They must be kept in synch.
// Encoded as i8 leaves so they can appear as the condition operand of
// X86cmov / X86brcond / X86setcc patterns below.
203 def X86_COND_A : PatLeaf<(i8 0)>;
204 def X86_COND_AE : PatLeaf<(i8 1)>;
205 def X86_COND_B : PatLeaf<(i8 2)>;
206 def X86_COND_BE : PatLeaf<(i8 3)>;
207 def X86_COND_E : PatLeaf<(i8 4)>;
208 def X86_COND_G : PatLeaf<(i8 5)>;
209 def X86_COND_GE : PatLeaf<(i8 6)>;
210 def X86_COND_L : PatLeaf<(i8 7)>;
211 def X86_COND_LE : PatLeaf<(i8 8)>;
212 def X86_COND_NE : PatLeaf<(i8 9)>;
213 def X86_COND_NO : PatLeaf<(i8 10)>;
214 def X86_COND_NP : PatLeaf<(i8 11)>;
215 def X86_COND_NS : PatLeaf<(i8 12)>;
216 def X86_COND_O : PatLeaf<(i8 13)>;
217 def X86_COND_P : PatLeaf<(i8 14)>;
218 def X86_COND_S : PatLeaf<(i8 15)>;
// Immediates representable in a sign-extended 8-bit field; used to pick the
// short imm8 encodings of the ALU instructions.
// NOTE(review): the closing "}]>;" of both fragments (orig 224 / 230) is
// missing from this copy.
220 def i16immSExt8 : PatLeaf<(i16 imm), [{
221 // i16immSExt8 predicate - True if the 16-bit immediate fits in a 8-bit
222 // sign extended field.
223 return (int16_t)N->getZExtValue() == (int8_t)N->getZExtValue();
226 def i32immSExt8 : PatLeaf<(i32 imm), [{
227 // i32immSExt8 predicate - True if the 32-bit immediate fits in a 8-bit
228 // sign extended field.
229 return (int32_t)N->getZExtValue() == (int8_t)N->getZExtValue();
232 // Helper fragments for loads.
233 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
234 // known to be 32-bit aligned or better. Ditto for i8 to i16.
// NOTE(review): several interior lines of the C++ predicate bodies below
// (the "return true/false;" fallthroughs and the closing "}]>;" lines) are
// missing from this copy — compare with upstream before relying on them.
235 def loadi16 : PatFrag<(ops node:$ptr), (i16 (ld node:$ptr)), [{
236 LoadSDNode *LD = cast<LoadSDNode>(N);
237 if (LD->getAddressingMode() != ISD::UNINDEXED)
239 ISD::LoadExtType ExtType = LD->getExtensionType();
240 if (ExtType == ISD::NON_EXTLOAD)
242 if (ExtType == ISD::EXTLOAD)
243 return LD->getAlignment() >= 2 && !LD->isVolatile();
// loadi16_anyext: an extending i16->i32 load acceptable as a 32-bit load
// when sufficiently aligned and non-volatile.
247 def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
248 LoadSDNode *LD = cast<LoadSDNode>(N);
249 if (LD->getAddressingMode() != ISD::UNINDEXED)
251 ISD::LoadExtType ExtType = LD->getExtensionType();
252 if (ExtType == ISD::EXTLOAD)
253 return LD->getAlignment() >= 2 && !LD->isVolatile();
257 def loadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
258 LoadSDNode *LD = cast<LoadSDNode>(N);
259 if (LD->getAddressingMode() != ISD::UNINDEXED)
261 ISD::LoadExtType ExtType = LD->getExtensionType();
262 if (ExtType == ISD::NON_EXTLOAD)
264 if (ExtType == ISD::EXTLOAD)
265 return LD->getAlignment() >= 4 && !LD->isVolatile();
// nvloadi32: like loadi32 but rejects volatile loads up front.
269 def nvloadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
270 LoadSDNode *LD = cast<LoadSDNode>(N);
271 if (LD->isVolatile())
273 if (LD->getAddressingMode() != ISD::UNINDEXED)
275 ISD::LoadExtType ExtType = LD->getExtensionType();
276 if (ExtType == ISD::NON_EXTLOAD)
278 if (ExtType == ISD::EXTLOAD)
279 return LD->getAlignment() >= 4;
// Simple typed loads with no extra predicate.
283 def loadi8 : PatFrag<(ops node:$ptr), (i8 (load node:$ptr))>;
284 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
286 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
287 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
288 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
// Sign-, zero- and any-extending load fragments, named <ext>load<dst><src>.
290 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
291 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
292 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
294 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
295 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
296 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
297 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
298 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
299 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
301 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
302 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
303 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
304 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
305 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
306 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
309 // An 'and' node with a single use.
310 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
311 return N->hasOneUse();
314 //===----------------------------------------------------------------------===//
315 // Instruction list...
318 // ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
319 // a stack adjustment and the codegen must know that they may modify the stack
320 // pointer before prolog-epilog rewriting occurs.
321 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
322 // sub / add which can clobber EFLAGS.
323 let Defs = [ESP, EFLAGS], Uses = [ESP] in {
// NOTE(review): the asm-string lines of both pseudos (orig 325 / 329) and
// this let block's closing "}" appear to be missing from this copy.
324 def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
326 [(X86callseq_start imm:$amt)]>,
327 Requires<[In32BitMode]>;
328 def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
330 [(X86callseq_end imm:$amt1, imm:$amt2)]>,
331 Requires<[In32BitMode]>;
// One-byte NOP (0x90); no pattern, marked side-effect free.
335 let neverHasSideEffects = 1 in
336 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
// PIC base construction: call to the next instruction then pop, leaving the
// PC in $reg; must not be duplicated so the label stays unique.
339 let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
340 def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins piclabel:$label),
341 "call\t$label\n\tpop{l}\t$reg", []>;
343 //===----------------------------------------------------------------------===//
344 // Control Flow Instructions...
347 // Return instructions.
348 let isTerminator = 1, isReturn = 1, isBarrier = 1,
349 hasCtrlDep = 1, FPForm = SpecialFP, FPFormBits = SpecialFP.Value in {
// NOTE(review): RET's asm/pattern lines (orig 351-352) and this let block's
// closing "}" are missing from this copy.
350 def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
353 def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
355 [(X86retflag imm:$amt)]>;
358 // All branches are RawFrm, Void, Branch, and Terminators
359 let isBranch = 1, isTerminator = 1 in
360 class IBr<bits<8> opcode, dag ins, string asm, list<dag> pattern> :
361 I<opcode, RawFrm, (outs), ins, asm, pattern>;
// Unconditional PC-relative jump.
363 let isBranch = 1, isBarrier = 1 in
364 def JMP : IBr<0xE9, (ins brtarget:$dst), "jmp\t$dst", [(br bb:$dst)]>;
// Indirect jumps through a register or a memory operand.
367 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
368 def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
369 [(brind GR32:$dst)]>;
370 def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
371 [(brind (loadi32 addr:$dst))]>;
374 // Conditional branches
// Jcc family: each pattern pairs an X86brcond with the matching X86_COND_*
// leaf defined above; TB marks the two-byte 0F-prefixed opcode form.
// NOTE(review): the closing "}" of the Uses = [EFLAGS] block (after orig
// 409) is not visible in this copy.
375 let Uses = [EFLAGS] in {
376 def JE : IBr<0x84, (ins brtarget:$dst), "je\t$dst",
377 [(X86brcond bb:$dst, X86_COND_E, EFLAGS)]>, TB;
378 def JNE : IBr<0x85, (ins brtarget:$dst), "jne\t$dst",
379 [(X86brcond bb:$dst, X86_COND_NE, EFLAGS)]>, TB;
380 def JL : IBr<0x8C, (ins brtarget:$dst), "jl\t$dst",
381 [(X86brcond bb:$dst, X86_COND_L, EFLAGS)]>, TB;
382 def JLE : IBr<0x8E, (ins brtarget:$dst), "jle\t$dst",
383 [(X86brcond bb:$dst, X86_COND_LE, EFLAGS)]>, TB;
384 def JG : IBr<0x8F, (ins brtarget:$dst), "jg\t$dst",
385 [(X86brcond bb:$dst, X86_COND_G, EFLAGS)]>, TB;
386 def JGE : IBr<0x8D, (ins brtarget:$dst), "jge\t$dst",
387 [(X86brcond bb:$dst, X86_COND_GE, EFLAGS)]>, TB;
389 def JB : IBr<0x82, (ins brtarget:$dst), "jb\t$dst",
390 [(X86brcond bb:$dst, X86_COND_B, EFLAGS)]>, TB;
391 def JBE : IBr<0x86, (ins brtarget:$dst), "jbe\t$dst",
392 [(X86brcond bb:$dst, X86_COND_BE, EFLAGS)]>, TB;
393 def JA : IBr<0x87, (ins brtarget:$dst), "ja\t$dst",
394 [(X86brcond bb:$dst, X86_COND_A, EFLAGS)]>, TB;
395 def JAE : IBr<0x83, (ins brtarget:$dst), "jae\t$dst",
396 [(X86brcond bb:$dst, X86_COND_AE, EFLAGS)]>, TB;
398 def JS : IBr<0x88, (ins brtarget:$dst), "js\t$dst",
399 [(X86brcond bb:$dst, X86_COND_S, EFLAGS)]>, TB;
400 def JNS : IBr<0x89, (ins brtarget:$dst), "jns\t$dst",
401 [(X86brcond bb:$dst, X86_COND_NS, EFLAGS)]>, TB;
402 def JP : IBr<0x8A, (ins brtarget:$dst), "jp\t$dst",
403 [(X86brcond bb:$dst, X86_COND_P, EFLAGS)]>, TB;
404 def JNP : IBr<0x8B, (ins brtarget:$dst), "jnp\t$dst",
405 [(X86brcond bb:$dst, X86_COND_NP, EFLAGS)]>, TB;
406 def JO : IBr<0x80, (ins brtarget:$dst), "jo\t$dst",
407 [(X86brcond bb:$dst, X86_COND_O, EFLAGS)]>, TB;
408 def JNO : IBr<0x81, (ins brtarget:$dst), "jno\t$dst",
409 [(X86brcond bb:$dst, X86_COND_NO, EFLAGS)]>, TB;
412 //===----------------------------------------------------------------------===//
413 // Call Instructions...
416 // All calls clobber the non-callee saved registers. ESP is marked as
417 // a use to prevent stack-pointer assignments that appear immediately
418 // before calls from potentially appearing dead. Uses for argument
419 // registers are added manually.
// NOTE(review): the "Uses = [ESP] in {" continuation of this let (orig 423)
// and the block's closing "}" are missing from this copy.
420 let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
421 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
422 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, EFLAGS],
424 def CALLpcrel32 : Ii32<0xE8, RawFrm, (outs), (ins i32imm:$dst,variable_ops),
425 "call\t${dst:call}", []>;
426 def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
427 "call\t{*}$dst", [(X86call GR32:$dst)]>;
428 def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
429 "call\t{*}$dst", [(X86call (loadi32 addr:$dst))]>;
// Tail-call pseudos; lowered later to the TAILJMP* forms below.
// NOTE(review): TAILCALL's and both TCRETURN pseudos' trailing asm/pattern
// lines (orig 435-437, 441, 446) are missing from this copy.
434 def TAILCALL : I<0, Pseudo, (outs), (ins),
438 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
439 def TCRETURNdi : I<0, Pseudo, (outs), (ins i32imm:$dst, i32imm:$offset, variable_ops),
440 "#TC_RETURN $dst $offset",
443 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
444 def TCRETURNri : I<0, Pseudo, (outs), (ins GR32:$dst, i32imm:$offset, variable_ops),
445 "#TC_RETURN $dst $offset",
448 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
450 def TAILJMPd : IBr<0xE9, (ins i32imm:$dst), "jmp\t${dst:call} # TAILCALL",
452 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
453 def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst # TAILCALL",
455 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
456 def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem:$dst),
457 "jmp\t{*}$dst # TAILCALL", []>;
459 //===----------------------------------------------------------------------===//
460 // Miscellaneous Instructions...
// Stack-frame / stack-register manipulation.  These model their implicit
// register effects via Defs/Uses since they have no pattern.
462 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
463 def LEAVE : I<0xC9, RawFrm,
464 (outs), (ins), "leave", []>;
// NOTE(review): the closing "}" of this push/pop let block (orig 472) is
// not visible in this copy.
466 let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in {
468 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>;
471 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>;
// POPFD/PUSHFD transfer EFLAGS via the stack, hence the flag Defs/Uses.
474 let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in
475 def POPFD : I<0x9D, RawFrm, (outs), (ins), "popf", []>;
476 let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
477 def PUSHFD : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
// NOTE(review): BSWAP32r's asm-string line (orig 482) is missing here.
479 let isTwoAddress = 1 in // GR32 = bswap GR32
480 def BSWAP32r : I<0xC8, AddRegFrm,
481 (outs GR32:$dst), (ins GR32:$src),
483 [(set GR32:$dst, (bswap GR32:$src))]>, TB;
486 // Bit scan instructions.
// BSF/BSR set EFLAGS (ZF) in addition to their result, hence the implicit
// EFLAGS def modeled on every pattern.
// NOTE(review): this let block's closing "}" (after orig 516) is not
// visible in this copy.
487 let Defs = [EFLAGS] in {
488 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
489 "bsf{w}\t{$src, $dst|$dst, $src}",
490 [(set GR16:$dst, (X86bsf GR16:$src)), (implicit EFLAGS)]>, TB;
491 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
492 "bsf{w}\t{$src, $dst|$dst, $src}",
493 [(set GR16:$dst, (X86bsf (loadi16 addr:$src))),
494 (implicit EFLAGS)]>, TB;
495 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
496 "bsf{l}\t{$src, $dst|$dst, $src}",
497 [(set GR32:$dst, (X86bsf GR32:$src)), (implicit EFLAGS)]>, TB;
498 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
499 "bsf{l}\t{$src, $dst|$dst, $src}",
500 [(set GR32:$dst, (X86bsf (loadi32 addr:$src))),
501 (implicit EFLAGS)]>, TB;
503 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
504 "bsr{w}\t{$src, $dst|$dst, $src}",
505 [(set GR16:$dst, (X86bsr GR16:$src)), (implicit EFLAGS)]>, TB;
506 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
507 "bsr{w}\t{$src, $dst|$dst, $src}",
508 [(set GR16:$dst, (X86bsr (loadi16 addr:$src))),
509 (implicit EFLAGS)]>, TB;
510 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
511 "bsr{l}\t{$src, $dst|$dst, $src}",
512 [(set GR32:$dst, (X86bsr GR32:$src)), (implicit EFLAGS)]>, TB;
513 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
514 "bsr{l}\t{$src, $dst|$dst, $src}",
515 [(set GR32:$dst, (X86bsr (loadi32 addr:$src))),
516 (implicit EFLAGS)]>, TB;
// LEA: address computation only — it reads no memory.  LEA32r is
// rematerializable and matched through the lea32addr complex pattern.
519 let neverHasSideEffects = 1 in
520 def LEA16r : I<0x8D, MRMSrcMem,
521 (outs GR16:$dst), (ins i32mem:$src),
522 "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
523 let isReMaterializable = 1 in
524 def LEA32r : I<0x8D, MRMSrcMem,
525 (outs GR32:$dst), (ins lea32mem:$src),
526 "lea{l}\t{$src|$dst}, {$dst|$src}",
527 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
// REP string operations.  The implicit count/pointer registers are modeled
// via Defs/Uses; the patterns select on the element type carried by the
// X86rep_* node.
// NOTE(review): the closing "}" of this let block (orig 536) is not visible
// in this copy.
529 let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI] in {
530 def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
531 [(X86rep_movs i8)]>, REP;
532 def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
533 [(X86rep_movs i16)]>, REP, OpSize;
534 def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
535 [(X86rep_movs i32)]>, REP;
// REP STOS additionally reads the store value from AL/AX/EAX.
538 let Defs = [ECX,EDI], Uses = [AL,ECX,EDI] in
539 def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
540 [(X86rep_stos i8)]>, REP;
541 let Defs = [ECX,EDI], Uses = [AX,ECX,EDI] in
542 def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
543 [(X86rep_stos i16)]>, REP, OpSize;
544 let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI] in
545 def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
546 [(X86rep_stos i32)]>, REP;
// NOTE(review): RDTSC's trailing "TB;" line (orig 550) is missing here, and
// the Defs list names RAX/RDX — verify whether EAX/EDX was intended for this
// 32-bit section.
548 let Defs = [RAX, RDX] in
549 def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)]>,
// NOTE(review): this let block's closing "}" (orig 554) is missing here.
552 let isBarrier = 1, hasCtrlDep = 1 in {
553 def TRAP : I<0x0B, RawFrm, (outs), (ins), "ud2", [(trap)]>, TB;
556 //===----------------------------------------------------------------------===//
557 // Input/Output Instructions...
// Port I/O.  Register forms use the DX port register implicitly; immediate
// forms take an 8-bit port number.  None have selection patterns.
559 let Defs = [AL], Uses = [DX] in
560 def IN8rr : I<0xEC, RawFrm, (outs), (ins),
561 "in{b}\t{%dx, %al|%AL, %DX}", []>;
562 let Defs = [AX], Uses = [DX] in
563 def IN16rr : I<0xED, RawFrm, (outs), (ins),
564 "in{w}\t{%dx, %ax|%AX, %DX}", []>, OpSize;
565 let Defs = [EAX], Uses = [DX] in
566 def IN32rr : I<0xED, RawFrm, (outs), (ins),
567 "in{l}\t{%dx, %eax|%EAX, %DX}", []>;
// NOTE(review): the "let Defs = ..." lines for the IN*ri forms (orig 569,
// 572, 575) are missing from this copy.
570 def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i16i8imm:$port),
571 "in{b}\t{$port, %al|%AL, $port}", []>;
573 def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port),
574 "in{w}\t{$port, %ax|%AX, $port}", []>, OpSize;
576 def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port),
577 "in{l}\t{$port, %eax|%EAX, $port}", []>;
579 let Uses = [DX, AL] in
580 def OUT8rr : I<0xEE, RawFrm, (outs), (ins),
581 "out{b}\t{%al, %dx|%DX, %AL}", []>;
582 let Uses = [DX, AX] in
583 def OUT16rr : I<0xEF, RawFrm, (outs), (ins),
584 "out{w}\t{%ax, %dx|%DX, %AX}", []>, OpSize;
585 let Uses = [DX, EAX] in
586 def OUT32rr : I<0xEF, RawFrm, (outs), (ins),
587 "out{l}\t{%eax, %dx|%DX, %EAX}", []>;
// NOTE(review): the "let Uses = ..." lines for the OUT*ir forms (orig 589,
// 592, 595) are missing from this copy.
590 def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i16i8imm:$port),
591 "out{b}\t{%al, $port|$port, %AL}", []>;
593 def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port),
594 "out{w}\t{%ax, $port|$port, %AX}", []>, OpSize;
596 def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port),
597 "out{l}\t{%eax, $port|$port, %EAX}", []>;
599 //===----------------------------------------------------------------------===//
600 // Move Instructions...
// Register-to-register moves: no pattern (copies are inserted directly by
// the register allocator), marked side-effect free.
602 let neverHasSideEffects = 1 in {
603 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
604 "mov{b}\t{$src, $dst|$dst, $src}", []>;
605 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
606 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
607 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
608 "mov{l}\t{$src, $dst|$dst, $src}", []>;
// Immediate-to-register moves: trivially rematerializable and as cheap as a
// copy, so the allocator may re-emit them instead of spilling.
// NOTE(review): this inner let block's closing "}" (orig 620) is missing.
610 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
611 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
612 "mov{b}\t{$src, $dst|$dst, $src}",
613 [(set GR8:$dst, imm:$src)]>;
614 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
615 "mov{w}\t{$src, $dst|$dst, $src}",
616 [(set GR16:$dst, imm:$src)]>, OpSize;
617 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
618 "mov{l}\t{$src, $dst|$dst, $src}",
619 [(set GR32:$dst, imm:$src)]>;
// Immediate-to-memory stores.
621 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
622 "mov{b}\t{$src, $dst|$dst, $src}",
623 [(store (i8 imm:$src), addr:$dst)]>;
624 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
625 "mov{w}\t{$src, $dst|$dst, $src}",
626 [(store (i16 imm:$src), addr:$dst)]>, OpSize;
627 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
628 "mov{l}\t{$src, $dst|$dst, $src}",
629 [(store (i32 imm:$src), addr:$dst)]>;
// Memory-to-register loads.
// NOTE(review): this inner let block's closing "}" (orig 641) is missing.
631 let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
632 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
633 "mov{b}\t{$src, $dst|$dst, $src}",
634 [(set GR8:$dst, (load addr:$src))]>;
635 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
636 "mov{w}\t{$src, $dst|$dst, $src}",
637 [(set GR16:$dst, (load addr:$src))]>, OpSize;
638 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
639 "mov{l}\t{$src, $dst|$dst, $src}",
640 [(set GR32:$dst, (load addr:$src))]>;
// Register-to-memory stores.
643 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
644 "mov{b}\t{$src, $dst|$dst, $src}",
645 [(store GR8:$src, addr:$dst)]>;
646 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
647 "mov{w}\t{$src, $dst|$dst, $src}",
648 [(store GR16:$src, addr:$dst)]>, OpSize;
649 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
650 "mov{l}\t{$src, $dst|$dst, $src}",
651 [(store GR32:$src, addr:$dst)]>;
653 //===----------------------------------------------------------------------===//
654 // Fixed-Register Multiplication and Division Instructions...
657 // Extra precision multiplication
// One-operand MUL/IMUL: the other factor and the double-width result live
// in fixed registers (AL/AH, AX/DX, EAX/EDX), modeled via Defs/Uses.
658 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
659 def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
660 // FIXME: Used for 8-bit mul, ignore result upper 8 bits.
661 // This probably ought to be moved to a def : Pat<> if the
662 // syntax can be accepted.
663 [(set AL, (mul AL, GR8:$src))]>; // AL,AH = AL*GR8
664 let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
665 def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src), "mul{w}\t$src", []>,
666 OpSize; // AX,DX = AX*GR16
667 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
668 def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src), "mul{l}\t$src", []>;
669 // EAX,EDX = EAX*GR32
// NOTE(review): MUL8m's asm-string line (orig 672) is missing here.
670 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
671 def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
673 // FIXME: Used for 8-bit mul, ignore result upper 8 bits.
674 // This probably ought to be moved to a def : Pat<> if the
675 // syntax can be accepted.
676 [(set AL, (mul AL, (loadi8 addr:$src)))]>; // AL,AH = AL*[mem8]
// Memory forms of the wider multiplies: load-only, no modeled side effects.
// NOTE(review): this let block's closing "}" (orig 684) is missing here.
677 let mayLoad = 1, neverHasSideEffects = 1 in {
678 let Defs = [AX,DX,EFLAGS], Uses = [AX] in
679 def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
680 "mul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
681 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
682 def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src),
683 "mul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
// Signed one-operand multiplies; this let is closed at orig line 751
// ("} // neverHasSideEffects").
686 let neverHasSideEffects = 1 in {
687 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
688 def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", []>;
690 let Defs = [AX,DX,EFLAGS], Uses = [AX] in
691 def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", []>,
692 OpSize; // AX,DX = AX*GR16
693 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
694 def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", []>;
695 // EAX,EDX = EAX*GR32
// NOTE(review): a "let mayLoad = 1 in {" opener for these memory forms
// (around orig 696) appears to be missing from this copy.
697 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
698 def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src),
699 "imul{b}\t$src", []>; // AL,AH = AL*[mem8]
700 let Defs = [AX,DX,EFLAGS], Uses = [AX] in
701 def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src),
702 "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
// IMUL32m: one-operand signed multiply, EAX,EDX = EAX*[mem32].
// Fix: imul clobbers the flags like every other one-operand multiply here
// (IMUL16m and MUL32m both list EFLAGS in Defs); EFLAGS was missing from
// this def's clobber list, which could let a live flags value be miscompiled
// across the instruction.
703 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
704 def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src),
705 "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
708 // unsigned division/remainder
// One-operand DIV/IDIV: dividend and results live in fixed registers
// (quotient in AL/AX/EAX, remainder in AH/DX/EDX), modeled via Defs/Uses.
// NOTE(review): the asm-string lines of DIV8r/DIV32r/DIV8m/DIV32m (orig
// 711, 717, 721, 727) are missing from this copy.
709 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
710 def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
712 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
713 def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
714 "div{w}\t$src", []>, OpSize;
715 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
716 def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
// Memory forms.  NOTE(review): a "let mayLoad = 1 in {" opener (around orig
// 718) appears to be missing from this copy.
719 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
720 def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
722 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
723 def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
724 "div{w}\t$src", []>, OpSize;
725 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
726 def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX
730 // Signed division/remainder.
731 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
732 def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
733 "idiv{b}\t$src", []>;
734 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
735 def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
736 "idiv{w}\t$src", []>, OpSize;
737 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
738 def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
739 "idiv{l}\t$src", []>;
// IDIV memory forms: load the divisor from memory.
// Fix: the original let listed "mayLoad = 1" twice ("mayLoad = 1,
// mayLoad = 1"), a redundant duplicate assignment of the same property;
// a single mayLoad suffices.  neverHasSideEffects is already inherited from
// the enclosing let opened at orig line 686.
740 let mayLoad = 1 in {
741 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
742 def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
743 "idiv{b}\t$src", []>;
744 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
745 def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
746 "idiv{w}\t$src", []>, OpSize;
747 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
748 def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX
749 "idiv{l}\t$src", []>;
751 } // neverHasSideEffects
753 //===----------------------------------------------------------------------===//
754 // Two address Instructions.
// Conditional moves: $dst is tied to $src1 (isTwoAddress); $src2 is copied
// into $dst when the condition encoded by the X86_COND_* leaf holds, based
// on the live EFLAGS value.
// NOTE(review): every CMOV def below is missing its trailing format-suffix
// line ("TB, OpSize;" for the 16-bit forms / "TB;" for the 32-bit forms),
// and this region's closing braces lie beyond the end of this copy.  The
// final CMOVLE32rr def is itself cut off mid-pattern.
756 let isTwoAddress = 1 in {
759 let Uses = [EFLAGS] in {
760 let isCommutable = 1 in {
761 def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
762 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
763 "cmovb\t{$src2, $dst|$dst, $src2}",
764 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
765 X86_COND_B, EFLAGS))]>,
767 def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32
768 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
769 "cmovb\t{$src2, $dst|$dst, $src2}",
770 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
771 X86_COND_B, EFLAGS))]>,
774 def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
775 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
776 "cmovae\t{$src2, $dst|$dst, $src2}",
777 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
778 X86_COND_AE, EFLAGS))]>,
780 def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
781 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
782 "cmovae\t{$src2, $dst|$dst, $src2}",
783 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
784 X86_COND_AE, EFLAGS))]>,
786 def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
787 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
788 "cmove\t{$src2, $dst|$dst, $src2}",
789 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
790 X86_COND_E, EFLAGS))]>,
792 def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
793 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
794 "cmove\t{$src2, $dst|$dst, $src2}",
795 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
796 X86_COND_E, EFLAGS))]>,
798 def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
799 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
800 "cmovne\t{$src2, $dst|$dst, $src2}",
801 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
802 X86_COND_NE, EFLAGS))]>,
804 def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
805 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
806 "cmovne\t{$src2, $dst|$dst, $src2}",
807 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
808 X86_COND_NE, EFLAGS))]>,
810 def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
811 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
812 "cmovbe\t{$src2, $dst|$dst, $src2}",
813 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
814 X86_COND_BE, EFLAGS))]>,
816 def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
817 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
818 "cmovbe\t{$src2, $dst|$dst, $src2}",
819 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
820 X86_COND_BE, EFLAGS))]>,
822 def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
823 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
824 "cmova\t{$src2, $dst|$dst, $src2}",
825 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
826 X86_COND_A, EFLAGS))]>,
828 def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
829 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
830 "cmova\t{$src2, $dst|$dst, $src2}",
831 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
832 X86_COND_A, EFLAGS))]>,
834 def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16
835 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
836 "cmovl\t{$src2, $dst|$dst, $src2}",
837 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
838 X86_COND_L, EFLAGS))]>,
840 def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32
841 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
842 "cmovl\t{$src2, $dst|$dst, $src2}",
843 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
844 X86_COND_L, EFLAGS))]>,
846 def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16
847 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
848 "cmovge\t{$src2, $dst|$dst, $src2}",
849 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
850 X86_COND_GE, EFLAGS))]>,
852 def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32
853 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
854 "cmovge\t{$src2, $dst|$dst, $src2}",
855 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
856 X86_COND_GE, EFLAGS))]>,
858 def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16
859 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
860 "cmovle\t{$src2, $dst|$dst, $src2}",
861 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
862 X86_COND_LE, EFLAGS))]>,
864 def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32
865 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
866 "cmovle\t{$src2, $dst|$dst, $src2}",
867 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
868 X86_COND_LE, EFLAGS))]>,
870 def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16
871 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
872 "cmovg\t{$src2, $dst|$dst, $src2}",
873 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
874 X86_COND_G, EFLAGS))]>,
876 def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32
877 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
878 "cmovg\t{$src2, $dst|$dst, $src2}",
879 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
880 X86_COND_G, EFLAGS))]>,
882 def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16
883 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
884 "cmovs\t{$src2, $dst|$dst, $src2}",
885 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
886 X86_COND_S, EFLAGS))]>,
888 def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32
889 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
890 "cmovs\t{$src2, $dst|$dst, $src2}",
891 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
892 X86_COND_S, EFLAGS))]>,
894 def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16
895 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
896 "cmovns\t{$src2, $dst|$dst, $src2}",
897 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
898 X86_COND_NS, EFLAGS))]>,
900 def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32
901 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
902 "cmovns\t{$src2, $dst|$dst, $src2}",
903 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
904 X86_COND_NS, EFLAGS))]>,
906 def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16
907 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
908 "cmovp\t{$src2, $dst|$dst, $src2}",
909 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
910 X86_COND_P, EFLAGS))]>,
912 def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32
913 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
914 "cmovp\t{$src2, $dst|$dst, $src2}",
915 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
916 X86_COND_P, EFLAGS))]>,
918 def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16
919 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
920 "cmovnp\t{$src2, $dst|$dst, $src2}",
921 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
922 X86_COND_NP, EFLAGS))]>,
924 def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32
925 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
926 "cmovnp\t{$src2, $dst|$dst, $src2}",
927 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
928 X86_COND_NP, EFLAGS))]>,
930 } // isCommutable = 1
932 def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32]
933 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
934 "cmovnp\t{$src2, $dst|$dst, $src2}",
935 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
936 X86_COND_NP, EFLAGS))]>,
939 def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
940 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
941 "cmovb\t{$src2, $dst|$dst, $src2}",
942 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
943 X86_COND_B, EFLAGS))]>,
945 def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
946 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
947 "cmovb\t{$src2, $dst|$dst, $src2}",
948 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
949 X86_COND_B, EFLAGS))]>,
951 def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
952 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
953 "cmovae\t{$src2, $dst|$dst, $src2}",
954 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
955 X86_COND_AE, EFLAGS))]>,
957 def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
958 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
959 "cmovae\t{$src2, $dst|$dst, $src2}",
960 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
961 X86_COND_AE, EFLAGS))]>,
963 def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
964 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
965 "cmove\t{$src2, $dst|$dst, $src2}",
966 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
967 X86_COND_E, EFLAGS))]>,
969 def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
970 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
971 "cmove\t{$src2, $dst|$dst, $src2}",
972 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
973 X86_COND_E, EFLAGS))]>,
975 def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
976 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
977 "cmovne\t{$src2, $dst|$dst, $src2}",
978 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
979 X86_COND_NE, EFLAGS))]>,
981 def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
982 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
983 "cmovne\t{$src2, $dst|$dst, $src2}",
984 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
985 X86_COND_NE, EFLAGS))]>,
987 def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
988 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
989 "cmovbe\t{$src2, $dst|$dst, $src2}",
990 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
991 X86_COND_BE, EFLAGS))]>,
993 def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
994 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
995 "cmovbe\t{$src2, $dst|$dst, $src2}",
996 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
997 X86_COND_BE, EFLAGS))]>,
999 def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
1000 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1001 "cmova\t{$src2, $dst|$dst, $src2}",
1002 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1003 X86_COND_A, EFLAGS))]>,
1005 def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
1006 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1007 "cmova\t{$src2, $dst|$dst, $src2}",
1008 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1009 X86_COND_A, EFLAGS))]>,
1011 def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16]
1012 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1013 "cmovl\t{$src2, $dst|$dst, $src2}",
1014 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1015 X86_COND_L, EFLAGS))]>,
1017 def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32]
1018 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1019 "cmovl\t{$src2, $dst|$dst, $src2}",
1020 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1021 X86_COND_L, EFLAGS))]>,
1023 def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
1024 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1025 "cmovge\t{$src2, $dst|$dst, $src2}",
1026 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1027 X86_COND_GE, EFLAGS))]>,
1029 def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
1030 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1031 "cmovge\t{$src2, $dst|$dst, $src2}",
1032 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1033 X86_COND_GE, EFLAGS))]>,
1035 def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
1036 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1037 "cmovle\t{$src2, $dst|$dst, $src2}",
1038 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1039 X86_COND_LE, EFLAGS))]>,
1041 def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
1042 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1043 "cmovle\t{$src2, $dst|$dst, $src2}",
1044 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1045 X86_COND_LE, EFLAGS))]>,
1047 def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
1048 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1049 "cmovg\t{$src2, $dst|$dst, $src2}",
1050 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1051 X86_COND_G, EFLAGS))]>,
1053 def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
1054 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1055 "cmovg\t{$src2, $dst|$dst, $src2}",
1056 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1057 X86_COND_G, EFLAGS))]>,
1059 def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
1060 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1061 "cmovs\t{$src2, $dst|$dst, $src2}",
1062 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1063 X86_COND_S, EFLAGS))]>,
1065 def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
1066 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1067 "cmovs\t{$src2, $dst|$dst, $src2}",
1068 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1069 X86_COND_S, EFLAGS))]>,
1071 def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
1072 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1073 "cmovns\t{$src2, $dst|$dst, $src2}",
1074 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1075 X86_COND_NS, EFLAGS))]>,
1077 def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
1078 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1079 "cmovns\t{$src2, $dst|$dst, $src2}",
1080 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1081 X86_COND_NS, EFLAGS))]>,
1083 def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
1084 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1085 "cmovp\t{$src2, $dst|$dst, $src2}",
1086 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1087 X86_COND_P, EFLAGS))]>,
1089 def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
1090 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1091 "cmovp\t{$src2, $dst|$dst, $src2}",
1092 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1093 X86_COND_P, EFLAGS))]>,
1095 def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
1096 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1097 "cmovnp\t{$src2, $dst|$dst, $src2}",
1098 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1099 X86_COND_NP, EFLAGS))]>,
1101 } // Uses = [EFLAGS]
1104 // unary instructions
1105 let CodeSize = 2 in {
1106 let Defs = [EFLAGS] in {
1107 def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src), "neg{b}\t$dst",
1108 [(set GR8:$dst, (ineg GR8:$src))]>;
1109 def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src), "neg{w}\t$dst",
1110 [(set GR16:$dst, (ineg GR16:$src))]>, OpSize;
1111 def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src), "neg{l}\t$dst",
1112 [(set GR32:$dst, (ineg GR32:$src))]>;
1113 let isTwoAddress = 0 in {
1114 def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst), "neg{b}\t$dst",
1115 [(store (ineg (loadi8 addr:$dst)), addr:$dst)]>;
1116 def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst), "neg{w}\t$dst",
1117 [(store (ineg (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
1118 def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst), "neg{l}\t$dst",
1119 [(store (ineg (loadi32 addr:$dst)), addr:$dst)]>;
1122 } // Defs = [EFLAGS]
1124 def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src), "not{b}\t$dst",
1125 [(set GR8:$dst, (not GR8:$src))]>;
1126 def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src), "not{w}\t$dst",
1127 [(set GR16:$dst, (not GR16:$src))]>, OpSize;
1128 def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src), "not{l}\t$dst",
1129 [(set GR32:$dst, (not GR32:$src))]>;
1130 let isTwoAddress = 0 in {
1131 def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst), "not{b}\t$dst",
1132 [(store (not (loadi8 addr:$dst)), addr:$dst)]>;
1133 def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst), "not{w}\t$dst",
1134 [(store (not (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
1135 def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst), "not{l}\t$dst",
1136 [(store (not (loadi32 addr:$dst)), addr:$dst)]>;
1140 // TODO: inc/dec is slow for P4, but fast for Pentium-M.
1141 let Defs = [EFLAGS] in {
1143 def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "inc{b}\t$dst",
1144 [(set GR8:$dst, (add GR8:$src, 1))]>;
1145 let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
1146 def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
1147 [(set GR16:$dst, (add GR16:$src, 1))]>,
1148 OpSize, Requires<[In32BitMode]>;
1149 def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
1150 [(set GR32:$dst, (add GR32:$src, 1))]>, Requires<[In32BitMode]>;
1152 let isTwoAddress = 0, CodeSize = 2 in {
1153 def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
1154 [(store (add (loadi8 addr:$dst), 1), addr:$dst)]>;
1155 def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
1156 [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
1157 OpSize, Requires<[In32BitMode]>;
1158 def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
1159 [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
1160 Requires<[In32BitMode]>;
1164 def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "dec{b}\t$dst",
1165 [(set GR8:$dst, (add GR8:$src, -1))]>;
1166 let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
1167 def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
1168 [(set GR16:$dst, (add GR16:$src, -1))]>,
1169 OpSize, Requires<[In32BitMode]>;
1170 def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
1171 [(set GR32:$dst, (add GR32:$src, -1))]>, Requires<[In32BitMode]>;
1174 let isTwoAddress = 0, CodeSize = 2 in {
1175 def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
1176 [(store (add (loadi8 addr:$dst), -1), addr:$dst)]>;
1177 def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
1178 [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
1179 OpSize, Requires<[In32BitMode]>;
1180 def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
1181 [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
1182 Requires<[In32BitMode]>;
1184 } // Defs = [EFLAGS]
1186 // Logical operators...
1187 let Defs = [EFLAGS] in {
1188 let isCommutable = 1 in { // X = AND Y, Z --> X = AND Z, Y
1189 def AND8rr : I<0x20, MRMDestReg,
1190 (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1191 "and{b}\t{$src2, $dst|$dst, $src2}",
1192 [(set GR8:$dst, (and GR8:$src1, GR8:$src2))]>;
1193 def AND16rr : I<0x21, MRMDestReg,
1194 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1195 "and{w}\t{$src2, $dst|$dst, $src2}",
1196 [(set GR16:$dst, (and GR16:$src1, GR16:$src2))]>, OpSize;
1197 def AND32rr : I<0x21, MRMDestReg,
1198 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1199 "and{l}\t{$src2, $dst|$dst, $src2}",
1200 [(set GR32:$dst, (and GR32:$src1, GR32:$src2))]>;
1203 def AND8rm : I<0x22, MRMSrcMem,
1204 (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
1205 "and{b}\t{$src2, $dst|$dst, $src2}",
1206 [(set GR8:$dst, (and GR8:$src1, (load addr:$src2)))]>;
1207 def AND16rm : I<0x23, MRMSrcMem,
1208 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1209 "and{w}\t{$src2, $dst|$dst, $src2}",
1210 [(set GR16:$dst, (and GR16:$src1, (load addr:$src2)))]>, OpSize;
1211 def AND32rm : I<0x23, MRMSrcMem,
1212 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1213 "and{l}\t{$src2, $dst|$dst, $src2}",
1214 [(set GR32:$dst, (and GR32:$src1, (load addr:$src2)))]>;
1216 def AND8ri : Ii8<0x80, MRM4r,
1217 (outs GR8 :$dst), (ins GR8 :$src1, i8imm :$src2),
1218 "and{b}\t{$src2, $dst|$dst, $src2}",
1219 [(set GR8:$dst, (and GR8:$src1, imm:$src2))]>;
1220 def AND16ri : Ii16<0x81, MRM4r,
1221 (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1222 "and{w}\t{$src2, $dst|$dst, $src2}",
1223 [(set GR16:$dst, (and GR16:$src1, imm:$src2))]>, OpSize;
1224 def AND32ri : Ii32<0x81, MRM4r,
1225 (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1226 "and{l}\t{$src2, $dst|$dst, $src2}",
1227 [(set GR32:$dst, (and GR32:$src1, imm:$src2))]>;
1228 def AND16ri8 : Ii8<0x83, MRM4r,
1229 (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1230 "and{w}\t{$src2, $dst|$dst, $src2}",
1231 [(set GR16:$dst, (and GR16:$src1, i16immSExt8:$src2))]>,
1233 def AND32ri8 : Ii8<0x83, MRM4r,
1234 (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1235 "and{l}\t{$src2, $dst|$dst, $src2}",
1236 [(set GR32:$dst, (and GR32:$src1, i32immSExt8:$src2))]>;
1238 let isTwoAddress = 0 in {
1239 def AND8mr : I<0x20, MRMDestMem,
1240 (outs), (ins i8mem :$dst, GR8 :$src),
1241 "and{b}\t{$src, $dst|$dst, $src}",
1242 [(store (and (load addr:$dst), GR8:$src), addr:$dst)]>;
1243 def AND16mr : I<0x21, MRMDestMem,
1244 (outs), (ins i16mem:$dst, GR16:$src),
1245 "and{w}\t{$src, $dst|$dst, $src}",
1246 [(store (and (load addr:$dst), GR16:$src), addr:$dst)]>,
1248 def AND32mr : I<0x21, MRMDestMem,
1249 (outs), (ins i32mem:$dst, GR32:$src),
1250 "and{l}\t{$src, $dst|$dst, $src}",
1251 [(store (and (load addr:$dst), GR32:$src), addr:$dst)]>;
1252 def AND8mi : Ii8<0x80, MRM4m,
1253 (outs), (ins i8mem :$dst, i8imm :$src),
1254 "and{b}\t{$src, $dst|$dst, $src}",
1255 [(store (and (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
1256 def AND16mi : Ii16<0x81, MRM4m,
1257 (outs), (ins i16mem:$dst, i16imm:$src),
1258 "and{w}\t{$src, $dst|$dst, $src}",
1259 [(store (and (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
1261 def AND32mi : Ii32<0x81, MRM4m,
1262 (outs), (ins i32mem:$dst, i32imm:$src),
1263 "and{l}\t{$src, $dst|$dst, $src}",
1264 [(store (and (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
1265 def AND16mi8 : Ii8<0x83, MRM4m,
1266 (outs), (ins i16mem:$dst, i16i8imm :$src),
1267 "and{w}\t{$src, $dst|$dst, $src}",
1268 [(store (and (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
1270 def AND32mi8 : Ii8<0x83, MRM4m,
1271 (outs), (ins i32mem:$dst, i32i8imm :$src),
1272 "and{l}\t{$src, $dst|$dst, $src}",
1273 [(store (and (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
1277 let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y
1278 def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1279 "or{b}\t{$src2, $dst|$dst, $src2}",
1280 [(set GR8:$dst, (or GR8:$src1, GR8:$src2))]>;
1281 def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1282 "or{w}\t{$src2, $dst|$dst, $src2}",
1283 [(set GR16:$dst, (or GR16:$src1, GR16:$src2))]>, OpSize;
1284 def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1285 "or{l}\t{$src2, $dst|$dst, $src2}",
1286 [(set GR32:$dst, (or GR32:$src1, GR32:$src2))]>;
1288 def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
1289 "or{b}\t{$src2, $dst|$dst, $src2}",
1290 [(set GR8:$dst, (or GR8:$src1, (load addr:$src2)))]>;
1291 def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1292 "or{w}\t{$src2, $dst|$dst, $src2}",
1293 [(set GR16:$dst, (or GR16:$src1, (load addr:$src2)))]>, OpSize;
1294 def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1295 "or{l}\t{$src2, $dst|$dst, $src2}",
1296 [(set GR32:$dst, (or GR32:$src1, (load addr:$src2)))]>;
1298 def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1299 "or{b}\t{$src2, $dst|$dst, $src2}",
1300 [(set GR8:$dst, (or GR8:$src1, imm:$src2))]>;
1301 def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1302 "or{w}\t{$src2, $dst|$dst, $src2}",
1303 [(set GR16:$dst, (or GR16:$src1, imm:$src2))]>, OpSize;
1304 def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1305 "or{l}\t{$src2, $dst|$dst, $src2}",
1306 [(set GR32:$dst, (or GR32:$src1, imm:$src2))]>;
1308 def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1309 "or{w}\t{$src2, $dst|$dst, $src2}",
1310 [(set GR16:$dst, (or GR16:$src1, i16immSExt8:$src2))]>, OpSize;
1311 def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1312 "or{l}\t{$src2, $dst|$dst, $src2}",
1313 [(set GR32:$dst, (or GR32:$src1, i32immSExt8:$src2))]>;
1314 let isTwoAddress = 0 in {
1315 def OR8mr : I<0x08, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
1316 "or{b}\t{$src, $dst|$dst, $src}",
1317 [(store (or (load addr:$dst), GR8:$src), addr:$dst)]>;
1318 def OR16mr : I<0x09, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1319 "or{w}\t{$src, $dst|$dst, $src}",
1320 [(store (or (load addr:$dst), GR16:$src), addr:$dst)]>, OpSize;
1321 def OR32mr : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1322 "or{l}\t{$src, $dst|$dst, $src}",
1323 [(store (or (load addr:$dst), GR32:$src), addr:$dst)]>;
1324 def OR8mi : Ii8<0x80, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
1325 "or{b}\t{$src, $dst|$dst, $src}",
1326 [(store (or (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
1327 def OR16mi : Ii16<0x81, MRM1m, (outs), (ins i16mem:$dst, i16imm:$src),
1328 "or{w}\t{$src, $dst|$dst, $src}",
1329 [(store (or (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
1331 def OR32mi : Ii32<0x81, MRM1m, (outs), (ins i32mem:$dst, i32imm:$src),
1332 "or{l}\t{$src, $dst|$dst, $src}",
1333 [(store (or (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
1334 def OR16mi8 : Ii8<0x83, MRM1m, (outs), (ins i16mem:$dst, i16i8imm:$src),
1335 "or{w}\t{$src, $dst|$dst, $src}",
1336 [(store (or (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
1338 def OR32mi8 : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$src),
1339 "or{l}\t{$src, $dst|$dst, $src}",
1340 [(store (or (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
1341 } // isTwoAddress = 0
1344 let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y
1345 def XOR8rr : I<0x30, MRMDestReg,
1346 (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1347 "xor{b}\t{$src2, $dst|$dst, $src2}",
1348 [(set GR8:$dst, (xor GR8:$src1, GR8:$src2))]>;
1349 def XOR16rr : I<0x31, MRMDestReg,
1350 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1351 "xor{w}\t{$src2, $dst|$dst, $src2}",
1352 [(set GR16:$dst, (xor GR16:$src1, GR16:$src2))]>, OpSize;
1353 def XOR32rr : I<0x31, MRMDestReg,
1354 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1355 "xor{l}\t{$src2, $dst|$dst, $src2}",
1356 [(set GR32:$dst, (xor GR32:$src1, GR32:$src2))]>;
1357 } // isCommutable = 1
1359 def XOR8rm : I<0x32, MRMSrcMem ,
1360 (outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2),
1361 "xor{b}\t{$src2, $dst|$dst, $src2}",
1362 [(set GR8:$dst, (xor GR8:$src1, (load addr:$src2)))]>;
1363 def XOR16rm : I<0x33, MRMSrcMem ,
1364 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1365 "xor{w}\t{$src2, $dst|$dst, $src2}",
1366 [(set GR16:$dst, (xor GR16:$src1, (load addr:$src2)))]>,
1368 def XOR32rm : I<0x33, MRMSrcMem ,
1369 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1370 "xor{l}\t{$src2, $dst|$dst, $src2}",
1371 [(set GR32:$dst, (xor GR32:$src1, (load addr:$src2)))]>;
1373 def XOR8ri : Ii8<0x80, MRM6r,
1374 (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1375 "xor{b}\t{$src2, $dst|$dst, $src2}",
1376 [(set GR8:$dst, (xor GR8:$src1, imm:$src2))]>;
1377 def XOR16ri : Ii16<0x81, MRM6r,
1378 (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1379 "xor{w}\t{$src2, $dst|$dst, $src2}",
1380 [(set GR16:$dst, (xor GR16:$src1, imm:$src2))]>, OpSize;
1381 def XOR32ri : Ii32<0x81, MRM6r,
1382 (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1383 "xor{l}\t{$src2, $dst|$dst, $src2}",
1384 [(set GR32:$dst, (xor GR32:$src1, imm:$src2))]>;
1385 def XOR16ri8 : Ii8<0x83, MRM6r,
1386 (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1387 "xor{w}\t{$src2, $dst|$dst, $src2}",
1388 [(set GR16:$dst, (xor GR16:$src1, i16immSExt8:$src2))]>,
1390 def XOR32ri8 : Ii8<0x83, MRM6r,
1391 (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1392 "xor{l}\t{$src2, $dst|$dst, $src2}",
1393 [(set GR32:$dst, (xor GR32:$src1, i32immSExt8:$src2))]>;
1395 let isTwoAddress = 0 in {
1396 def XOR8mr : I<0x30, MRMDestMem,
1397 (outs), (ins i8mem :$dst, GR8 :$src),
1398 "xor{b}\t{$src, $dst|$dst, $src}",
1399 [(store (xor (load addr:$dst), GR8:$src), addr:$dst)]>;
1400 def XOR16mr : I<0x31, MRMDestMem,
1401 (outs), (ins i16mem:$dst, GR16:$src),
1402 "xor{w}\t{$src, $dst|$dst, $src}",
1403 [(store (xor (load addr:$dst), GR16:$src), addr:$dst)]>,
1405 def XOR32mr : I<0x31, MRMDestMem,
1406 (outs), (ins i32mem:$dst, GR32:$src),
1407 "xor{l}\t{$src, $dst|$dst, $src}",
1408 [(store (xor (load addr:$dst), GR32:$src), addr:$dst)]>;
1409 def XOR8mi : Ii8<0x80, MRM6m,
1410 (outs), (ins i8mem :$dst, i8imm :$src),
1411 "xor{b}\t{$src, $dst|$dst, $src}",
1412 [(store (xor (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
1413 def XOR16mi : Ii16<0x81, MRM6m,
1414 (outs), (ins i16mem:$dst, i16imm:$src),
1415 "xor{w}\t{$src, $dst|$dst, $src}",
1416 [(store (xor (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
1418 def XOR32mi : Ii32<0x81, MRM6m,
1419 (outs), (ins i32mem:$dst, i32imm:$src),
1420 "xor{l}\t{$src, $dst|$dst, $src}",
1421 [(store (xor (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
1422 def XOR16mi8 : Ii8<0x83, MRM6m,
1423 (outs), (ins i16mem:$dst, i16i8imm :$src),
1424 "xor{w}\t{$src, $dst|$dst, $src}",
1425 [(store (xor (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
1427 def XOR32mi8 : Ii8<0x83, MRM6m,
1428 (outs), (ins i32mem:$dst, i32i8imm :$src),
1429 "xor{l}\t{$src, $dst|$dst, $src}",
1430 [(store (xor (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
1431 } // isTwoAddress = 0
1432 } // Defs = [EFLAGS]
1434 // Shift instructions
1435 let Defs = [EFLAGS] in {
1436 let Uses = [CL] in {
1437 def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src),
1438 "shl{b}\t{%cl, $dst|$dst, %CL}",
1439 [(set GR8:$dst, (shl GR8:$src, CL))]>;
1440 def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src),
1441 "shl{w}\t{%cl, $dst|$dst, %CL}",
1442 [(set GR16:$dst, (shl GR16:$src, CL))]>, OpSize;
1443 def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src),
1444 "shl{l}\t{%cl, $dst|$dst, %CL}",
1445 [(set GR32:$dst, (shl GR32:$src, CL))]>;
1448 def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1449 "shl{b}\t{$src2, $dst|$dst, $src2}",
1450 [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
1451 let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
1452 def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1453 "shl{w}\t{$src2, $dst|$dst, $src2}",
1454 [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1455 def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1456 "shl{l}\t{$src2, $dst|$dst, $src2}",
1457 [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>;
1458 // NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
1460 } // isConvertibleToThreeAddress = 1
1462 let isTwoAddress = 0 in {
1463 let Uses = [CL] in {
1464 def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
1465 "shl{b}\t{%cl, $dst|$dst, %CL}",
1466 [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
1467 def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
1468 "shl{w}\t{%cl, $dst|$dst, %CL}",
1469 [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1470 def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
1471 "shl{l}\t{%cl, $dst|$dst, %CL}",
1472 [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>;
1474 def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src),
1475 "shl{b}\t{$src, $dst|$dst, $src}",
1476 [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1477 def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, i8imm:$src),
1478 "shl{w}\t{$src, $dst|$dst, $src}",
1479 [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1481 def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, i8imm:$src),
1482 "shl{l}\t{$src, $dst|$dst, $src}",
1483 [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1486 def SHL8m1 : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
1488 [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1489 def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
1491 [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1493 def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
1495 [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1498 let Uses = [CL] in {
1499 def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src),
1500 "shr{b}\t{%cl, $dst|$dst, %CL}",
1501 [(set GR8:$dst, (srl GR8:$src, CL))]>;
1502 def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src),
1503 "shr{w}\t{%cl, $dst|$dst, %CL}",
1504 [(set GR16:$dst, (srl GR16:$src, CL))]>, OpSize;
1505 def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src),
1506 "shr{l}\t{%cl, $dst|$dst, %CL}",
1507 [(set GR32:$dst, (srl GR32:$src, CL))]>;
// SHR: logical shift right (matches the 'srl' DAG node). Encodings:
// 0xC0/0xC1 /5 = shift by imm8, 0xD0/0xD1 /5 = shift by 1, 0xD2/0xD3 /5 =
// shift by %cl. MRM5r/MRM5m put /5 in the ModRM reg field to select SHR.
1510 def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1511 "shr{b}\t{$src2, $dst|$dst, $src2}",
1512 [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
1513 def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1514 "shr{w}\t{$src2, $dst|$dst, $src2}",
1515 [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1516 def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1517 "shr{l}\t{$src2, $dst|$dst, $src2}",
1518 [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>;
// Shift-by-1 short forms (opcode 0xD0/0xD1): pattern shifts by constant 1.
1521 def SHR8r1 : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
1523 [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
1524 def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
1526 [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize;
1527 def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
1529 [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>;
// Memory-destination forms: load, shift, store back to the same address.
// Not two-address since the destination is memory, hence isTwoAddress = 0.
1531 let isTwoAddress = 0 in {
1532 let Uses = [CL] in {
1533 def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
1534 "shr{b}\t{%cl, $dst|$dst, %CL}",
1535 [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
1536 def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
1537 "shr{w}\t{%cl, $dst|$dst, %CL}",
1538 [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
1540 def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
1541 "shr{l}\t{%cl, $dst|$dst, %CL}",
1542 [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>;
1544 def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src),
1545 "shr{b}\t{$src, $dst|$dst, $src}",
1546 [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1547 def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, i8imm:$src),
1548 "shr{w}\t{$src, $dst|$dst, $src}",
1549 [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1551 def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, i8imm:$src),
1552 "shr{l}\t{$src, $dst|$dst, $src}",
1553 [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1556 def SHR8m1 : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
1558 [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1559 def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
1561 [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,OpSize;
1562 def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
1564 [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
// SAR: arithmetic shift right (matches 'sra'). Same opcode scheme as SHR
// (0xC0/0xC1 imm8, 0xD0/0xD1 by-1, 0xD2/0xD3 by %cl) but /7 (MRM7r/MRM7m).
1567 let Uses = [CL] in {
1568 def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src),
1569 "sar{b}\t{%cl, $dst|$dst, %CL}",
1570 [(set GR8:$dst, (sra GR8:$src, CL))]>;
1571 def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src),
1572 "sar{w}\t{%cl, $dst|$dst, %CL}",
1573 [(set GR16:$dst, (sra GR16:$src, CL))]>, OpSize;
1574 def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src),
1575 "sar{l}\t{%cl, $dst|$dst, %CL}",
1576 [(set GR32:$dst, (sra GR32:$src, CL))]>;
1579 def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1580 "sar{b}\t{$src2, $dst|$dst, $src2}",
1581 [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
1582 def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1583 "sar{w}\t{$src2, $dst|$dst, $src2}",
1584 [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
1586 def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1587 "sar{l}\t{$src2, $dst|$dst, $src2}",
1588 [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>;
// Shift-by-1 short forms.
1591 def SAR8r1 : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
1593 [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
1594 def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
1596 [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize;
1597 def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
1599 [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>;
// Memory-destination forms (load-op-store), not two-address.
1601 let isTwoAddress = 0 in {
1602 let Uses = [CL] in {
1603 def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
1604 "sar{b}\t{%cl, $dst|$dst, %CL}",
1605 [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
1606 def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
1607 "sar{w}\t{%cl, $dst|$dst, %CL}",
1608 [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1609 def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
1610 "sar{l}\t{%cl, $dst|$dst, %CL}",
1611 [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>;
1613 def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, i8imm:$src),
1614 "sar{b}\t{$src, $dst|$dst, $src}",
1615 [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1616 def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, i8imm:$src),
1617 "sar{w}\t{$src, $dst|$dst, $src}",
1618 [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1620 def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, i8imm:$src),
1621 "sar{l}\t{$src, $dst|$dst, $src}",
1622 [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1625 def SAR8m1 : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
1627 [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1628 def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
1630 [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1632 def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
1634 [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1637 // Rotate instructions
1638 // FIXME: provide shorter instructions when imm8 == 1
// ROL: rotate left (matches 'rotl'). Same opcode scheme as the shifts
// (0xC0/0xC1 imm8, 0xD0/0xD1 by-1, 0xD2/0xD3 by %cl) but /0 (MRM0r/MRM0m).
1639 let Uses = [CL] in {
1640 def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src),
1641 "rol{b}\t{%cl, $dst|$dst, %CL}",
1642 [(set GR8:$dst, (rotl GR8:$src, CL))]>;
1643 def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src),
1644 "rol{w}\t{%cl, $dst|$dst, %CL}",
1645 [(set GR16:$dst, (rotl GR16:$src, CL))]>, OpSize;
1646 def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src),
1647 "rol{l}\t{%cl, $dst|$dst, %CL}",
1648 [(set GR32:$dst, (rotl GR32:$src, CL))]>;
1651 def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1652 "rol{b}\t{$src2, $dst|$dst, $src2}",
1653 [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>;
1654 def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1655 "rol{w}\t{$src2, $dst|$dst, $src2}",
1656 [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1657 def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1658 "rol{l}\t{$src2, $dst|$dst, $src2}",
1659 [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>;
// Rotate-by-1 short forms.
1662 def ROL8r1 : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
1664 [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
1665 def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
1667 [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize;
1668 def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
1670 [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>;
// Memory-destination forms (load-rotate-store), not two-address.
1672 let isTwoAddress = 0 in {
1673 let Uses = [CL] in {
1674 def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
1675 "rol{b}\t{%cl, $dst|$dst, %CL}",
1676 [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
1677 def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
1678 "rol{w}\t{%cl, $dst|$dst, %CL}",
1679 [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1680 def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
1681 "rol{l}\t{%cl, $dst|$dst, %CL}",
1682 [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>;
1684 def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, i8imm:$src),
1685 "rol{b}\t{$src, $dst|$dst, $src}",
1686 [(store (rotl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1687 def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, i8imm:$src),
1688 "rol{w}\t{$src, $dst|$dst, $src}",
1689 [(store (rotl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1691 def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, i8imm:$src),
1692 "rol{l}\t{$src, $dst|$dst, $src}",
1693 [(store (rotl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1696 def ROL8m1 : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
1698 [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1699 def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
1701 [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1703 def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
1705 [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
// ROR: rotate right (matches 'rotr'). Mirrors ROL but uses /1 (MRM1r/MRM1m).
1708 let Uses = [CL] in {
1709 def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src),
1710 "ror{b}\t{%cl, $dst|$dst, %CL}",
1711 [(set GR8:$dst, (rotr GR8:$src, CL))]>;
1712 def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src),
1713 "ror{w}\t{%cl, $dst|$dst, %CL}",
1714 [(set GR16:$dst, (rotr GR16:$src, CL))]>, OpSize;
1715 def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src),
1716 "ror{l}\t{%cl, $dst|$dst, %CL}",
1717 [(set GR32:$dst, (rotr GR32:$src, CL))]>;
1720 def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1721 "ror{b}\t{$src2, $dst|$dst, $src2}",
1722 [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>;
1723 def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1724 "ror{w}\t{$src2, $dst|$dst, $src2}",
1725 [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1726 def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1727 "ror{l}\t{$src2, $dst|$dst, $src2}",
1728 [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>;
// Rotate-by-1 short forms.
1731 def ROR8r1 : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
1733 [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
1734 def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
1736 [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize;
1737 def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
1739 [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>;
// Memory-destination forms (load-rotate-store), not two-address.
1741 let isTwoAddress = 0 in {
1742 let Uses = [CL] in {
1743 def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
1744 "ror{b}\t{%cl, $dst|$dst, %CL}",
1745 [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
1746 def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
1747 "ror{w}\t{%cl, $dst|$dst, %CL}",
1748 [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1749 def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
1750 "ror{l}\t{%cl, $dst|$dst, %CL}",
1751 [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>;
1753 def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
1754 "ror{b}\t{$src, $dst|$dst, $src}",
1755 [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1756 def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, i8imm:$src),
1757 "ror{w}\t{$src, $dst|$dst, $src}",
1758 [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1760 def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, i8imm:$src),
1761 "ror{l}\t{$src, $dst|$dst, $src}",
1762 [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1765 def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
1767 [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1768 def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
1770 [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1772 def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
1774 [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1779 // Double shift instructions (generalizations of rotate)
// SHLD/SHRD shift $dst while filling vacated bits from $src2; they match the
// target-specific X86shld/X86shrd nodes. Two-byte opcodes (TB prefix):
// 0xA4/0xA5 = SHLD (imm8 / by-%cl), 0xAC/0xAD = SHRD (imm8 / by-%cl).
1780 let Uses = [CL] in {
1781 def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1782 "shld{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1783 [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB;
1784 def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1785 "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1786 [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB;
1787 def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1788 "shld{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1789 [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>,
1791 def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1792 "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1793 [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>,
1797 let isCommutable = 1 in { // These instructions commute to each other.
1798 def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
1799 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3),
1800 "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1801 [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
1804 def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
1805 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3),
1806 "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1807 [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
1810 def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
1811 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3),
1812 "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1813 [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
1816 def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
1817 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3),
1818 "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1819 [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
// Memory-destination forms: $dst is loaded, double-shifted with $src2, and
// stored back. Not two-address.
1824 let isTwoAddress = 0 in {
1825 let Uses = [CL] in {
1826 def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1827 "shld{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1828 [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
1830 def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1831 "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1832 [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
1835 def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
1836 (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
1837 "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1838 [(store (X86shld (loadi32 addr:$dst), GR32:$src2,
1839 (i8 imm:$src3)), addr:$dst)]>,
1841 def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
1842 (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
1843 "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1844 [(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
1845 (i8 imm:$src3)), addr:$dst)]>,
1848 let Uses = [CL] in {
1849 def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
1850 "shld{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1851 [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
1852 addr:$dst)]>, TB, OpSize;
1853 def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
1854 "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1855 [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
1856 addr:$dst)]>, TB, OpSize;
1858 def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
1859 (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
1860 "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1861 [(store (X86shld (loadi16 addr:$dst), GR16:$src2,
1862 (i8 imm:$src3)), addr:$dst)]>,
1864 def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
1865 (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
1866 "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1867 [(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
1868 (i8 imm:$src3)), addr:$dst)]>,
1871 } // Defs = [EFLAGS]
// ADD: integer addition (matches 'add'). All forms clobber EFLAGS.
// Opcodes: 0x00-0x03 = reg/mem forms, 0x80/0x81 /0 = full-width immediate,
// 0x83 /0 = sign-extended imm8 short form.
1875 let Defs = [EFLAGS] in {
1876 let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y
1877 def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
1878 (ins GR8 :$src1, GR8 :$src2),
1879 "add{b}\t{$src2, $dst|$dst, $src2}",
1880 [(set GR8:$dst, (add GR8:$src1, GR8:$src2))]>;
1881 let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
1882 def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
1883 (ins GR16:$src1, GR16:$src2),
1884 "add{w}\t{$src2, $dst|$dst, $src2}",
1885 [(set GR16:$dst, (add GR16:$src1, GR16:$src2))]>, OpSize;
1886 def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
1887 (ins GR32:$src1, GR32:$src2),
1888 "add{l}\t{$src2, $dst|$dst, $src2}",
1889 [(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
1890 } // end isConvertibleToThreeAddress
1891 } // end isCommutable
// Register-memory forms: second operand loaded from memory.
1892 def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
1893 (ins GR8 :$src1, i8mem :$src2),
1894 "add{b}\t{$src2, $dst|$dst, $src2}",
1895 [(set GR8:$dst, (add GR8:$src1, (load addr:$src2)))]>;
1896 def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
1897 (ins GR16:$src1, i16mem:$src2),
1898 "add{w}\t{$src2, $dst|$dst, $src2}",
1899 [(set GR16:$dst, (add GR16:$src1, (load addr:$src2)))]>,OpSize;
1900 def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
1901 (ins GR32:$src1, i32mem:$src2),
1902 "add{l}\t{$src2, $dst|$dst, $src2}",
1903 [(set GR32:$dst, (add GR32:$src1, (load addr:$src2)))]>;
1905 def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1906 "add{b}\t{$src2, $dst|$dst, $src2}",
1907 [(set GR8:$dst, (add GR8:$src1, imm:$src2))]>;
1909 let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
1910 def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
1911 (ins GR16:$src1, i16imm:$src2),
1912 "add{w}\t{$src2, $dst|$dst, $src2}",
1913 [(set GR16:$dst, (add GR16:$src1, imm:$src2))]>, OpSize;
1914 def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
1915 (ins GR32:$src1, i32imm:$src2),
1916 "add{l}\t{$src2, $dst|$dst, $src2}",
1917 [(set GR32:$dst, (add GR32:$src1, imm:$src2))]>;
// Sign-extended imm8 short forms (opcode 0x83): smaller encoding when the
// immediate fits in a signed byte.
1918 def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
1919 (ins GR16:$src1, i16i8imm:$src2),
1920 "add{w}\t{$src2, $dst|$dst, $src2}",
1921 [(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2))]>, OpSize;
1922 def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
1923 (ins GR32:$src1, i32i8imm:$src2),
1924 "add{l}\t{$src2, $dst|$dst, $src2}",
1925 [(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2))]>;
// Memory-destination forms (load-add-store), not two-address.
1928 let isTwoAddress = 0 in {
1929 def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
1930 "add{b}\t{$src2, $dst|$dst, $src2}",
1931 [(store (add (load addr:$dst), GR8:$src2), addr:$dst)]>;
1932 def ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
1933 "add{w}\t{$src2, $dst|$dst, $src2}",
1934 [(store (add (load addr:$dst), GR16:$src2), addr:$dst)]>,
1936 def ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1937 "add{l}\t{$src2, $dst|$dst, $src2}",
1938 [(store (add (load addr:$dst), GR32:$src2), addr:$dst)]>;
1939 def ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
1940 "add{b}\t{$src2, $dst|$dst, $src2}",
1941 [(store (add (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
1942 def ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
1943 "add{w}\t{$src2, $dst|$dst, $src2}",
1944 [(store (add (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
1946 def ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
1947 "add{l}\t{$src2, $dst|$dst, $src2}",
1948 [(store (add (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
1949 def ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
1950 "add{w}\t{$src2, $dst|$dst, $src2}",
1951 [(store (add (load addr:$dst), i16immSExt8:$src2), addr:$dst)]>,
1953 def ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
1954 "add{l}\t{$src2, $dst|$dst, $src2}",
1955 [(store (add (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
// ADC: add with carry (matches 'adde', which consumes the carry produced by
// a preceding addc/adde). Reads EFLAGS in addition to clobbering it.
// Only 32-bit forms are defined here.
1958 let Uses = [EFLAGS] in {
1959 let isCommutable = 1 in { // X = ADC Y, Z --> X = ADC Z, Y
1960 def ADC32rr : I<0x11, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1961 "adc{l}\t{$src2, $dst|$dst, $src2}",
1962 [(set GR32:$dst, (adde GR32:$src1, GR32:$src2))]>;
1964 def ADC32rm : I<0x13, MRMSrcMem , (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1965 "adc{l}\t{$src2, $dst|$dst, $src2}",
1966 [(set GR32:$dst, (adde GR32:$src1, (load addr:$src2)))]>;
1967 def ADC32ri : Ii32<0x81, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1968 "adc{l}\t{$src2, $dst|$dst, $src2}",
1969 [(set GR32:$dst, (adde GR32:$src1, imm:$src2))]>;
1970 def ADC32ri8 : Ii8<0x83, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1971 "adc{l}\t{$src2, $dst|$dst, $src2}",
1972 [(set GR32:$dst, (adde GR32:$src1, i32immSExt8:$src2))]>;
// Memory-destination forms (load-adc-store), not two-address.
1974 let isTwoAddress = 0 in {
1975 def ADC32mr : I<0x11, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1976 "adc{l}\t{$src2, $dst|$dst, $src2}",
1977 [(store (adde (load addr:$dst), GR32:$src2), addr:$dst)]>;
1978 def ADC32mi : Ii32<0x81, MRM2m, (outs), (ins i32mem:$dst, i32imm:$src2),
1979 "adc{l}\t{$src2, $dst|$dst, $src2}",
1980 [(store (adde (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
1981 def ADC32mi8 : Ii8<0x83, MRM2m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
1982 "adc{l}\t{$src2, $dst|$dst, $src2}",
1983 [(store (adde (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
1985 } // Uses = [EFLAGS]
// SUB: integer subtraction (matches 'sub'). Not commutable. Opcodes:
// 0x28-0x2B = reg/mem forms, 0x80/0x81 /5 = immediate, 0x83 /5 = sext imm8.
1987 def SUB8rr : I<0x28, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1988 "sub{b}\t{$src2, $dst|$dst, $src2}",
1989 [(set GR8:$dst, (sub GR8:$src1, GR8:$src2))]>;
1990 def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1991 "sub{w}\t{$src2, $dst|$dst, $src2}",
1992 [(set GR16:$dst, (sub GR16:$src1, GR16:$src2))]>, OpSize;
1993 def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1994 "sub{l}\t{$src2, $dst|$dst, $src2}",
1995 [(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
1996 def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
1997 "sub{b}\t{$src2, $dst|$dst, $src2}",
1998 [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2)))]>;
1999 def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
2000 "sub{w}\t{$src2, $dst|$dst, $src2}",
2001 [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2)))]>, OpSize;
2002 def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
2003 "sub{l}\t{$src2, $dst|$dst, $src2}",
2004 [(set GR32:$dst, (sub GR32:$src1, (load addr:$src2)))]>;
2006 def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
2007 "sub{b}\t{$src2, $dst|$dst, $src2}",
2008 [(set GR8:$dst, (sub GR8:$src1, imm:$src2))]>;
2009 def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
2010 "sub{w}\t{$src2, $dst|$dst, $src2}",
2011 [(set GR16:$dst, (sub GR16:$src1, imm:$src2))]>, OpSize;
2012 def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
2013 "sub{l}\t{$src2, $dst|$dst, $src2}",
2014 [(set GR32:$dst, (sub GR32:$src1, imm:$src2))]>;
2015 def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
2016 "sub{w}\t{$src2, $dst|$dst, $src2}",
2017 [(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2))]>,
2019 def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
2020 "sub{l}\t{$src2, $dst|$dst, $src2}",
2021 [(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2))]>;
// Memory-destination forms (load-sub-store), not two-address.
2022 let isTwoAddress = 0 in {
2023 def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
2024 "sub{b}\t{$src2, $dst|$dst, $src2}",
2025 [(store (sub (load addr:$dst), GR8:$src2), addr:$dst)]>;
2026 def SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
2027 "sub{w}\t{$src2, $dst|$dst, $src2}",
2028 [(store (sub (load addr:$dst), GR16:$src2), addr:$dst)]>,
2030 def SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
2031 "sub{l}\t{$src2, $dst|$dst, $src2}",
2032 [(store (sub (load addr:$dst), GR32:$src2), addr:$dst)]>;
2033 def SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
2034 "sub{b}\t{$src2, $dst|$dst, $src2}",
2035 [(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
2036 def SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
2037 "sub{w}\t{$src2, $dst|$dst, $src2}",
2038 [(store (sub (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
2040 def SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
2041 "sub{l}\t{$src2, $dst|$dst, $src2}",
2042 [(store (sub (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
2043 def SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
2044 "sub{w}\t{$src2, $dst|$dst, $src2}",
2045 [(store (sub (load addr:$dst), i16immSExt8:$src2), addr:$dst)]>,
2047 def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
2048 "sub{l}\t{$src2, $dst|$dst, $src2}",
2049 [(store (sub (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
// SBB: subtract with borrow (matches 'sube'). Reads the carry flag, so it
// both uses and (via the enclosing Defs) clobbers EFLAGS.
2052 let Uses = [EFLAGS] in {
2053 def SBB32rr : I<0x19, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
2054 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2055 [(set GR32:$dst, (sube GR32:$src1, GR32:$src2))]>;
// Memory-destination forms (load-sbb-store), not two-address.
2057 let isTwoAddress = 0 in {
2058 def SBB32mr : I<0x19, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
2059 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2060 [(store (sube (load addr:$dst), GR32:$src2), addr:$dst)]>;
// SBB r/m8, imm8 is opcode 0x80 /3 with an *8-bit* immediate ("ib" in the
// SDM), so the instruction class must be Ii8, matching ADD8mi/SUB8mi above.
// The previous Ii32 would have emitted a 32-bit immediate field.
2061 def SBB8mi : Ii8<0x80, MRM3m, (outs), (ins i8mem:$dst, i8imm:$src2),
2062 "sbb{b}\t{$src2, $dst|$dst, $src2}",
2063 [(store (sube (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
2064 def SBB32mi : Ii32<0x81, MRM3m, (outs), (ins i32mem:$dst, i32imm:$src2),
2065 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2066 [(store (sube (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
2067 def SBB32mi8 : Ii8<0x83, MRM3m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
2068 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2069 [(store (sube (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
// Register-source forms: memory operand, full immediate, and sext imm8.
2071 def SBB32rm : I<0x1B, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
2072 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2073 [(set GR32:$dst, (sube GR32:$src1, (load addr:$src2)))]>;
2074 def SBB32ri : Ii32<0x81, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
2075 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2076 [(set GR32:$dst, (sube GR32:$src1, imm:$src2))]>;
2077 def SBB32ri8 : Ii8<0x83, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
2078 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2079 [(set GR32:$dst, (sube GR32:$src1, i32immSExt8:$src2))]>;
2080 } // Uses = [EFLAGS]
2081 } // Defs = [EFLAGS]
// IMUL (two-operand, two-address forms): 0x0F 0xAF (TB prefix), matches
// 'mul'. Commutable in the rr forms.
2083 let Defs = [EFLAGS] in {
2084 let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
2085 def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
2086 "imul{w}\t{$src2, $dst|$dst, $src2}",
2087 [(set GR16:$dst, (mul GR16:$src1, GR16:$src2))]>, TB, OpSize;
2088 def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
2089 "imul{l}\t{$src2, $dst|$dst, $src2}",
2090 [(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>, TB;
2092 def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
2093 "imul{w}\t{$src2, $dst|$dst, $src2}",
2094 [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2)))]>,
2096 def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
2097 "imul{l}\t{$src2, $dst|$dst, $src2}",
2098 [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2)))]>, TB;
2099 } // Defs = [EFLAGS]
2100 } // end Two Address instructions
2102 // Surprisingly enough, these are not two address instructions!
// IMUL three-operand immediate forms: dst = src1 * imm. 0x69 = full-width
// immediate, 0x6B = sign-extended imm8; src1 may be a register or memory.
2103 let Defs = [EFLAGS] in {
2104 def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
2105 (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
2106 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2107 [(set GR16:$dst, (mul GR16:$src1, imm:$src2))]>, OpSize;
2108 def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
2109 (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
2110 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2111 [(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>;
2112 def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
2113 (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
2114 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2115 [(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2))]>,
2117 def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
2118 (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
2119 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2120 [(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2))]>;
// Memory-source variants: dst = [mem] * imm.
2122 def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
2123 (outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
2124 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2125 [(set GR16:$dst, (mul (load addr:$src1), imm:$src2))]>,
2127 def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
2128 (outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
2129 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2130 [(set GR32:$dst, (mul (load addr:$src1), imm:$src2))]>;
2131 def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
2132 (outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
2133 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2134 [(set GR16:$dst, (mul (load addr:$src1), i16immSExt8:$src2))]>,
2136 def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
2137 (outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
2138 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2139 [(set GR32:$dst, (mul (load addr:$src1), i32immSExt8:$src2))]>;
2140 } // Defs = [EFLAGS]
2142 //===----------------------------------------------------------------------===//
2143 // Test instructions are just like AND, except they don't generate a result.
// TEST: AND that only sets flags (no destination). Patterns match an
// X86cmp of an AND against 0, setting EFLAGS implicitly. 'and_su' is used
// in the register/immediate forms; memory forms use plain 'and'.
2145 let Defs = [EFLAGS] in {
2146 let isCommutable = 1 in { // TEST X, Y --> TEST Y, X
2147 def TEST8rr : I<0x84, MRMDestReg, (outs), (ins GR8:$src1, GR8:$src2),
2148 "test{b}\t{$src2, $src1|$src1, $src2}",
2149 [(X86cmp (and_su GR8:$src1, GR8:$src2), 0),
2150 (implicit EFLAGS)]>;
2151 def TEST16rr : I<0x85, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
2152 "test{w}\t{$src2, $src1|$src1, $src2}",
2153 [(X86cmp (and_su GR16:$src1, GR16:$src2), 0),
2154 (implicit EFLAGS)]>,
2156 def TEST32rr : I<0x85, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
2157 "test{l}\t{$src2, $src1|$src1, $src2}",
2158 [(X86cmp (and_su GR32:$src1, GR32:$src2), 0),
2159 (implicit EFLAGS)]>;
// Register-memory forms.
2162 def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2),
2163 "test{b}\t{$src2, $src1|$src1, $src2}",
2164 [(X86cmp (and GR8:$src1, (loadi8 addr:$src2)), 0),
2165 (implicit EFLAGS)]>;
2166 def TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2),
2167 "test{w}\t{$src2, $src1|$src1, $src2}",
2168 [(X86cmp (and GR16:$src1, (loadi16 addr:$src2)), 0),
2169 (implicit EFLAGS)]>, OpSize;
2170 def TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2),
2171 "test{l}\t{$src2, $src1|$src1, $src2}",
2172 [(X86cmp (and GR32:$src1, (loadi32 addr:$src2)), 0),
2173 (implicit EFLAGS)]>;
// Immediate forms: 0xF6/0xF7 /0.
2175 def TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8
2176 (outs), (ins GR8:$src1, i8imm:$src2),
2177 "test{b}\t{$src2, $src1|$src1, $src2}",
2178 [(X86cmp (and_su GR8:$src1, imm:$src2), 0),
2179 (implicit EFLAGS)]>;
2180 def TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16
2181 (outs), (ins GR16:$src1, i16imm:$src2),
2182 "test{w}\t{$src2, $src1|$src1, $src2}",
2183 [(X86cmp (and_su GR16:$src1, imm:$src2), 0),
2184 (implicit EFLAGS)]>, OpSize;
2185 def TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32
2186 (outs), (ins GR32:$src1, i32imm:$src2),
2187 "test{l}\t{$src2, $src1|$src1, $src2}",
2188 [(X86cmp (and_su GR32:$src1, imm:$src2), 0),
2189 (implicit EFLAGS)]>;
2191 def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8
2192 (outs), (ins i8mem:$src1, i8imm:$src2),
2193 "test{b}\t{$src2, $src1|$src1, $src2}",
2194 [(X86cmp (and (loadi8 addr:$src1), imm:$src2), 0),
2195 (implicit EFLAGS)]>;
2196 def TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16
2197 (outs), (ins i16mem:$src1, i16imm:$src2),
2198 "test{w}\t{$src2, $src1|$src1, $src2}",
2199 [(X86cmp (and (loadi16 addr:$src1), imm:$src2), 0),
2200 (implicit EFLAGS)]>, OpSize;
2201 def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32
2202 (outs), (ins i32mem:$src1, i32imm:$src2),
2203 "test{l}\t{$src2, $src1|$src1, $src2}",
2204 [(X86cmp (and (loadi32 addr:$src1), imm:$src2), 0),
2205 (implicit EFLAGS)]>;
2206 } // Defs = [EFLAGS]
2209 // Condition code ops, incl. set if equal/not equal/...
// SAHF/LAHF move the low flag byte between AH and EFLAGS; no patterns, so
// they are marked neverHasSideEffects and selected manually if needed.
2210 let Defs = [EFLAGS], Uses = [AH], neverHasSideEffects = 1 in
2211 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH
2212 let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
2213 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
2215 let Uses = [EFLAGS] in {
2216 def SETEr : I<0x94, MRM0r,
2217 (outs GR8 :$dst), (ins),
2219 [(set GR8:$dst, (X86setcc X86_COND_E, EFLAGS))]>,
2221 def SETEm : I<0x94, MRM0m,
2222 (outs), (ins i8mem:$dst),
2224 [(store (X86setcc X86_COND_E, EFLAGS), addr:$dst)]>,
2226 def SETNEr : I<0x95, MRM0r,
2227 (outs GR8 :$dst), (ins),
2229 [(set GR8:$dst, (X86setcc X86_COND_NE, EFLAGS))]>,
2231 def SETNEm : I<0x95, MRM0m,
2232 (outs), (ins i8mem:$dst),
2234 [(store (X86setcc X86_COND_NE, EFLAGS), addr:$dst)]>,
2236 def SETLr : I<0x9C, MRM0r,
2237 (outs GR8 :$dst), (ins),
2239 [(set GR8:$dst, (X86setcc X86_COND_L, EFLAGS))]>,
2240 TB; // GR8 = < signed
2241 def SETLm : I<0x9C, MRM0m,
2242 (outs), (ins i8mem:$dst),
2244 [(store (X86setcc X86_COND_L, EFLAGS), addr:$dst)]>,
2245 TB; // [mem8] = < signed
2246 def SETGEr : I<0x9D, MRM0r,
2247 (outs GR8 :$dst), (ins),
2249 [(set GR8:$dst, (X86setcc X86_COND_GE, EFLAGS))]>,
2250 TB; // GR8 = >= signed
2251 def SETGEm : I<0x9D, MRM0m,
2252 (outs), (ins i8mem:$dst),
2254 [(store (X86setcc X86_COND_GE, EFLAGS), addr:$dst)]>,
2255 TB; // [mem8] = >= signed
2256 def SETLEr : I<0x9E, MRM0r,
2257 (outs GR8 :$dst), (ins),
2259 [(set GR8:$dst, (X86setcc X86_COND_LE, EFLAGS))]>,
2260 TB; // GR8 = <= signed
2261 def SETLEm : I<0x9E, MRM0m,
2262 (outs), (ins i8mem:$dst),
2264 [(store (X86setcc X86_COND_LE, EFLAGS), addr:$dst)]>,
2265 TB; // [mem8] = <= signed
2266 def SETGr : I<0x9F, MRM0r,
2267 (outs GR8 :$dst), (ins),
2269 [(set GR8:$dst, (X86setcc X86_COND_G, EFLAGS))]>,
2270 TB; // GR8 = > signed
2271 def SETGm : I<0x9F, MRM0m,
2272 (outs), (ins i8mem:$dst),
2274 [(store (X86setcc X86_COND_G, EFLAGS), addr:$dst)]>,
2275 TB; // [mem8] = > signed
2277 def SETBr : I<0x92, MRM0r,
2278 (outs GR8 :$dst), (ins),
2280 [(set GR8:$dst, (X86setcc X86_COND_B, EFLAGS))]>,
2281 TB; // GR8 = < unsign
2282 def SETBm : I<0x92, MRM0m,
2283 (outs), (ins i8mem:$dst),
2285 [(store (X86setcc X86_COND_B, EFLAGS), addr:$dst)]>,
2286 TB; // [mem8] = < unsign
2287 def SETAEr : I<0x93, MRM0r,
2288 (outs GR8 :$dst), (ins),
2290 [(set GR8:$dst, (X86setcc X86_COND_AE, EFLAGS))]>,
2291 TB; // GR8 = >= unsign
2292 def SETAEm : I<0x93, MRM0m,
2293 (outs), (ins i8mem:$dst),
2295 [(store (X86setcc X86_COND_AE, EFLAGS), addr:$dst)]>,
2296 TB; // [mem8] = >= unsign
2297 def SETBEr : I<0x96, MRM0r,
2298 (outs GR8 :$dst), (ins),
2300 [(set GR8:$dst, (X86setcc X86_COND_BE, EFLAGS))]>,
2301 TB; // GR8 = <= unsign
2302 def SETBEm : I<0x96, MRM0m,
2303 (outs), (ins i8mem:$dst),
2305 [(store (X86setcc X86_COND_BE, EFLAGS), addr:$dst)]>,
2306 TB; // [mem8] = <= unsign
2307 def SETAr    : I<0x97, MRM0r,
2308 (outs GR8 :$dst), (ins),
2310 [(set GR8:$dst, (X86setcc X86_COND_A, EFLAGS))]>,
2311 TB; // GR8 = > unsigned
2312 def SETAm    : I<0x97, MRM0m,
2313 (outs), (ins i8mem:$dst),
2315 [(store (X86setcc X86_COND_A, EFLAGS), addr:$dst)]>,
2316 TB; // [mem8] = > unsigned
2318 def SETSr : I<0x98, MRM0r,
2319 (outs GR8 :$dst), (ins),
2321 [(set GR8:$dst, (X86setcc X86_COND_S, EFLAGS))]>,
2322 TB; // GR8 = <sign bit>
2323 def SETSm : I<0x98, MRM0m,
2324 (outs), (ins i8mem:$dst),
2326 [(store (X86setcc X86_COND_S, EFLAGS), addr:$dst)]>,
2327 TB; // [mem8] = <sign bit>
2328 def SETNSr : I<0x99, MRM0r,
2329 (outs GR8 :$dst), (ins),
2331 [(set GR8:$dst, (X86setcc X86_COND_NS, EFLAGS))]>,
2332 TB; // GR8 = !<sign bit>
2333 def SETNSm : I<0x99, MRM0m,
2334 (outs), (ins i8mem:$dst),
2336 [(store (X86setcc X86_COND_NS, EFLAGS), addr:$dst)]>,
2337 TB; // [mem8] = !<sign bit>
2338 def SETPr : I<0x9A, MRM0r,
2339 (outs GR8 :$dst), (ins),
2341 [(set GR8:$dst, (X86setcc X86_COND_P, EFLAGS))]>,
2343 def SETPm : I<0x9A, MRM0m,
2344 (outs), (ins i8mem:$dst),
2346 [(store (X86setcc X86_COND_P, EFLAGS), addr:$dst)]>,
2347 TB; // [mem8] = parity
2348 def SETNPr : I<0x9B, MRM0r,
2349 (outs GR8 :$dst), (ins),
2351 [(set GR8:$dst, (X86setcc X86_COND_NP, EFLAGS))]>,
2352 TB; // GR8 = not parity
2353 def SETNPm : I<0x9B, MRM0m,
2354 (outs), (ins i8mem:$dst),
2356 [(store (X86setcc X86_COND_NP, EFLAGS), addr:$dst)]>,
2357 TB; // [mem8] = not parity
2358 } // Uses = [EFLAGS]
2361 // Integer comparisons
2362 let Defs = [EFLAGS] in {
// Register / register compares: CMP r/m8, r8 is 0x38; 16/32-bit share 0x39
// and are distinguished by the OpSize (0x66) prefix.
2363 def CMP8rr  : I<0x38, MRMDestReg,
2364 (outs), (ins GR8 :$src1, GR8 :$src2),
2365 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2366 [(X86cmp GR8:$src1, GR8:$src2), (implicit EFLAGS)]>;
2367 def CMP16rr : I<0x39, MRMDestReg,
2368 (outs), (ins GR16:$src1, GR16:$src2),
2369 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2370 [(X86cmp GR16:$src1, GR16:$src2), (implicit EFLAGS)]>, OpSize;
2371 def CMP32rr : I<0x39, MRMDestReg,
2372 (outs), (ins GR32:$src1, GR32:$src2),
2373 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2374 [(X86cmp GR32:$src1, GR32:$src2), (implicit EFLAGS)]>;
// Memory (first operand) compared against a register.
2375 def CMP8mr  : I<0x38, MRMDestMem,
2376 (outs), (ins i8mem :$src1, GR8 :$src2),
2377 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2378 [(X86cmp (loadi8 addr:$src1), GR8:$src2),
2379 (implicit EFLAGS)]>;
2380 def CMP16mr : I<0x39, MRMDestMem,
2381 (outs), (ins i16mem:$src1, GR16:$src2),
2382 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2383 [(X86cmp (loadi16 addr:$src1), GR16:$src2),
2384 (implicit EFLAGS)]>, OpSize;
2385 def CMP32mr : I<0x39, MRMDestMem,
2386 (outs), (ins i32mem:$src1, GR32:$src2),
2387 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2388 [(X86cmp (loadi32 addr:$src1), GR32:$src2),
2389 (implicit EFLAGS)]>;
// Register compared against memory (second operand): opcodes 0x3A/0x3B.
2390 def CMP8rm  : I<0x3A, MRMSrcMem,
2391 (outs), (ins GR8 :$src1, i8mem :$src2),
2392 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2393 [(X86cmp GR8:$src1, (loadi8 addr:$src2)),
2394 (implicit EFLAGS)]>;
2395 def CMP16rm : I<0x3B, MRMSrcMem,
2396 (outs), (ins GR16:$src1, i16mem:$src2),
2397 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2398 [(X86cmp GR16:$src1, (loadi16 addr:$src2)),
2399 (implicit EFLAGS)]>, OpSize;
2400 def CMP32rm : I<0x3B, MRMSrcMem,
2401 (outs), (ins GR32:$src1, i32mem:$src2),
2402 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2403 [(X86cmp GR32:$src1, (loadi32 addr:$src2)),
2404 (implicit EFLAGS)]>;
// Compare against a full-width immediate: group-1 opcode 0x80/0x81, /7.
2405 def CMP8ri  : Ii8<0x80, MRM7r,
2406 (outs), (ins GR8:$src1, i8imm:$src2),
2407 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2408 [(X86cmp GR8:$src1, imm:$src2), (implicit EFLAGS)]>;
2409 def CMP16ri : Ii16<0x81, MRM7r,
2410 (outs), (ins GR16:$src1, i16imm:$src2),
2411 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2412 [(X86cmp GR16:$src1, imm:$src2),
2413 (implicit EFLAGS)]>, OpSize;
2414 def CMP32ri : Ii32<0x81, MRM7r,
2415 (outs), (ins GR32:$src1, i32imm:$src2),
2416 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2417 [(X86cmp GR32:$src1, imm:$src2), (implicit EFLAGS)]>;
// Memory compared against a full-width immediate.
2418 def CMP8mi  : Ii8 <0x80, MRM7m,
2419 (outs), (ins i8mem :$src1, i8imm :$src2),
2420 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2421 [(X86cmp (loadi8 addr:$src1), imm:$src2),
2422 (implicit EFLAGS)]>;
2423 def CMP16mi : Ii16<0x81, MRM7m,
2424 (outs), (ins i16mem:$src1, i16imm:$src2),
2425 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2426 [(X86cmp (loadi16 addr:$src1), imm:$src2),
2427 (implicit EFLAGS)]>, OpSize;
2428 def CMP32mi : Ii32<0x81, MRM7m,
2429 (outs), (ins i32mem:$src1, i32imm:$src2),
2430 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2431 [(X86cmp (loadi32 addr:$src1), imm:$src2),
2432 (implicit EFLAGS)]>;
// Sign-extended 8-bit immediate forms (opcode 0x83): smaller encoding when
// the immediate fits in a signed byte.
2433 def CMP16ri8 : Ii8<0x83, MRM7r,
2434 (outs), (ins GR16:$src1, i16i8imm:$src2),
2435 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2436 [(X86cmp GR16:$src1, i16immSExt8:$src2),
2437 (implicit EFLAGS)]>, OpSize;
2438 def CMP16mi8 : Ii8<0x83, MRM7m,
2439 (outs), (ins i16mem:$src1, i16i8imm:$src2),
2440 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2441 [(X86cmp (loadi16 addr:$src1), i16immSExt8:$src2),
2442 (implicit EFLAGS)]>, OpSize;
2443 def CMP32mi8 : Ii8<0x83, MRM7m,
2444 (outs), (ins i32mem:$src1, i32i8imm:$src2),
2445 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2446 [(X86cmp (loadi32 addr:$src1), i32immSExt8:$src2),
2447 (implicit EFLAGS)]>;
2448 def CMP32ri8 : Ii8<0x83, MRM7r,
2449 (outs), (ins GR32:$src1, i32i8imm:$src2),
2450 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2451 [(X86cmp GR32:$src1, i32immSExt8:$src2),
2452 (implicit EFLAGS)]>;
2453 } // Defs = [EFLAGS]
2455 // Sign/Zero extenders
2456 // Use movsbl instead of movsbw; we don't care about the high 16 bits
2457 // of the register here. This has a smaller encoding and avoids a
2458 // partial-register update.
2459 def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
2460 "movs{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2461 [(set GR16:$dst, (sext GR8:$src))]>, TB;
2462 def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
2463 "movs{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2464 [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
2465 def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
2466 "movs{bl|x}\t{$src, $dst|$dst, $src}",
2467 [(set GR32:$dst, (sext GR8:$src))]>, TB;
2468 def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
2469 "movs{bl|x}\t{$src, $dst|$dst, $src}",
2470 [(set GR32:$dst, (sextloadi32i8 addr:$src))]>, TB;
2471 def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
2472 "movs{wl|x}\t{$src, $dst|$dst, $src}",
2473 [(set GR32:$dst, (sext GR16:$src))]>, TB;
2474 def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
2475 "movs{wl|x}\t{$src, $dst|$dst, $src}",
2476 [(set GR32:$dst, (sextloadi32i16 addr:$src))]>, TB;
2478 // Use movzbl instead of movzbw; we don't care about the high 16 bits
2479 // of the register here. This has a smaller encoding and avoids a
2480 // partial-register update.
2481 def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
2482 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2483 [(set GR16:$dst, (zext GR8:$src))]>, TB;
2484 def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
2485 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2486 [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
2487 def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
2488 "movz{bl|x}\t{$src, $dst|$dst, $src}",
2489 [(set GR32:$dst, (zext GR8:$src))]>, TB;
2490 def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
2491 "movz{bl|x}\t{$src, $dst|$dst, $src}",
2492 [(set GR32:$dst, (zextloadi32i8 addr:$src))]>, TB;
2493 def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
2494 "movz{wl|x}\t{$src, $dst|$dst, $src}",
2495 [(set GR32:$dst, (zext GR16:$src))]>, TB;
2496 def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
2497 "movz{wl|x}\t{$src, $dst|$dst, $src}",
2498 [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;
2500 let neverHasSideEffects = 1 in {
2501 let Defs = [AX], Uses = [AL] in
2502 def CBW : I<0x98, RawFrm, (outs), (ins),
2503 "{cbtw|cbw}", []>, OpSize; // AX = signext(AL)
2504 let Defs = [EAX], Uses = [AX] in
2505 def CWDE : I<0x98, RawFrm, (outs), (ins),
2506 "{cwtl|cwde}", []>; // EAX = signext(AX)
2508 let Defs = [AX,DX], Uses = [AX] in
2509 def CWD : I<0x99, RawFrm, (outs), (ins),
2510 "{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX)
2511 let Defs = [EAX,EDX], Uses = [EAX] in
2512 def CDQ : I<0x99, RawFrm, (outs), (ins),
2513 "{cltd|cdq}", []>; // EDX:EAX = signext(EAX)
2516 //===----------------------------------------------------------------------===//
2517 // Alias Instructions
2518 //===----------------------------------------------------------------------===//
2520 // Alias instructions that map movr0 to xor.
2521 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
2522 let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1 in {
2523 def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
2524 "xor{b}\t$dst, $dst",
2525 [(set GR8:$dst, 0)]>;
2526 // Use xorl instead of xorw since we don't care about the high 16 bits,
2527 // it's smaller, and it avoids a partial-register update.
2528 def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
2529 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
2530 [(set GR16:$dst, 0)]>;
2531 def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins),
2532 "xor{l}\t$dst, $dst",
2533 [(set GR32:$dst, 0)]>;
2536 // Basic operations on the GR16 / GR32 subclasses GR16_ and GR32_, which contain
2537 // those registers that have GR8 sub-registers (i.e. AX - DX, EAX - EDX).
2538 let neverHasSideEffects = 1 in {
2539 def MOV16to16_ : I<0x89, MRMDestReg, (outs GR16_:$dst), (ins GR16:$src),
2540 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2541 def MOV32to32_ : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32:$src),
2542 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2544 def MOV16_rr : I<0x89, MRMDestReg, (outs GR16_:$dst), (ins GR16_:$src),
2545 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2546 def MOV32_rr : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32_:$src),
2547 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2548 } // neverHasSideEffects
2550 let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
2551 def MOV16_rm : I<0x8B, MRMSrcMem, (outs GR16_:$dst), (ins i16mem:$src),
2552 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2553 def MOV32_rm : I<0x8B, MRMSrcMem, (outs GR32_:$dst), (ins i32mem:$src),
2554 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2556 let mayStore = 1, neverHasSideEffects = 1 in {
2557 def MOV16_mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16_:$src),
2558 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2559 def MOV32_mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32_:$src),
2560 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2563 //===----------------------------------------------------------------------===//
2564 // Thread Local Storage Instructions
2568 def TLS_addr32 : I<0, Pseudo, (outs GR32:$dst), (ins i32imm:$sym),
2569 "leal\t${sym:mem}(,%ebx,1), $dst",
2570 [(set GR32:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
2572 let AddedComplexity = 10 in
2573 def TLS_gs_rr : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
2574 "movl\t%gs:($src), $dst",
2575 [(set GR32:$dst, (load (add X86TLStp, GR32:$src)))]>;
2577 let AddedComplexity = 15 in
2578 def TLS_gs_ri : I<0, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
2579 "movl\t%gs:${src:mem}, $dst",
2581 (load (add X86TLStp, (X86Wrapper tglobaltlsaddr:$src))))]>;
2583 def TLS_tp : I<0, Pseudo, (outs GR32:$dst), (ins),
2584 "movl\t%gs:0, $dst",
2585 [(set GR32:$dst, X86TLStp)]>;
2587 //===----------------------------------------------------------------------===//
2588 // DWARF Pseudo Instructions
2591 def DWARF_LOC : I<0, Pseudo, (outs),
2592 (ins i32imm:$line, i32imm:$col, i32imm:$file),
2593 ".loc\t${file:debug} ${line:debug} ${col:debug}",
2594 [(dwarf_loc (i32 imm:$line), (i32 imm:$col),
2597 //===----------------------------------------------------------------------===//
2598 // EH Pseudo Instructions
2600 let isTerminator = 1, isReturn = 1, isBarrier = 1,
2602 def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
2603 "ret\t#eh_return, addr: $addr",
2604 [(X86ehret GR32:$addr)]>;
2608 //===----------------------------------------------------------------------===//
2612 // Atomic swap. These are just normal xchg instructions. But since a memory
2613 // operand is referenced, the atomicity is ensured.
2614 let Constraints = "$val = $dst" in {
2615 def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
2616 "xchg{l}\t{$val, $ptr|$ptr, $val}",
2617 [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>;
2618 def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
2619 "xchg{w}\t{$val, $ptr|$ptr, $val}",
2620 [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>,
2622 def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
2623 "xchg{b}\t{$val, $ptr|$ptr, $val}",
2624 [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>;
2627 // Atomic compare and swap.
2628 let Defs = [EAX, EFLAGS], Uses = [EAX] in {
2629 def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
2630 "lock\n\tcmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
2631 [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
2633 let Defs = [EAX, EBX, ECX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in {
2634 def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i32mem:$ptr),
2635 "lock\n\tcmpxchg8b\t$ptr",
2636 [(X86cas8 addr:$ptr)]>, TB, LOCK;
2639 let Defs = [AX, EFLAGS], Uses = [AX] in {
2640 def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
2641 "lock\n\tcmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
2642 [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
2644 let Defs = [AL, EFLAGS], Uses = [AL] in {
2645 def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
2646 "lock\n\tcmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
2647 [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
2650 // Atomic exchange and add
2651 let Constraints = "$val = $dst", Defs = [EFLAGS] in {
2652 def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
2653 "lock\n\txadd{l}\t{$val, $ptr|$ptr, $val}",
2654 [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
2656 def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
2657 "lock\n\txadd{w}\t{$val, $ptr|$ptr, $val}",
2658 [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
2660 def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
2661 "lock\n\txadd{b}\t{$val, $ptr|$ptr, $val}",
2662 [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
2666 // Atomic exchange, and, or, xor
2667 let Constraints = "$val = $dst", Defs = [EFLAGS],
2668 usesCustomDAGSchedInserter = 1 in {
// Pseudo read-modify-write atomics, lowered by the custom DAG-sched inserter
// into cmpxchg loops. Fix: the placeholder asm strings misspelled "PSEUDO"
// as "PSUEDO"; these strings are only ever printed for debugging, never
// assembled, so correcting them cannot change encoding.
2669 def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2670 "#ATOMAND32 PSEUDO!",
2671 [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
2672 def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2673 "#ATOMOR32 PSEUDO!",
2674 [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
2675 def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2676 "#ATOMXOR32 PSEUDO!",
2677 [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
2678 def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2679 "#ATOMNAND32 PSEUDO!",
2680 [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
2681 def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
2682 "#ATOMMIN32 PSEUDO!",
2683 [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
2684 def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2685 "#ATOMMAX32 PSEUDO!",
2686 [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
2687 def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2688 "#ATOMUMIN32 PSEUDO!",
2689 [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
2690 def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2691 "#ATOMUMAX32 PSEUDO!",
2692 [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
// 16-bit counterparts of the RMW atomic pseudos above. Fix: "PSUEDO" typo in
// the debug-only placeholder asm strings corrected to "PSEUDO" (these strings
// are never emitted as real assembly).
2694 def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
2695 "#ATOMAND16 PSEUDO!",
2696 [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
2697 def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
2698 "#ATOMOR16 PSEUDO!",
2699 [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
2700 def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
2701 "#ATOMXOR16 PSEUDO!",
2702 [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
2703 def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
2704 "#ATOMNAND16 PSEUDO!",
2705 [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
2706 def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
2707 "#ATOMMIN16 PSEUDO!",
2708 [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
2709 def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
2710 "#ATOMMAX16 PSEUDO!",
2711 [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
2712 def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
2713 "#ATOMUMIN16 PSEUDO!",
2714 [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
2715 def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
2716 "#ATOMUMAX16 PSEUDO!",
2717 [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
2719 def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
2720 "#ATOMAND8 PSUEDO!",
2721 [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
2722 def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
2724 [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
2725 def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
2726 "#ATOMXOR8 PSUEDO!",
2727 [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
2728 def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
2729 "#ATOMNAND8 PSUEDO!",
2730 [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
2733 //===----------------------------------------------------------------------===//
2734 // Non-Instruction Patterns
2735 //===----------------------------------------------------------------------===//
2737 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
2738 def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
2739 def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
2740 def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
2741 def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
2742 def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
2744 def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
2745 (ADD32ri GR32:$src1, tconstpool:$src2)>;
2746 def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
2747 (ADD32ri GR32:$src1, tjumptable:$src2)>;
2748 def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
2749 (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
2750 def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
2751 (ADD32ri GR32:$src1, texternalsym:$src2)>;
2753 def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
2754 (MOV32mi addr:$dst, tglobaladdr:$src)>;
2755 def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
2756 (MOV32mi addr:$dst, texternalsym:$src)>;
2760 def : Pat<(X86tailcall GR32:$dst),
2763 def : Pat<(X86tailcall (i32 tglobaladdr:$dst)),
2765 def : Pat<(X86tailcall (i32 texternalsym:$dst)),
2768 def : Pat<(X86tcret GR32:$dst, imm:$off),
2769 (TCRETURNri GR32:$dst, imm:$off)>;
// Fix: the source pattern matches a tglobaladdr, so the result must
// rematerialize tglobaladdr:$dst as well — it previously named
// texternalsym:$dst (copy-paste from the pattern below), mismatching the
// matched operand kind.
2771 def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
2772           (TCRETURNdi tglobaladdr:$dst, imm:$off)>;
2774 def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
2775 (TCRETURNdi texternalsym:$dst, imm:$off)>;
2777 def : Pat<(X86call (i32 tglobaladdr:$dst)),
2778 (CALLpcrel32 tglobaladdr:$dst)>;
2779 def : Pat<(X86call (i32 texternalsym:$dst)),
2780 (CALLpcrel32 texternalsym:$dst)>;
2782 // X86 specific add which produces a flag.
2783 def : Pat<(addc GR32:$src1, GR32:$src2),
2784 (ADD32rr GR32:$src1, GR32:$src2)>;
2785 def : Pat<(addc GR32:$src1, (load addr:$src2)),
2786 (ADD32rm GR32:$src1, addr:$src2)>;
2787 def : Pat<(addc GR32:$src1, imm:$src2),
2788 (ADD32ri GR32:$src1, imm:$src2)>;
2789 def : Pat<(addc GR32:$src1, i32immSExt8:$src2),
2790 (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
2792 def : Pat<(subc GR32:$src1, GR32:$src2),
2793 (SUB32rr GR32:$src1, GR32:$src2)>;
2794 def : Pat<(subc GR32:$src1, (load addr:$src2)),
2795 (SUB32rm GR32:$src1, addr:$src2)>;
2796 def : Pat<(subc GR32:$src1, imm:$src2),
2797 (SUB32ri GR32:$src1, imm:$src2)>;
2798 def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
2799 (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
2803 // TEST R,R is smaller than CMP R,0
2804 def : Pat<(parallel (X86cmp GR8:$src1, 0), (implicit EFLAGS)),
2805 (TEST8rr GR8:$src1, GR8:$src1)>;
2806 def : Pat<(parallel (X86cmp GR16:$src1, 0), (implicit EFLAGS)),
2807 (TEST16rr GR16:$src1, GR16:$src1)>;
2808 def : Pat<(parallel (X86cmp GR32:$src1, 0), (implicit EFLAGS)),
2809 (TEST32rr GR32:$src1, GR32:$src1)>;
2811 // zextload bool -> zextload byte
2812 def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
2813 def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
2814 def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
2816 // extload bool -> extload byte
2817 def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
2818 def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>,
2819 Requires<[In32BitMode]>;
2820 def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
2821 def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>,
2822 Requires<[In32BitMode]>;
2823 def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
2824 def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
2827 def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>,
2828 Requires<[In32BitMode]>;
2829 def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>,
2830 Requires<[In32BitMode]>;
2831 def : Pat<(i32 (anyext GR16:$src)),
2832 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
2834 // (and (i32 load), 255) -> (zextload i8)
2835 def : Pat<(i32 (and (nvloadi32 addr:$src), (i32 255))),
2836 (MOVZX32rm8 addr:$src)>;
2837 def : Pat<(i32 (and (nvloadi32 addr:$src), (i32 65535))),
2838 (MOVZX32rm16 addr:$src)>;
2840 //===----------------------------------------------------------------------===//
2842 //===----------------------------------------------------------------------===//
2844 // r & (2^16-1) ==> movz
2845 def : Pat<(and GR32:$src1, 0xffff),
2846 (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR32:$src1, x86_subreg_16bit)))>;
2847 // r & (2^8-1) ==> movz
2848 def : Pat<(and GR32:$src1, 0xff),
2849 (MOVZX32rr8 (i8 (EXTRACT_SUBREG (MOV32to32_ GR32:$src1),
2850 x86_subreg_8bit)))>,
2851 Requires<[In32BitMode]>;
2852 // r & (2^8-1) ==> movz
2853 def : Pat<(and GR16:$src1, 0xff),
2854 (MOVZX16rr8 (i8 (EXTRACT_SUBREG (MOV16to16_ GR16:$src1),
2855 x86_subreg_8bit)))>,
2856 Requires<[In32BitMode]>;
2858 // sext_inreg patterns
2859 def : Pat<(sext_inreg GR32:$src, i16),
2860 (MOVSX32rr16 (i16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit)))>;
2861 def : Pat<(sext_inreg GR32:$src, i8),
2862 (MOVSX32rr8 (i8 (EXTRACT_SUBREG (MOV32to32_ GR32:$src),
2863 x86_subreg_8bit)))>,
2864 Requires<[In32BitMode]>;
2865 def : Pat<(sext_inreg GR16:$src, i8),
2866 (MOVSX16rr8 (i8 (EXTRACT_SUBREG (MOV16to16_ GR16:$src),
2867 x86_subreg_8bit)))>,
2868 Requires<[In32BitMode]>;
2871 def : Pat<(i16 (trunc GR32:$src)),
2872 (i16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
2873 def : Pat<(i8 (trunc GR32:$src)),
2874 (i8 (EXTRACT_SUBREG (MOV32to32_ GR32:$src), x86_subreg_8bit))>,
2875 Requires<[In32BitMode]>;
2876 def : Pat<(i8 (trunc GR16:$src)),
2877 (i8 (EXTRACT_SUBREG (MOV16to16_ GR16:$src), x86_subreg_8bit))>,
2878 Requires<[In32BitMode]>;
2880 // (shl x, 1) ==> (add x, x)
2881 def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
2882 def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
2883 def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
2885 // (shl x (and y, 31)) ==> (shl x, y)
2886 def : Pat<(shl GR8:$src1, (and CL:$amt, 31)),
2887 (SHL8rCL GR8:$src1)>;
2888 def : Pat<(shl GR16:$src1, (and CL:$amt, 31)),
2889 (SHL16rCL GR16:$src1)>;
2890 def : Pat<(shl GR32:$src1, (and CL:$amt, 31)),
2891 (SHL32rCL GR32:$src1)>;
2892 def : Pat<(store (shl (loadi8 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2893 (SHL8mCL addr:$dst)>;
2894 def : Pat<(store (shl (loadi16 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2895 (SHL16mCL addr:$dst)>;
2896 def : Pat<(store (shl (loadi32 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2897 (SHL32mCL addr:$dst)>;
2899 def : Pat<(srl GR8:$src1, (and CL:$amt, 31)),
2900 (SHR8rCL GR8:$src1)>;
2901 def : Pat<(srl GR16:$src1, (and CL:$amt, 31)),
2902 (SHR16rCL GR16:$src1)>;
2903 def : Pat<(srl GR32:$src1, (and CL:$amt, 31)),
2904 (SHR32rCL GR32:$src1)>;
2905 def : Pat<(store (srl (loadi8 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2906 (SHR8mCL addr:$dst)>;
2907 def : Pat<(store (srl (loadi16 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2908 (SHR16mCL addr:$dst)>;
2909 def : Pat<(store (srl (loadi32 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2910 (SHR32mCL addr:$dst)>;
2912 def : Pat<(sra GR8:$src1, (and CL:$amt, 31)),
2913 (SAR8rCL GR8:$src1)>;
2914 def : Pat<(sra GR16:$src1, (and CL:$amt, 31)),
2915 (SAR16rCL GR16:$src1)>;
2916 def : Pat<(sra GR32:$src1, (and CL:$amt, 31)),
2917 (SAR32rCL GR32:$src1)>;
2918 def : Pat<(store (sra (loadi8 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2919 (SAR8mCL addr:$dst)>;
2920 def : Pat<(store (sra (loadi16 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2921 (SAR16mCL addr:$dst)>;
2922 def : Pat<(store (sra (loadi32 addr:$dst), (and CL:$amt, 31)), addr:$dst),
2923 (SAR32mCL addr:$dst)>;
2925 // (or (x >> c) | (y << (32 - c))) ==> (shrd32 x, y, c)
2926 def : Pat<(or (srl GR32:$src1, CL:$amt),
2927 (shl GR32:$src2, (sub 32, CL:$amt))),
2928 (SHRD32rrCL GR32:$src1, GR32:$src2)>;
2930 def : Pat<(store (or (srl (loadi32 addr:$dst), CL:$amt),
2931 (shl GR32:$src2, (sub 32, CL:$amt))), addr:$dst),
2932 (SHRD32mrCL addr:$dst, GR32:$src2)>;
2934 // (or (x << c) | (y >> (32 - c))) ==> (shld32 x, y, c)
2935 def : Pat<(or (shl GR32:$src1, CL:$amt),
2936 (srl GR32:$src2, (sub 32, CL:$amt))),
2937 (SHLD32rrCL GR32:$src1, GR32:$src2)>;
2939 def : Pat<(store (or (shl (loadi32 addr:$dst), CL:$amt),
2940 (srl GR32:$src2, (sub 32, CL:$amt))), addr:$dst),
2941 (SHLD32mrCL addr:$dst, GR32:$src2)>;
2943 // (or (x >> c) | (y << (16 - c))) ==> (shrd16 x, y, c)
2944 def : Pat<(or (srl GR16:$src1, CL:$amt),
2945 (shl GR16:$src2, (sub 16, CL:$amt))),
2946 (SHRD16rrCL GR16:$src1, GR16:$src2)>;
2948 def : Pat<(store (or (srl (loadi16 addr:$dst), CL:$amt),
2949 (shl GR16:$src2, (sub 16, CL:$amt))), addr:$dst),
2950 (SHRD16mrCL addr:$dst, GR16:$src2)>;
2952 // (or (x << c) | (y >> (16 - c))) ==> (shld16 x, y, c)
2953 def : Pat<(or (shl GR16:$src1, CL:$amt),
2954 (srl GR16:$src2, (sub 16, CL:$amt))),
2955 (SHLD16rrCL GR16:$src1, GR16:$src2)>;
2957 def : Pat<(store (or (shl (loadi16 addr:$dst), CL:$amt),
2958 (srl GR16:$src2, (sub 16, CL:$amt))), addr:$dst),
2959 (SHLD16mrCL addr:$dst, GR16:$src2)>;
2961 //===----------------------------------------------------------------------===//
2962 // Floating Point Stack Support
2963 //===----------------------------------------------------------------------===//
2965 include "X86InstrFPStack.td"
2967 //===----------------------------------------------------------------------===//
2969 //===----------------------------------------------------------------------===//
2971 include "X86Instr64bit.td"
2973 //===----------------------------------------------------------------------===//
2974 // XMM Floating point support (requires SSE / SSE2)
2975 //===----------------------------------------------------------------------===//
2977 include "X86InstrSSE.td"
2979 //===----------------------------------------------------------------------===//
2980 // MMX and XMM Packed Integer support (requires MMX, SSE, and SSE2)
2981 //===----------------------------------------------------------------------===//
2983 include "X86InstrMMX.td"