1 //===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
2 // 
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 // 
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the X86 conditional move and set on condition
11 // instructions.
12 //
13 //===----------------------------------------------------------------------===//
14
15
16 // CMOV instructions.
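// Each instantiation of this multiclass produces register-register
// (rr16/rr32/rr64) and register-memory (rm16/rm32/rm64) conditional moves
// for a single condition code. It is not instantiated yet; the hand-written
// definitions further down remain in use.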
17 multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
18   let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
19       isCommutable = 1 in {
20     def rr16 : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
21                  !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
22                  [(set GR16:$dst,
23                        (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,
24                TB, OpSize;
25     def rr32 : I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
26                  !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
27                  [(set GR32:$dst,
28                        (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))]>,
29                TB;
30     def rr64 :RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
31                  !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
32                  [(set GR64:$dst,
33                        (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>,
34               TB;
35   }
36
37   let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in {
38   def rm16 : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
39                !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
40                [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
41                                          CondNode, EFLAGS))]>, TB, OpSize;
42   def rm32 : I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
43                !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
44                [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
45                                          CondNode, EFLAGS))]>, TB;
46   def rm64 :RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
47                !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
48                [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
49                                          CondNode, EFLAGS))]>, TB;
50   } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
51 } // end multiclass
52
53 //defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
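// A minimal sketch (kept disabled, like the line above) of how further
// conditions could eventually be migrated to the multiclass; the opcodes
// mirror the hand-written definitions below:
//defm CMOVE  : CMOV<0x44, "cmove",  X86_COND_E>;
//defm CMOVNE : CMOV<0x45, "cmovne", X86_COND_NE>;
//defm CMOVA  : CMOV<0x47, "cmova",  X86_COND_A>;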
54
55
56 let Constraints = "$src1 = $dst" in {
57
58 // Conditional moves
59 let Uses = [EFLAGS] in {
60
61 let Predicates = [HasCMov] in {
62 let isCommutable = 1 in {
63 def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
64                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
65                   "cmovb{w}\t{$src2, $dst|$dst, $src2}",
66                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
67                                    X86_COND_B, EFLAGS))]>,
68                   TB, OpSize;
69 def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
70                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
71                   "cmovb{l}\t{$src2, $dst|$dst, $src2}",
72                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
73                                    X86_COND_B, EFLAGS))]>,
74                    TB;
75 def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
76                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
77                   "cmovae{w}\t{$src2, $dst|$dst, $src2}",
78                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
79                                    X86_COND_AE, EFLAGS))]>,
80                    TB, OpSize;
81 def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
82                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
83                   "cmovae{l}\t{$src2, $dst|$dst, $src2}",
84                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
85                                    X86_COND_AE, EFLAGS))]>,
86                    TB;
87 def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
88                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
89                   "cmove{w}\t{$src2, $dst|$dst, $src2}",
90                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
91                                    X86_COND_E, EFLAGS))]>,
92                    TB, OpSize;
93 def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
94                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
95                   "cmove{l}\t{$src2, $dst|$dst, $src2}",
96                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
97                                    X86_COND_E, EFLAGS))]>,
98                    TB;
99 def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
100                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
101                   "cmovne{w}\t{$src2, $dst|$dst, $src2}",
102                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
103                                    X86_COND_NE, EFLAGS))]>,
104                    TB, OpSize;
105 def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
106                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
107                   "cmovne{l}\t{$src2, $dst|$dst, $src2}",
108                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
109                                    X86_COND_NE, EFLAGS))]>,
110                    TB;
111 def CMOVBE16rr: I<0x46, MRMSrcReg,       // if <=u, GR16 = GR16
112                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
113                   "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
114                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
115                                    X86_COND_BE, EFLAGS))]>,
116                    TB, OpSize;
117 def CMOVBE32rr: I<0x46, MRMSrcReg,       // if <=u, GR32 = GR32
118                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
119                   "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
120                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
121                                    X86_COND_BE, EFLAGS))]>,
122                    TB;
123 def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
124                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
125                   "cmova{w}\t{$src2, $dst|$dst, $src2}",
126                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
127                                    X86_COND_A, EFLAGS))]>,
128                    TB, OpSize;
129 def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
130                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
131                   "cmova{l}\t{$src2, $dst|$dst, $src2}",
132                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
133                                    X86_COND_A, EFLAGS))]>,
134                    TB;
135 def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
136                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
137                   "cmovl{w}\t{$src2, $dst|$dst, $src2}",
138                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
139                                    X86_COND_L, EFLAGS))]>,
140                    TB, OpSize;
141 def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
142                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
143                   "cmovl{l}\t{$src2, $dst|$dst, $src2}",
144                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
145                                    X86_COND_L, EFLAGS))]>,
146                    TB;
147 def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
148                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
149                   "cmovge{w}\t{$src2, $dst|$dst, $src2}",
150                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
151                                    X86_COND_GE, EFLAGS))]>,
152                    TB, OpSize;
153 def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
154                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
155                   "cmovge{l}\t{$src2, $dst|$dst, $src2}",
156                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
157                                    X86_COND_GE, EFLAGS))]>,
158                    TB;
159 def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
160                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
161                   "cmovle{w}\t{$src2, $dst|$dst, $src2}",
162                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
163                                    X86_COND_LE, EFLAGS))]>,
164                    TB, OpSize;
165 def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
166                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
167                   "cmovle{l}\t{$src2, $dst|$dst, $src2}",
168                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
169                                    X86_COND_LE, EFLAGS))]>,
170                    TB;
171 def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
172                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
173                   "cmovg{w}\t{$src2, $dst|$dst, $src2}",
174                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
175                                    X86_COND_G, EFLAGS))]>,
176                    TB, OpSize;
177 def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
178                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
179                   "cmovg{l}\t{$src2, $dst|$dst, $src2}",
180                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
181                                    X86_COND_G, EFLAGS))]>,
182                    TB;
183 def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
184                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
185                   "cmovs{w}\t{$src2, $dst|$dst, $src2}",
186                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
187                                    X86_COND_S, EFLAGS))]>,
188                   TB, OpSize;
189 def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
190                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
191                   "cmovs{l}\t{$src2, $dst|$dst, $src2}",
192                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
193                                    X86_COND_S, EFLAGS))]>,
194                   TB;
195 def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
196                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
197                   "cmovns{w}\t{$src2, $dst|$dst, $src2}",
198                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
199                                    X86_COND_NS, EFLAGS))]>,
200                   TB, OpSize;
201 def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
202                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
203                   "cmovns{l}\t{$src2, $dst|$dst, $src2}",
204                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
205                                    X86_COND_NS, EFLAGS))]>,
206                   TB;
207 def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
208                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
209                   "cmovp{w}\t{$src2, $dst|$dst, $src2}",
210                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
211                                    X86_COND_P, EFLAGS))]>,
212                   TB, OpSize;
213 def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
214                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
215                   "cmovp{l}\t{$src2, $dst|$dst, $src2}",
216                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
217                                    X86_COND_P, EFLAGS))]>,
218                   TB;
219 def CMOVNP16rr : I<0x4B, MRMSrcReg,       // if !parity, GR16 = GR16
220                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
221                   "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
222                    [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
223                                     X86_COND_NP, EFLAGS))]>,
224                   TB, OpSize;
225 def CMOVNP32rr : I<0x4B, MRMSrcReg,       // if !parity, GR32 = GR32
226                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
227                   "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
228                    [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
229                                     X86_COND_NP, EFLAGS))]>,
230                   TB;
231 def CMOVO16rr : I<0x40, MRMSrcReg,       // if overflow, GR16 = GR16
232                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
233                   "cmovo{w}\t{$src2, $dst|$dst, $src2}",
234                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
235                                    X86_COND_O, EFLAGS))]>,
236                   TB, OpSize;
237 def CMOVO32rr : I<0x40, MRMSrcReg,       // if overflow, GR32 = GR32
238                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
239                   "cmovo{l}\t{$src2, $dst|$dst, $src2}",
240                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
241                                    X86_COND_O, EFLAGS))]>,
242                   TB;
243 def CMOVNO16rr : I<0x41, MRMSrcReg,       // if !overflow, GR16 = GR16
244                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
245                   "cmovno{w}\t{$src2, $dst|$dst, $src2}",
246                    [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
247                                     X86_COND_NO, EFLAGS))]>,
248                   TB, OpSize;
249 def CMOVNO32rr : I<0x41, MRMSrcReg,       // if !overflow, GR32 = GR32
250                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
251                   "cmovno{l}\t{$src2, $dst|$dst, $src2}",
252                    [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
253                                     X86_COND_NO, EFLAGS))]>,
254                   TB;
255 } // isCommutable = 1
256
257 def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
258                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
259                   "cmovb{w}\t{$src2, $dst|$dst, $src2}",
260                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
261                                    X86_COND_B, EFLAGS))]>,
262                   TB, OpSize;
263 def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
264                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
265                   "cmovb{l}\t{$src2, $dst|$dst, $src2}",
266                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
267                                    X86_COND_B, EFLAGS))]>,
268                    TB;
269 def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
270                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
271                   "cmovae{w}\t{$src2, $dst|$dst, $src2}",
272                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
273                                    X86_COND_AE, EFLAGS))]>,
274                    TB, OpSize;
275 def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
276                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
277                   "cmovae{l}\t{$src2, $dst|$dst, $src2}",
278                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
279                                    X86_COND_AE, EFLAGS))]>,
280                    TB;
281 def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
282                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
283                   "cmove{w}\t{$src2, $dst|$dst, $src2}",
284                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
285                                    X86_COND_E, EFLAGS))]>,
286                    TB, OpSize;
287 def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
288                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
289                   "cmove{l}\t{$src2, $dst|$dst, $src2}",
290                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
291                                    X86_COND_E, EFLAGS))]>,
292                    TB;
293 def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
294                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
295                   "cmovne{w}\t{$src2, $dst|$dst, $src2}",
296                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
297                                    X86_COND_NE, EFLAGS))]>,
298                    TB, OpSize;
299 def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
300                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
301                   "cmovne{l}\t{$src2, $dst|$dst, $src2}",
302                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
303                                    X86_COND_NE, EFLAGS))]>,
304                    TB;
305 def CMOVBE16rm: I<0x46, MRMSrcMem,       // if <=u, GR16 = [mem16]
306                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
307                   "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
308                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
309                                    X86_COND_BE, EFLAGS))]>,
310                    TB, OpSize;
311 def CMOVBE32rm: I<0x46, MRMSrcMem,       // if <=u, GR32 = [mem32]
312                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
313                   "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
314                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
315                                    X86_COND_BE, EFLAGS))]>,
316                    TB;
317 def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
318                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
319                   "cmova{w}\t{$src2, $dst|$dst, $src2}",
320                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
321                                    X86_COND_A, EFLAGS))]>,
322                    TB, OpSize;
323 def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
324                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
325                   "cmova{l}\t{$src2, $dst|$dst, $src2}",
326                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
327                                    X86_COND_A, EFLAGS))]>,
328                    TB;
329 def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
330                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
331                   "cmovl{w}\t{$src2, $dst|$dst, $src2}",
332                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
333                                    X86_COND_L, EFLAGS))]>,
334                    TB, OpSize;
335 def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
336                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
337                   "cmovl{l}\t{$src2, $dst|$dst, $src2}",
338                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
339                                    X86_COND_L, EFLAGS))]>,
340                    TB;
341 def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
342                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
343                   "cmovge{w}\t{$src2, $dst|$dst, $src2}",
344                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
345                                    X86_COND_GE, EFLAGS))]>,
346                    TB, OpSize;
347 def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
348                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
349                   "cmovge{l}\t{$src2, $dst|$dst, $src2}",
350                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
351                                    X86_COND_GE, EFLAGS))]>,
352                    TB;
353 def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
354                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
355                   "cmovle{w}\t{$src2, $dst|$dst, $src2}",
356                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
357                                    X86_COND_LE, EFLAGS))]>,
358                    TB, OpSize;
359 def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
360                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
361                   "cmovle{l}\t{$src2, $dst|$dst, $src2}",
362                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
363                                    X86_COND_LE, EFLAGS))]>,
364                    TB;
365 def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
366                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
367                   "cmovg{w}\t{$src2, $dst|$dst, $src2}",
368                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
369                                    X86_COND_G, EFLAGS))]>,
370                    TB, OpSize;
371 def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
372                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
373                   "cmovg{l}\t{$src2, $dst|$dst, $src2}",
374                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
375                                    X86_COND_G, EFLAGS))]>,
376                    TB;
377 def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
378                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
379                   "cmovs{w}\t{$src2, $dst|$dst, $src2}",
380                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
381                                    X86_COND_S, EFLAGS))]>,
382                   TB, OpSize;
383 def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
384                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
385                   "cmovs{l}\t{$src2, $dst|$dst, $src2}",
386                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
387                                    X86_COND_S, EFLAGS))]>,
388                   TB;
389 def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
390                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
391                   "cmovns{w}\t{$src2, $dst|$dst, $src2}",
392                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
393                                    X86_COND_NS, EFLAGS))]>,
394                   TB, OpSize;
395 def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
396                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
397                   "cmovns{l}\t{$src2, $dst|$dst, $src2}",
398                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
399                                    X86_COND_NS, EFLAGS))]>,
400                   TB;
401 def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
402                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
403                   "cmovp{w}\t{$src2, $dst|$dst, $src2}",
404                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
405                                    X86_COND_P, EFLAGS))]>,
406                   TB, OpSize;
407 def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
408                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
409                   "cmovp{l}\t{$src2, $dst|$dst, $src2}",
410                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
411                                    X86_COND_P, EFLAGS))]>,
412                   TB;
413 def CMOVNP16rm : I<0x4B, MRMSrcMem,       // if !parity, GR16 = [mem16]
414                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
415                   "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
416                    [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
417                                     X86_COND_NP, EFLAGS))]>,
418                   TB, OpSize;
419 def CMOVNP32rm : I<0x4B, MRMSrcMem,       // if !parity, GR32 = [mem32]
420                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
421                   "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
422                    [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
423                                     X86_COND_NP, EFLAGS))]>,
424                   TB;
425 def CMOVO16rm : I<0x40, MRMSrcMem,       // if overflow, GR16 = [mem16]
426                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
427                   "cmovo{w}\t{$src2, $dst|$dst, $src2}",
428                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
429                                    X86_COND_O, EFLAGS))]>,
430                   TB, OpSize;
431 def CMOVO32rm : I<0x40, MRMSrcMem,       // if overflow, GR32 = [mem32]
432                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
433                   "cmovo{l}\t{$src2, $dst|$dst, $src2}",
434                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
435                                    X86_COND_O, EFLAGS))]>,
436                   TB;
437 def CMOVNO16rm : I<0x41, MRMSrcMem,       // if !overflow, GR16 = [mem16]
438                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
439                   "cmovno{w}\t{$src2, $dst|$dst, $src2}",
440                    [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
441                                     X86_COND_NO, EFLAGS))]>,
442                   TB, OpSize;
443 def CMOVNO32rm : I<0x41, MRMSrcMem,       // if !overflow, GR32 = [mem32]
444                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
445                   "cmovno{l}\t{$src2, $dst|$dst, $src2}",
446                    [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
447                                     X86_COND_NO, EFLAGS))]>,
448                   TB;
449 } // Predicates = [HasCMov]
450
451 // X86 doesn't have 8-bit conditional moves. Use a custom inserter to
452 // emit control flow. An alternative to this is to mark i8 SELECT as Promote;
453 // however, that requires promoting the operands and can induce additional
454 // i8 register pressure. Note that CMOV_GR8 is conservatively considered to
455 // clobber EFLAGS, because if one of the operands is zero, the expansion
456 // could involve an xor.
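// In rough pseudo-MIR, the expansion is an illustrative branch diamond
// (block names are hypothetical, not necessarily what the inserter builds):
//
//   thisMBB:   jCC sinkMBB, $cond        // condition holds: result is $src2
//              (fall through)
//   copy0MBB:                            // condition fails: result is $src1
//   sinkMBB:   $dst = PHI [$src2, thisMBB], [$src1, copy0MBB]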
457 let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
458 def CMOV_GR8 : I<0, Pseudo,
459                  (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
460                  "#CMOV_GR8 PSEUDO!",
461                  [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
462                                           imm:$cond, EFLAGS))]>;
463
464 let Predicates = [NoCMov] in {
465 def CMOV_GR32 : I<0, Pseudo,
466                     (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
467                     "#CMOV_GR32* PSEUDO!",
468                     [(set GR32:$dst,
469                       (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
470 def CMOV_GR16 : I<0, Pseudo,
471                     (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
472                     "#CMOV_GR16* PSEUDO!",
473                     [(set GR16:$dst,
474                       (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
475 def CMOV_RFP32 : I<0, Pseudo,
476                     (outs RFP32:$dst),
477                     (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
478                     "#CMOV_RFP32 PSEUDO!",
479                     [(set RFP32:$dst,
480                       (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
481                                                   EFLAGS))]>;
482 def CMOV_RFP64 : I<0, Pseudo,
483                     (outs RFP64:$dst),
484                     (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
485                     "#CMOV_RFP64 PSEUDO!",
486                     [(set RFP64:$dst,
487                       (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
488                                                   EFLAGS))]>;
489 def CMOV_RFP80 : I<0, Pseudo,
490                     (outs RFP80:$dst),
491                     (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
492                     "#CMOV_RFP80 PSEUDO!",
493                     [(set RFP80:$dst,
494                       (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
495                                                   EFLAGS))]>;
496 } // Predicates = [NoCMov]
497 } // usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
498 } // Uses = [EFLAGS]
499
500 } // Constraints = "$src1 = $dst"
501
502
503 // 64-bit conditional moves
504 let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
505 let isCommutable = 1 in {
506 def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
507                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
508                    "cmovb{q}\t{$src2, $dst|$dst, $src2}",
509                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
510                                      X86_COND_B, EFLAGS))]>, TB;
511 def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
512                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
513                    "cmovae{q}\t{$src2, $dst|$dst, $src2}",
514                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
515                                      X86_COND_AE, EFLAGS))]>, TB;
516 def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
517                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
518                    "cmove{q}\t{$src2, $dst|$dst, $src2}",
519                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
520                                      X86_COND_E, EFLAGS))]>, TB;
521 def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
522                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
523                    "cmovne{q}\t{$src2, $dst|$dst, $src2}",
524                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
525                                     X86_COND_NE, EFLAGS))]>, TB;
526 def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
527                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
528                    "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
529                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
530                                     X86_COND_BE, EFLAGS))]>, TB;
531 def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
532                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
533                    "cmova{q}\t{$src2, $dst|$dst, $src2}",
534                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
535                                     X86_COND_A, EFLAGS))]>, TB;
536 def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
537                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
538                    "cmovl{q}\t{$src2, $dst|$dst, $src2}",
539                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
540                                     X86_COND_L, EFLAGS))]>, TB;
541 def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
542                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
543                    "cmovge{q}\t{$src2, $dst|$dst, $src2}",
544                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
545                                     X86_COND_GE, EFLAGS))]>, TB;
546 def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
547                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
548                    "cmovle{q}\t{$src2, $dst|$dst, $src2}",
549                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
550                                     X86_COND_LE, EFLAGS))]>, TB;
551 def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
552                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
553                    "cmovg{q}\t{$src2, $dst|$dst, $src2}",
554                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
555                                     X86_COND_G, EFLAGS))]>, TB;
556 def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
557                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
558                    "cmovs{q}\t{$src2, $dst|$dst, $src2}",
559                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
560                                     X86_COND_S, EFLAGS))]>, TB;
561 def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
562                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
563                    "cmovns{q}\t{$src2, $dst|$dst, $src2}",
564                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
565                                     X86_COND_NS, EFLAGS))]>, TB;
566 def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
567                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
568                    "cmovp{q}\t{$src2, $dst|$dst, $src2}",
569                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
570                                     X86_COND_P, EFLAGS))]>, TB;
571 def CMOVNP64rr : RI<0x4B, MRMSrcReg,       // if !parity, GR64 = GR64
572                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
573                    "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
574                     [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
575                                      X86_COND_NP, EFLAGS))]>, TB;
576 def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
577                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
578                    "cmovo{q}\t{$src2, $dst|$dst, $src2}",
579                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
580                                     X86_COND_O, EFLAGS))]>, TB;
581 def CMOVNO64rr : RI<0x41, MRMSrcReg,       // if !overflow, GR64 = GR64
582                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
583                    "cmovno{q}\t{$src2, $dst|$dst, $src2}",
584                     [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
585                                      X86_COND_NO, EFLAGS))]>, TB;
586 } // isCommutable = 1
587
588 def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
589                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
590                    "cmovb{q}\t{$src2, $dst|$dst, $src2}",
591                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
592                                      X86_COND_B, EFLAGS))]>, TB;
593 def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
594                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
595                    "cmovae{q}\t{$src2, $dst|$dst, $src2}",
596                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
597                                      X86_COND_AE, EFLAGS))]>, TB;
598 def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
599                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
600                    "cmove{q}\t{$src2, $dst|$dst, $src2}",
601                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
602                                      X86_COND_E, EFLAGS))]>, TB;
603 def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
604                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
605                    "cmovne{q}\t{$src2, $dst|$dst, $src2}",
606                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
607                                     X86_COND_NE, EFLAGS))]>, TB;
608 def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
609                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
610                    "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
611                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
612                                     X86_COND_BE, EFLAGS))]>, TB;
613 def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
614                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
615                    "cmova{q}\t{$src2, $dst|$dst, $src2}",
616                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
617                                     X86_COND_A, EFLAGS))]>, TB;
618 def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
619                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
620                    "cmovl{q}\t{$src2, $dst|$dst, $src2}",
621                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
622                                     X86_COND_L, EFLAGS))]>, TB;
623 def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
624                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
625                    "cmovge{q}\t{$src2, $dst|$dst, $src2}",
626                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
627                                     X86_COND_GE, EFLAGS))]>, TB;
628 def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
629                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
630                    "cmovle{q}\t{$src2, $dst|$dst, $src2}",
631                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
632                                     X86_COND_LE, EFLAGS))]>, TB;
633 def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
634                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
635                    "cmovg{q}\t{$src2, $dst|$dst, $src2}",
636                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
637                                     X86_COND_G, EFLAGS))]>, TB;
638 def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
639                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
640                    "cmovs{q}\t{$src2, $dst|$dst, $src2}",
641                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
642                                     X86_COND_S, EFLAGS))]>, TB;
643 def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
644                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
645                    "cmovns{q}\t{$src2, $dst|$dst, $src2}",
646                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
647                                     X86_COND_NS, EFLAGS))]>, TB;
648 def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
649                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
650                    "cmovp{q}\t{$src2, $dst|$dst, $src2}",
651                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
652                                     X86_COND_P, EFLAGS))]>, TB;
653 def CMOVNP64rm : RI<0x4B, MRMSrcMem,       // if !parity, GR64 = [mem64]
654                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
655                    "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
656                     [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
657                                      X86_COND_NP, EFLAGS))]>, TB;
658 def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
659                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
660                    "cmovo{q}\t{$src2, $dst|$dst, $src2}",
661                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
662                                     X86_COND_O, EFLAGS))]>, TB;
663 def CMOVNO64rm : RI<0x41, MRMSrcMem,       // if !overflow, GR64 = [mem64]
664                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
665                    "cmovno{q}\t{$src2, $dst|$dst, $src2}",
666                     [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
667                                      X86_COND_NO, EFLAGS))]>, TB;
668 } // Constraints = "$src1 = $dst"
669
670
671 // SetCC instructions.
672 multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> {
673   let Uses = [EFLAGS] in {
674     def r    : I<opc, MRM0r,  (outs GR8:$dst), (ins),
675                      !strconcat(Mnemonic, "\t$dst"),
676                      [(set GR8:$dst, (X86setcc OpNode, EFLAGS))]>, TB;
677     def m    : I<opc, MRM0m,  (outs), (ins i8mem:$dst),
678                      !strconcat(Mnemonic, "\t$dst"),
679                      [(store (X86setcc OpNode, EFLAGS), addr:$dst)]>, TB;
680   } // Uses = [EFLAGS]
681 }
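// Each defm below expands to a register form (e.g. SETEr, "sete %al") that
// writes the 0/1 result to an 8-bit register, and a memory form (e.g. SETEm)
// that stores it to an i8 memory operand.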
682
683 defm SETO  : SETCC<0x90, "seto",  X86_COND_O>;   // is overflow bit set
684 defm SETNO : SETCC<0x91, "setno", X86_COND_NO>;  // is overflow bit not set
685 defm SETB  : SETCC<0x92, "setb",  X86_COND_B>;   // unsigned less than
686 defm SETAE : SETCC<0x93, "setae", X86_COND_AE>;  // unsigned greater or equal
687 defm SETE  : SETCC<0x94, "sete",  X86_COND_E>;   // equal to
688 defm SETNE : SETCC<0x95, "setne", X86_COND_NE>;  // not equal to
689 defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>;  // unsigned less than or equal
690 defm SETA  : SETCC<0x97, "seta",  X86_COND_A>;   // unsigned greater than
691 defm SETS  : SETCC<0x98, "sets",  X86_COND_S>;   // is sign bit set
692 defm SETNS : SETCC<0x99, "setns", X86_COND_NS>;  // is sign bit not set
693 defm SETP  : SETCC<0x9A, "setp",  X86_COND_P>;   // is parity bit set
694 defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>;  // is parity bit not set
695 defm SETL  : SETCC<0x9C, "setl",  X86_COND_L>;   // signed less than
696 defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>;  // signed greater or equal
697 defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>;  // signed less than or equal
698 defm SETG  : SETCC<0x9F, "setg",  X86_COND_G>;   // signed greater than
699