// (provenance: oota-llvm.git / lib/Target/X86/X86InstrCMovSetCC.td;
//  commit: "use a multipattern to define setcc instructions")
//===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 conditional move and set on condition
// instructions.
//
//===----------------------------------------------------------------------===//

// FIXME: Someone please sprinkle some defm's in here!
let Constraints = "$src1 = $dst" in {

// Conditional moves
let Uses = [EFLAGS] in {

let Predicates = [HasCMov] in {
// Register-register forms.  Marked commutable: the register allocator may
// swap $src1/$src2 (the condition is inverted by the commuting logic in
// X86InstrInfo, not here).
let isCommutable = 1 in {
def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVBE16rr: I<0x46, MRMSrcReg,       // if <=u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_BE, EFLAGS))]>,
                  TB, OpSize;
def CMOVBE32rr: I<0x46, MRMSrcReg,       // if <=u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_BE, EFLAGS))]>,
                  TB;
def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rr: I<0x4B, MRMSrcReg,       // if !parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rr: I<0x4B, MRMSrcReg,       // if !parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rr : I<0x40, MRMSrcReg,       // if overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rr : I<0x40, MRMSrcReg,       // if overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rr: I<0x41, MRMSrcReg,       // if !overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rr: I<0x41, MRMSrcReg,       // if !overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NO, EFLAGS))]>,
                  TB;
} // isCommutable = 1
217
// Register-memory forms: the second operand is loaded from memory, so these
// are not commutable.
def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVBE16rm: I<0x46, MRMSrcMem,       // if <=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_BE, EFLAGS))]>,
                  TB, OpSize;
def CMOVBE32rm: I<0x46, MRMSrcMem,       // if <=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_BE, EFLAGS))]>,
                  TB;
def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rm: I<0x4B, MRMSrcMem,       // if !parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rm: I<0x4B, MRMSrcMem,       // if !parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rm : I<0x40, MRMSrcMem,       // if overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rm : I<0x40, MRMSrcMem,       // if overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rm: I<0x41, MRMSrcMem,       // if !overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rm: I<0x41, MRMSrcMem,       // if !overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NO, EFLAGS))]>,
                  TB;
} // Predicates = [HasCMov]
411
// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

// On targets without CMOV, wider selects are also expanded via the custom
// inserter; the condition code travels as an i8 immediate operand.
let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                    "#CMOV_GR32* PSEUDO!",
                    [(set GR32:$dst,
                      (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                    "#CMOV_GR16* PSEUDO!",
                    [(set GR16:$dst,
                      (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                    (outs RFP32:$dst),
                    (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                    "#CMOV_RFP32 PSEUDO!",
                    [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                    (outs RFP64:$dst),
                    (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                    "#CMOV_RFP64 PSEUDO!",
                    [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                    (outs RFP80:$dst),
                    (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                    "#CMOV_RFP80 PSEUDO!",
                    [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                                                  EFLAGS))]>;
} // Predicates = [NoCMov]
} // usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst"
462
463
464 // Conditional moves
465 let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
466 let isCommutable = 1 in {
467 def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
468                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
469                    "cmovb{q}\t{$src2, $dst|$dst, $src2}",
470                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
471                                      X86_COND_B, EFLAGS))]>, TB;
472 def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
473                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
474                    "cmovae{q}\t{$src2, $dst|$dst, $src2}",
475                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
476                                      X86_COND_AE, EFLAGS))]>, TB;
477 def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
478                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
479                    "cmove{q}\t{$src2, $dst|$dst, $src2}",
480                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
481                                      X86_COND_E, EFLAGS))]>, TB;
482 def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
483                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
484                    "cmovne{q}\t{$src2, $dst|$dst, $src2}",
485                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
486                                     X86_COND_NE, EFLAGS))]>, TB;
487 def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
488                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
489                    "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
490                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
491                                     X86_COND_BE, EFLAGS))]>, TB;
492 def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
493                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
494                    "cmova{q}\t{$src2, $dst|$dst, $src2}",
495                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
496                                     X86_COND_A, EFLAGS))]>, TB;
497 def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
498                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
499                    "cmovl{q}\t{$src2, $dst|$dst, $src2}",
500                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
501                                     X86_COND_L, EFLAGS))]>, TB;
// Register-register forms of the 64-bit conditional moves for the signed
// comparison, sign, parity, and overflow conditions.  Each opcode is
// 0x40 + the x86 condition-code encoding (e.g. 0x4D = GE, 0x48 = S).
// NOTE(review): the RI class presumably adds the REX.W prefix for the
// 64-bit operand size — confirm against the instruction-format classes.
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,       // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,       // if !overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1
548
// Register-memory forms of the 64-bit conditional moves: $src2 is loaded
// from memory (loadi64), so these are not marked isCommutable.  Opcodes
// mirror the rr forms above (0x40 + condition encoding); MRMSrcMem selects
// the memory-source ModRM encoding.
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,       // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm : RI<0x41, MRMSrcMem,       // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NO, EFLAGS))]>, TB;
} // Constraints = "$src1 = $dst"
630
631
632 // SetCC instructions.
// Defines both forms of one SETcc instruction: a register destination
// (suffix "r") and a byte-memory destination (suffix "m").  `Cond` is the
// X86_COND_* condition-code leaf matched inside the X86setcc pattern.
// Both forms only read EFLAGS; the opcode byte selects the condition.
multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf Cond> {
  let Uses = [EFLAGS] in {
    // SETcc r8: write the condition's result byte into a GR8 register.
    def r : I<opc, MRM0r, (outs GR8:$dst), (ins),
              !strconcat(Mnemonic, "\t$dst"),
              [(set GR8:$dst, (X86setcc Cond, EFLAGS))]>, TB;
    // SETcc m8: store the condition's result byte to memory.
    def m : I<opc, MRM0m, (outs), (ins i8mem:$dst),
              !strconcat(Mnemonic, "\t$dst"),
              [(store (X86setcc Cond, EFLAGS), addr:$dst)]>, TB;
  } // Uses = [EFLAGS]
}
643
// One instantiation per x86 condition; opcodes run 0x90-0x9F in the
// standard SETcc condition-code order, pairing each with its X86_COND_*
// pattern leaf.  Each defm expands to SET??r and SET??m via the
// SETCC multiclass above.
defm SETO  : SETCC<0x90, "seto",  X86_COND_O>;   // is overflow bit set
defm SETNO : SETCC<0x91, "setno", X86_COND_NO>;  // is overflow bit not set
defm SETB  : SETCC<0x92, "setb",  X86_COND_B>;   // unsigned less than
defm SETAE : SETCC<0x93, "setae", X86_COND_AE>;  // unsigned greater or equal
defm SETE  : SETCC<0x94, "sete",  X86_COND_E>;   // equal to
defm SETNE : SETCC<0x95, "setne", X86_COND_NE>;  // not equal to
defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>;  // unsigned less than or equal
defm SETA  : SETCC<0x97, "seta",  X86_COND_A>;   // unsigned greater than
defm SETS  : SETCC<0x98, "sets",  X86_COND_S>;   // is signed bit set
defm SETNS : SETCC<0x99, "setns", X86_COND_NS>;  // is not signed
defm SETP  : SETCC<0x9A, "setp",  X86_COND_P>;   // is parity bit set
defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>;  // is parity bit not set
defm SETL  : SETCC<0x9C, "setl",  X86_COND_L>;   // signed less than
defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>;  // signed greater or equal
defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>;  // signed less than or equal
defm SETG  : SETCC<0x9F, "setg",  X86_COND_G>;   // signed greater than
660