// Commit: switch CMOVBE to the multipattern.
// File: lib/Target/X86/X86InstrCMovSetCC.td
1 //===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
2 // 
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 // 
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the X86 conditional move and set on condition
11 // instructions.
12 //
13 //===----------------------------------------------------------------------===//
14
15
// CMOV multipattern: one invocation defines all six forms of a conditional
// move for a given condition code — register-register and register-memory,
// each in 16-, 32- and 64-bit widths (def suffixes rr16/rr32/rr64 and
// rm16/rm32/rm64 on the defm'd name).
//   opc      - the opcode byte shared by all six forms (0F-escaped via TB).
//   Mnemonic - base mnemonic; the {w}/{l}/{q} asm suffix is appended per width.
//   CondNode - condition-code PatLeaf matched as the X86cmov condition operand.
multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
  // Register-register forms.  Marked commutable: the backend may swap the
  // operands (inverting the condition) during two-address conversion.
  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
      isCommutable = 1 in {
    def rr16 : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                 !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
                 [(set GR16:$dst,
                       (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,
               TB, OpSize;
    def rr32 : I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                 !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
                 [(set GR32:$dst,
                       (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))]>,
               TB;
    def rr64 : RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
                  [(set GR64:$dst,
                        (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>,
               TB;
  }

  // Register-memory forms: the second operand is loaded, so these are not
  // commutable.
  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in {
  def rm16 : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
               !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
               [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                         CondNode, EFLAGS))]>, TB, OpSize;
  def rm32 : I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
               !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
               [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                         CondNode, EFLAGS))]>, TB;
  def rm64 : RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
                [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                          CondNode, EFLAGS))]>, TB;
  } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
} // end multiclass
52
53
// Conditional Moves.
// CMOVBE ("below or equal", condition X86_COND_BE, opcode 0x46) is expressed
// through the CMOV multipattern above, expanding to CMOVBErr16/rr32/rr64 and
// CMOVBErm16/rm32/rm64.  The remaining conditions below are still written out
// explicitly and are candidates for the same migration.
defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
56
57
let Constraints = "$src1 = $dst" in {

// Conditional moves
let Uses = [EFLAGS] in {

// 16- and 32-bit CMOVcc, written out per condition code.  These predate the
// CMOV multiclass above (CMOVBE has already been migrated to it).  All forms
// read EFLAGS and tie $src1 to $dst; 16-bit forms carry the OpSize prefix.
let Predicates = [HasCMov] in {
// Register-register forms; commutable (operands may be swapped by inverting
// the condition).
let isCommutable = 1 in {
def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_B, EFLAGS))]>,
                   TB;
def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                   TB, OpSize;
def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                   TB;
def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_E, EFLAGS))]>,
                   TB, OpSize;
def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_E, EFLAGS))]>,
                   TB;
def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                   TB, OpSize;
def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                   TB;
def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_A, EFLAGS))]>,
                   TB, OpSize;
def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_A, EFLAGS))]>,
                   TB;
def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_L, EFLAGS))]>,
                   TB, OpSize;
def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_L, EFLAGS))]>,
                   TB;
def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                   TB, OpSize;
def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                   TB;
def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                   TB, OpSize;
def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                   TB;
def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_G, EFLAGS))]>,
                   TB, OpSize;
def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_G, EFLAGS))]>,
                   TB;
def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rr : I<0x4B, MRMSrcReg,       // if !parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rr : I<0x4B, MRMSrcReg,       // if !parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rr : I<0x40, MRMSrcReg,       // if overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rr : I<0x40, MRMSrcReg,       // if overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rr : I<0x41, MRMSrcReg,       // if !overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rr : I<0x41, MRMSrcReg,       // if !overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NO, EFLAGS))]>,
                  TB;
} // isCommutable = 1

// Register-memory forms: the second operand is loaded from memory, so these
// are not commutable.  Same opcode bytes as the rr forms above (mod/rm
// distinguishes the addressing mode).
def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                   TB;
def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                   TB, OpSize;
def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                   TB;
def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                   TB, OpSize;
def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                   TB;
def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                   TB, OpSize;
def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                   TB;
def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                   TB, OpSize;
def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                   TB;
def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                   TB, OpSize;
def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                   TB;
def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                   TB, OpSize;
def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                   TB;
def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                   TB, OpSize;
def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                   TB;
def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                   TB, OpSize;
def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                   TB;
def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rm : I<0x4B, MRMSrcMem,       // if !parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rm : I<0x4B, MRMSrcMem,       // if !parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rm : I<0x40, MRMSrcMem,       // if overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rm : I<0x40, MRMSrcMem,       // if overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rm : I<0x41, MRMSrcMem,       // if !overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rm : I<0x41, MRMSrcMem,       // if !overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NO, EFLAGS))]>,
                  TB;
} // Predicates = [HasCMov]

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

// Pseudos selected only when the target lacks CMOV; expanded to control
// flow by the custom inserter, like CMOV_GR8 above.
let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                    "#CMOV_GR32* PSEUDO!",
                    [(set GR32:$dst,
                      (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                    "#CMOV_GR16* PSEUDO!",
                    [(set GR16:$dst,
                      (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                    (outs RFP32:$dst),
                    (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                    "#CMOV_RFP32 PSEUDO!",
                    [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                    (outs RFP64:$dst),
                    (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                    "#CMOV_RFP64 PSEUDO!",
                    [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                    (outs RFP80:$dst),
                    (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                    "#CMOV_RFP80 PSEUDO!",
                    [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                                                  EFLAGS))]>;
} // Predicates = [NoCMov]
} // usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst" in
479
480
481 // Conditional moves
482 let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
483 let isCommutable = 1 in {
484 def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
485                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
486                    "cmovb{q}\t{$src2, $dst|$dst, $src2}",
487                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
488                                      X86_COND_B, EFLAGS))]>, TB;
489 def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
490                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
491                    "cmovae{q}\t{$src2, $dst|$dst, $src2}",
492                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
493                                      X86_COND_AE, EFLAGS))]>, TB;
494 def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
495                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
496                    "cmove{q}\t{$src2, $dst|$dst, $src2}",
497                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
498                                      X86_COND_E, EFLAGS))]>, TB;
499 def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
500                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
501                    "cmovne{q}\t{$src2, $dst|$dst, $src2}",
502                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
503                                     X86_COND_NE, EFLAGS))]>, TB;
504 def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
505                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
506                    "cmova{q}\t{$src2, $dst|$dst, $src2}",
507                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
508                                     X86_COND_A, EFLAGS))]>, TB;
// Conditional move, 64-bit register-register forms: GR64 = GR64 if 'cc'.
// Each record selects an X86cmov DAG node whose condition operand is a
// fixed X86_COND_* leaf; TB supplies the 0x0F two-byte opcode escape, so
// the encodings are 0F 40+cc /r.  These defs sit inside a
// 'let isCommutable = 1' block (closed below).
// NOTE(review): isCommutable presumably lets the backend swap src1/src2 by
// inverting the condition -- confirm against X86InstrInfo's commute logic.
// NOTE(review): CMOVBE64rr (0x46) is absent from this run -- per the change
// description CMOVBE moved to the CMOV multipattern at the top of the file;
// verify that encoding is still covered there.
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,       // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,       // if !overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_NO, EFLAGS))]>, TB;
559 } // isCommutable = 1
560
// Conditional move, 64-bit register-memory forms: GR64 = [mem64] if 'cc'.
// Same 0F 4x+cc opcodes as the rr forms above, but MRMSrcMem: the second
// operand is a 64-bit load folded into the pattern via (loadi64 addr:$src2).
// These forms are outside the 'isCommutable' block -- a load operand cannot
// be swapped with the tied register operand.
// NOTE(review): CMOVBE64rm (0x46) is intentionally absent -- per the change
// description CMOVBE moved to the CMOV multipattern at the top of the file;
// verify that encoding is still covered there.
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,       // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm : RI<0x41, MRMSrcMem,       // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NO, EFLAGS))]>, TB;
636 } // Constraints = "$src1 = $dst"
637
638
// SetCC instructions.
//
// SETcc stores 1 in its byte destination when condition 'cc' holds in
// EFLAGS, 0 otherwise (Intel SDM, "SETcc -- Set Byte on Condition").
// Each instantiation emits two records:
//   <prefix>r -- register form, MRM0r, writes a GR8 register
//   <prefix>m -- memory form,   MRM0m, stores through an i8mem operand
// CondNode is the X86_COND_* PatLeaf matched as the condition operand of
// the X86setcc DAG node.  (Renamed from 'OpNode' for consistency with the
// CMOV multiclass above -- it is a condition leaf, not an operator node;
// multiclass arguments are positional, so existing defm lines are
// unaffected.)
multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
  let Uses = [EFLAGS] in {
    def r    : I<opc, MRM0r,  (outs GR8:$dst), (ins),
                     !strconcat(Mnemonic, "\t$dst"),
                     [(set GR8:$dst, (X86setcc CondNode, EFLAGS))]>, TB;
    def m    : I<opc, MRM0m,  (outs), (ins i8mem:$dst),
                     !strconcat(Mnemonic, "\t$dst"),
                     [(store (X86setcc CondNode, EFLAGS), addr:$dst)]>, TB;
  } // Uses = [EFLAGS]
}
650
// One SETCC instantiation per condition code.  Opcodes run 0x90 + cc;
// the TB modifier inside the multiclass supplies the 0x0F escape, giving
// the 0F 9x encodings.
defm SETO  : SETCC<0x90, "seto",  X86_COND_O>;   // is overflow bit set
defm SETNO : SETCC<0x91, "setno", X86_COND_NO>;  // is overflow bit not set
defm SETB  : SETCC<0x92, "setb",  X86_COND_B>;   // unsigned less than
defm SETAE : SETCC<0x93, "setae", X86_COND_AE>;  // unsigned greater or equal
defm SETE  : SETCC<0x94, "sete",  X86_COND_E>;   // equal to
defm SETNE : SETCC<0x95, "setne", X86_COND_NE>;  // not equal to
defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>;  // unsigned less than or equal
defm SETA  : SETCC<0x97, "seta",  X86_COND_A>;   // unsigned greater than
defm SETS  : SETCC<0x98, "sets",  X86_COND_S>;   // is signed bit set
defm SETNS : SETCC<0x99, "setns", X86_COND_NS>;  // is not signed
defm SETP  : SETCC<0x9A, "setp",  X86_COND_P>;   // is parity bit set
defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>;  // is parity bit not set
defm SETL  : SETCC<0x9C, "setl",  X86_COND_L>;   // signed less than
defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>;  // signed greater or equal
defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>;  // signed less than or equal
defm SETG  : SETCC<0x9F, "setg",  X86_COND_G>;   // signed greater than
667