Use #NAME# to have the CMOV multiclass define things with the same names as before
[oota-llvm.git] / lib / Target / X86 / X86InstrCMovSetCC.td
1 //===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
2 // 
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 // 
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the X86 conditional move and set on condition
11 // instructions.
12 //
13 //===----------------------------------------------------------------------===//
14
15
// CMOV instructions.
// Multiclass defining the rr and rm forms of a conditional move at all three
// GPR widths.  #NAME# concatenation keeps the generated records named exactly
// like the historical hand-written defs: "defm FOO : CMOV<...>" yields
// FOO16rr, FOO32rr, FOO64rr, FOO16rm, FOO32rm, FOO64rm.
//   opc      - the opcode byte shared by all widths (TB supplies the 0F escape)
//   Mnemonic - assembly mnemonic without the operand-size suffix
//   CondNode - condition-code PatLeaf matched in the X86cmov pattern
multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
  // Register-register forms.  Marked commutable: a CMOV's operands can be
  // swapped by inverting the condition code.
  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
      isCommutable = 1 in {
    def #NAME#16rr
      : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
          !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
          [(set GR16:$dst,
                (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,
        TB, OpSize;
    def #NAME#32rr
      : I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
          !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
          [(set GR32:$dst,
                (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))]>, TB;
    def #NAME#64rr
      : RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
           !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
           [(set GR64:$dst,
                 (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>, TB;
  }

  // Register-memory forms.  Not commutable: only $src2 may come from memory.
  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in {
    def #NAME#16rm
      : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
          !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
          [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    CondNode, EFLAGS))]>, TB, OpSize;
    def #NAME#32rm
      : I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
          !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
          [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    CondNode, EFLAGS))]>, TB;
    def #NAME#64rm
      : RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
           !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
           [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     CondNode, EFLAGS))]>, TB;
  } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
} // end multiclass CMOV
55
56
// Conditional Moves.
// Expands to CMOVBE{16,32,64}rr and CMOVBE{16,32,64}rm (opcode 0x46,
// condition X86_COND_BE).
defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
59
60
let Constraints = "$src1 = $dst" in {

// Conditional moves
let Uses = [EFLAGS] in {

let Predicates = [HasCMov] in {
// Register-register conditional moves, 16- and 32-bit.  Commutable: the
// operands can be swapped by inverting the condition code.
// NOTE(review): these hand-written defs duplicate what the CMOV multiclass
// above generates; converting them to defm requires also removing the
// matching hand-written 64-bit defs below to avoid redefinitions.
let isCommutable = 1 in {
def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rr : I<0x4B, MRMSrcReg,       // if !parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rr : I<0x4B, MRMSrcReg,       // if !parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rr : I<0x40, MRMSrcReg,       // if overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rr : I<0x40, MRMSrcReg,       // if overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rr : I<0x41, MRMSrcReg,       // if !overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rr : I<0x41, MRMSrcReg,       // if !overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NO, EFLAGS))]>,
                  TB;
} // isCommutable = 1

// Register-memory conditional moves, 16- and 32-bit.  Not commutable:
// only $src2 is loaded from memory.  Same opcode bytes as the rr forms;
// MRMSrcMem selects the memory-operand encoding.
def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rm : I<0x4B, MRMSrcMem,       // if !parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rm : I<0x4B, MRMSrcMem,       // if !parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rm : I<0x40, MRMSrcMem,       // if overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rm : I<0x40, MRMSrcMem,       // if overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rm : I<0x41, MRMSrcMem,       // if !overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rm : I<0x41, MRMSrcMem,       // if !overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NO, EFLAGS))]>,
                  TB;
} // Predicates = [HasCMov]

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
// $cond carries the X86 condition code as an immediate; the custom inserter
// expands the pseudo into explicit control flow.
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

// Pseudos for targets without CMOV: select is lowered to branches instead.
let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                    "#CMOV_GR32* PSEUDO!",
                    [(set GR32:$dst,
                      (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                    "#CMOV_GR16* PSEUDO!",
                    [(set GR16:$dst,
                      (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                    (outs RFP32:$dst),
                    (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                    "#CMOV_RFP32 PSEUDO!",
                    [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                    (outs RFP64:$dst),
                    (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                    "#CMOV_RFP64 PSEUDO!",
                    [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                    (outs RFP80:$dst),
                    (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                    "#CMOV_RFP80 PSEUDO!",
                    [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                                                  EFLAGS))]>;
} // Predicates = [NoCMov]
} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst" in
482
483
484 // Conditional moves
485 let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
// 64-bit register-register conditional moves.  RI is the 64-bit-operand
// instruction format; opcodes match the 16/32-bit forms above.
// Commutable: the operands can be swapped by inverting the condition code.
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_NE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,       // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                    X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,       // if !overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1
563
// Register-memory forms: the second X86cmov operand is loaded from a 64-bit
// memory location ($src1 remains tied to $dst via the enclosing constraint).
// These are outside the isCommutable region closed above.
// NOTE(review): opcode 0x46 (cmovbe) has no 64-bit rm def in this list --
// confirm CMOVBE64rm is defined elsewhere or intentionally omitted.
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm: RI<0x4B, MRMSrcMem,       // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm: RI<0x41, MRMSrcMem,       // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                    X86_COND_NO, EFLAGS))]>, TB;
639 } // Constraints = "$src1 = $dst"
640
641
642 // SetCC instructions.
// For a given opcode, mnemonic, and condition-code PatLeaf, defines the two
// forms of a SetCC instruction: <name>r (GR8 register destination) and
// <name>m (i8 memory destination, stores the X86setcc result).  Both read
// EFLAGS.  The parameter is named CondNode for consistency with the CMOV
// multiclass earlier in this file.
multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
  let Uses = [EFLAGS] in {
    def r    : I<opc, MRM0r,  (outs GR8:$dst), (ins),
                     !strconcat(Mnemonic, "\t$dst"),
                     [(set GR8:$dst, (X86setcc CondNode, EFLAGS))]>, TB;
    def m    : I<opc, MRM0m,  (outs), (ins i8mem:$dst),
                     !strconcat(Mnemonic, "\t$dst"),
                     [(store (X86setcc CondNode, EFLAGS), addr:$dst)]>, TB;
  } // Uses = [EFLAGS]
}
653
// One defm per condition code; each expands to SET<cc>r and SET<cc>m.
defm SETO  : SETCC<0x90, "seto",  X86_COND_O>;   // is overflow bit set
defm SETNO : SETCC<0x91, "setno", X86_COND_NO>;  // is overflow bit not set
defm SETB  : SETCC<0x92, "setb",  X86_COND_B>;   // unsigned less than
defm SETAE : SETCC<0x93, "setae", X86_COND_AE>;  // unsigned greater or equal
defm SETE  : SETCC<0x94, "sete",  X86_COND_E>;   // equal to
defm SETNE : SETCC<0x95, "setne", X86_COND_NE>;  // not equal to
defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>;  // unsigned less than or equal
defm SETA  : SETCC<0x97, "seta",  X86_COND_A>;   // unsigned greater than
defm SETS  : SETCC<0x98, "sets",  X86_COND_S>;   // is signed bit set
defm SETNS : SETCC<0x99, "setns", X86_COND_NS>;  // is not signed
defm SETP  : SETCC<0x9A, "setp",  X86_COND_P>;   // is parity bit set
defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>;  // is parity bit not set
defm SETL  : SETCC<0x9C, "setl",  X86_COND_L>;   // signed less than
defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>;  // signed greater or equal
defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>;  // signed less than or equal
defm SETG  : SETCC<0x9F, "setg",  X86_COND_G>;   // signed greater than
670