1 //===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file describes the X86 conditional move and set on condition
// instructions.
13 //===----------------------------------------------------------------------===//
// CMOV instructions.
// Multiclass producing the register-register CMOVcc forms for one condition:
// 16-bit (0x66 operand-size prefix), 32-bit, and 64-bit (REX.W via RI).
// All CMOVcc opcodes live in the two-byte (0x0F / TB) opcode map.
multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
  // The two true operands commute: swapping $src1/$src2 and inverting the
  // condition is equivalent, so the two-address pass may exploit it.
  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
      isCommutable = 1 in {
    def rr16 : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                 !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
                 [(set GR16:$dst,
                       (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,
               TB, OpSize;
    def rr32 : I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                 !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
                 [(set GR32:$dst,
                       (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))]>,
               TB;
    def rr64 : RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
                  [(set GR64:$dst,
                        (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>,
                TB;
  } // isCommutable = 1
37 let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"in {
38 def rm16 : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
39 !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
40 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
41 CondNode, EFLAGS))]>, TB, OpSize;
42 def rm32 : I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
43 !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
44 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
45 CondNode, EFLAGS))]>, TB;
46 def rm64 :RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
47 !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
48 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
49 CondNode, EFLAGS))]>, TB;
50 } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
// Conditional move for the BE condition (unsigned <=); the defm prefixes
// "CMOVBE" onto the rr16/rr32/rr64 and rm16/rm32/rm64 defs above.
defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
let Constraints = "$src1 = $dst" in {

// Conditional moves.
let Uses = [EFLAGS] in {

// 16/32-bit CMOVcc, register-register forms. Require a CMOV-capable CPU.
let Predicates = [HasCMov] in {
// The true operands commute (swap + inverted condition), so let the
// two-address pass treat these as commutable.
let isCommutable = 1 in {
def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rr: I<0x4B, MRMSrcReg,       // if !parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rr: I<0x4B, MRMSrcReg,       // if !parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rr : I<0x40, MRMSrcReg,       // if overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rr : I<0x40, MRMSrcReg,       // if overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rr: I<0x41, MRMSrcReg,       // if !overflow, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                   X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rr: I<0x41, MRMSrcReg,       // if !overflow, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                   X86_COND_NO, EFLAGS))]>,
                  TB;
} // isCommutable = 1
// 16/32-bit CMOVcc, register-memory forms: conditionally load $src2.
def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rm: I<0x4B, MRMSrcMem,       // if !parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rm: I<0x4B, MRMSrcMem,       // if !parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NP, EFLAGS))]>,
                  TB;
def CMOVO16rm : I<0x40, MRMSrcMem,       // if overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB, OpSize;
def CMOVO32rm : I<0x40, MRMSrcMem,       // if overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_O, EFLAGS))]>,
                  TB;
def CMOVNO16rm: I<0x41, MRMSrcMem,       // if !overflow, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovno{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                   X86_COND_NO, EFLAGS))]>,
                  TB, OpSize;
def CMOVNO32rm: I<0x41, MRMSrcMem,       // if !overflow, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovno{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                   X86_COND_NO, EFLAGS))]>,
                  TB;
} // Predicates = [HasCMov]
// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

// On CPUs without CMOV, 16/32-bit and x87 selects are also expanded to
// control flow by the custom inserter.
let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                  "#CMOV_GR32* PSEUDO!",
                  [(set GR32:$dst,
                        (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                  "#CMOV_GR16* PSEUDO!",
                  [(set GR16:$dst,
                        (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                   (outs RFP32:$dst),
                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                   "#CMOV_RFP32 PSEUDO!",
                   [(set RFP32:$dst,
                         (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                  EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                   (outs RFP64:$dst),
                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                   "#CMOV_RFP64 PSEUDO!",
                   [(set RFP64:$dst,
                         (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                  EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                   (outs RFP80:$dst),
                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                   "#CMOV_RFP80 PSEUDO!",
                   [(set RFP80:$dst,
                         (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                                  EFLAGS))]>;
} // Predicates = [NoCMov]
} // usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]

} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst" in
// 64-bit CMOVcc, register-register forms. RI presumably adds the REX.W
// prefix — confirm against the instruction format classes in
// X86InstrFormats.td. NOTE(review): no HasCMov predicate here, apparently
// because all x86-64 CPUs support CMOV.
let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,      // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,      // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,      // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,      // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,      // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,      // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,      // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,      // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,      // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,      // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,      // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,      // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr: RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,      // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr: RI<0x41, MRMSrcReg,      // if !overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1
// 64-bit CMOVcc, register-memory forms: conditionally load $src2.
def CMOVB64rm : RI<0x42, MRMSrcMem,      // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,      // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,      // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,      // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,      // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,      // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,      // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,      // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,      // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,      // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,      // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,      // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm: RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,      // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm: RI<0x41, MRMSrcMem,      // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NO, EFLAGS))]>, TB;
} // Constraints = "$src1 = $dst"
// SetCC instructions.
// Multiclass producing the register and memory forms of one SETcc: both
// write an 8-bit 0/1 result derived from EFLAGS.
multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> {
  let Uses = [EFLAGS] in {
    def r : I<opc, MRM0r, (outs GR8:$dst), (ins),
              !strconcat(Mnemonic, "\t$dst"),
              [(set GR8:$dst, (X86setcc OpNode, EFLAGS))]>, TB;
    def m : I<opc, MRM0m, (outs), (ins i8mem:$dst),
              !strconcat(Mnemonic, "\t$dst"),
              [(store (X86setcc OpNode, EFLAGS), addr:$dst)]>, TB;
  } // Uses = [EFLAGS]
} // multiclass SETCC
// One register-form and one memory-form SETcc per condition code
// (opcodes 0x90-0x9F in the two-byte map).
defm SETO  : SETCC<0x90, "seto",  X86_COND_O>;   // is overflow bit set
defm SETNO : SETCC<0x91, "setno", X86_COND_NO>;  // is overflow bit not set
defm SETB  : SETCC<0x92, "setb",  X86_COND_B>;   // unsigned less than
defm SETAE : SETCC<0x93, "setae", X86_COND_AE>;  // unsigned greater or equal
defm SETE  : SETCC<0x94, "sete",  X86_COND_E>;   // equal to
defm SETNE : SETCC<0x95, "setne", X86_COND_NE>;  // not equal to
defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>;  // unsigned less than or equal
defm SETA  : SETCC<0x97, "seta",  X86_COND_A>;   // unsigned greater than
defm SETS  : SETCC<0x98, "sets",  X86_COND_S>;   // is signed bit set
defm SETNS : SETCC<0x99, "setns", X86_COND_NS>;  // is not signed
defm SETP  : SETCC<0x9A, "setp",  X86_COND_P>;   // is parity bit set
defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>;  // is parity bit not set
defm SETL  : SETCC<0x9C, "setl",  X86_COND_L>;   // signed less than
defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>;  // signed greater or equal
defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>;  // signed less than or equal
defm SETG  : SETCC<0x9F, "setg",  X86_COND_G>;   // signed greater than