1 //====- X86InstrX86-64.td - Describe the X86 Instruction Set ----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Evan Cheng and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // Operand Definitions...
// Immediate operand classes: MVT::i64 operands that encode in a
// narrower instruction field than 64 bits.
20 // 64-bits but only 32 bits are significant.
21 def i64i32imm : Operand<i64>;
22 // 64-bits but only 8 bits are significant.
23 def i64i8imm : Operand<i64>;
25 def lea64mem : Operand<i64> {
26 let PrintMethod = "printi64mem";
27 let NumMIOperands = 4;
28 let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
31 def lea64_32mem : Operand<i32> {
32 let PrintMethod = "printlea64_32mem";
33 let NumMIOperands = 4;
34 let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
37 //===----------------------------------------------------------------------===//
38 // Complex Pattern Definitions...
// lea64addr - Matches 64-bit address-computation trees (built from
// add/mul/shl/or/frameindex/X86Wrapper) via the SelectLEAAddr C++
// routine, producing the 4 suboperands of a lea64mem operand.
40 def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
41 [add, mul, shl, or, frameindex, X86Wrapper]>;
43 //===----------------------------------------------------------------------===//
44 // Instruction templates...
// REX.W-prefixed instruction templates: identical to the base
// I/Ii8/Ii32 templates but carry the REX_W attribute, giving the
// instruction 64-bit operand size.
47 class RI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
48 : I<o, F, ops, asm, pattern>, REX_W;
49 class RIi8 <bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
50 : Ii8<o, F, ops, asm, pattern>, REX_W;
51 class RIi32 <bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
52 : Ii32<o, F, ops, asm, pattern>, REX_W;
54 class RIi64<bits<8> o, Format f, dag ops, string asm, list<dag> pattern>
55 : X86Inst<o, f, Imm64, ops, asm>, REX_W {
56 let Pattern = pattern;
// REX.W variants of the scalar SSE templates (SSI = scalar single
// precision, SDI = scalar double precision).
60 class RSSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
61 : SSI<o, F, ops, asm, pattern>, REX_W;
62 class RSDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
63 : SDI<o, F, ops, asm, pattern>, REX_W;
65 //===----------------------------------------------------------------------===//
66 // Pattern fragments...
69 def i64immSExt32 : PatLeaf<(i64 imm), [{
70 // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
71 // sign extended field.
72 return (int64_t)N->getValue() == (int32_t)N->getValue();
75 def i64immZExt32 : PatLeaf<(i64 imm), [{
76 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
77 // zero extended field.
78 return (uint64_t)N->getValue() == (uint32_t)N->getValue();
81 def i64immSExt8 : PatLeaf<(i64 imm), [{
82 // i64immSExt8 predicate - True if the 64-bit immediate fits in a 8-bit
83 // sign extended field.
84 return (int64_t)N->getValue() == (int8_t)N->getValue();
// Extending-load fragments producing an i64 result from i1/i8/i16/i32
// memory: sign-extending, zero-extending, and any-extending forms.
87 def sextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (sextload node:$ptr, i1))>;
88 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextload node:$ptr, i8))>;
89 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextload node:$ptr, i16))>;
90 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextload node:$ptr, i32))>;
92 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextload node:$ptr, i1))>;
93 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextload node:$ptr, i8))>;
94 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextload node:$ptr, i16))>;
95 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextload node:$ptr, i32))>;
97 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extload node:$ptr, i1))>;
98 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extload node:$ptr, i8))>;
99 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extload node:$ptr, i16))>;
100 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extload node:$ptr, i32))>;
102 //===----------------------------------------------------------------------===//
103 // Instruction list...
// Pseudo-instruction that defines a GR64 register with an undefined
// value; selected from the ISD undef node, emits no machine code.
106 def IMPLICIT_DEF_GR64 : I<0, Pseudo, (ops GR64:$dst),
107 "#IMPLICIT_DEF $dst",
108 [(set GR64:$dst, (undef))]>;
110 //===----------------------------------------------------------------------===//
111 // Call Instructions...
113 let isCall = 1, noResults = 1 in
114 // All calls clobber the non-callee saved registers...
115 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
116 FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
117 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
118 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15] in {
119 def CALL64pcrel32 : I<0xE8, RawFrm, (ops i64imm:$dst, variable_ops),
120 "call ${dst:call}", []>;
121 def CALL64r : I<0xFF, MRM2r, (ops GR64:$dst, variable_ops),
122 "call {*}$dst", [(X86call GR64:$dst)]>;
123 def CALL64m : I<0xFF, MRM2m, (ops i64mem:$dst, variable_ops),
128 let isBranch = 1, isTerminator = 1, noResults = 1, isBarrier = 1 in {
129 def JMP64r : I<0xFF, MRM4r, (ops GR64:$dst), "jmp{q} {*}$dst",
130 [(brind GR64:$dst)]>;
131 def JMP64m : I<0xFF, MRM4m, (ops i64mem:$dst), "jmp{q} {*}$dst",
132 [(brind (loadi64 addr:$dst))]>;
135 //===----------------------------------------------------------------------===//
136 // Miscellaneous Instructions...
// Stack maintenance. No selection patterns; implicit register
// uses/defs (RSP, and RBP for leave) are declared via Imp<>.
138 def LEAVE64 : I<0xC9, RawFrm,
139 (ops), "leave", []>, Imp<[RBP,RSP],[RBP,RSP]>;
140 def POP64r : I<0x58, AddRegFrm,
141 (ops GR64:$reg), "pop{q} $reg", []>, Imp<[RSP],[RSP]>;
// LEA64_32r computes a 64-bit address but writes only a GR32 result
// (matched from lea32addr); only legal in 64-bit mode. LEA64r is the
// full 64-bit form, matched from the lea64addr complex pattern.
143 def LEA64_32r : I<0x8D, MRMSrcMem,
144 (ops GR32:$dst, lea64_32mem:$src),
145 "lea{l} {$src|$dst}, {$dst|$src}",
146 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
148 def LEA64r : RI<0x8D, MRMSrcMem, (ops GR64:$dst, lea64mem:$src),
149 "lea{q} {$src|$dst}, {$dst|$src}",
150 [(set GR64:$dst, lea64addr:$src)]>;
152 let isTwoAddress = 1 in
153 def BSWAP64r : RI<0xC8, AddRegFrm, (ops GR64:$dst, GR64:$src),
155 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
// 64-bit exchange forms (reg/reg, mem/reg, reg/mem); no selection
// patterns are attached.
157 def XCHG64rr : RI<0x87, MRMDestReg, (ops GR64:$src1, GR64:$src2),
158 "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;
159 def XCHG64mr : RI<0x87, MRMDestMem, (ops i64mem:$src1, GR64:$src2),
160 "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;
161 def XCHG64rm : RI<0x87, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
162 "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;
// String ops selected from the X86rep_movs/X86rep_stos nodes; the
// implicit RCX/RDI/RSI (and RAX for stos) operands are declared
// through Imp<>.
165 def REP_MOVSQ : RI<0xA5, RawFrm, (ops), "{rep;movsq|rep movsq}",
166 [(X86rep_movs i64)]>,
167 Imp<[RCX,RDI,RSI], [RCX,RDI,RSI]>, REP;
168 def REP_STOSQ : RI<0xAB, RawFrm, (ops), "{rep;stosq|rep stosq}",
169 [(X86rep_stos i64)]>,
170 Imp<[RAX,RCX,RDI], [RCX,RDI]>, REP;
172 //===----------------------------------------------------------------------===//
173 // Move Instructions...
// 64-bit moves. MOV64ri carries a full 64-bit immediate (movabs);
// the ri32/mi32 forms carry a 32-bit immediate that is sign-extended,
// hence the i64immSExt32 pattern predicate.
176 def MOV64rr : RI<0x89, MRMDestReg, (ops GR64:$dst, GR64:$src),
177 "mov{q} {$src, $dst|$dst, $src}", []>;
179 def MOV64ri : RIi64<0xB8, AddRegFrm, (ops GR64:$dst, i64imm:$src),
180 "movabs{q} {$src, $dst|$dst, $src}",
181 [(set GR64:$dst, imm:$src)]>;
182 def MOV64ri32 : RIi32<0xC7, MRM0r, (ops GR64:$dst, i64i32imm:$src),
183 "mov{q} {$src, $dst|$dst, $src}",
184 [(set GR64:$dst, i64immSExt32:$src)]>;
186 def MOV64rm : RI<0x8B, MRMSrcMem, (ops GR64:$dst, i64mem:$src),
187 "mov{q} {$src, $dst|$dst, $src}",
188 [(set GR64:$dst, (load addr:$src))]>;
190 def MOV64mr : RI<0x89, MRMDestMem, (ops i64mem:$dst, GR64:$src),
191 "mov{q} {$src, $dst|$dst, $src}",
192 [(store GR64:$src, addr:$dst)]>;
193 def MOV64mi32 : RIi32<0xC7, MRM0m, (ops i64mem:$dst, i64i32imm:$src),
194 "mov{q} {$src, $dst|$dst, $src}",
195 [(store i64immSExt32:$src, addr:$dst)]>;
197 // Sign/Zero extenders
// Register and memory forms for i8/i16 sources (0F-prefixed, TB) and
// the i32 sign-extend (movslq, opcode 0x63). No 32->64 zero-extend
// entry appears here — NOTE(review): presumably covered elsewhere
// (a 32-bit mov); confirm against the rest of the target description.
199 def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (ops GR64:$dst, GR8 :$src),
200 "movs{bq|x} {$src, $dst|$dst, $src}",
201 [(set GR64:$dst, (sext GR8:$src))]>, TB;
202 def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (ops GR64:$dst, i8mem :$src),
203 "movs{bq|x} {$src, $dst|$dst, $src}",
204 [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
205 def MOVSX64rr16: RI<0xBF, MRMSrcReg, (ops GR64:$dst, GR16:$src),
206 "movs{wq|x} {$src, $dst|$dst, $src}",
207 [(set GR64:$dst, (sext GR16:$src))]>, TB;
208 def MOVSX64rm16: RI<0xBF, MRMSrcMem, (ops GR64:$dst, i16mem:$src),
209 "movs{wq|x} {$src, $dst|$dst, $src}",
210 [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
211 def MOVSX64rr32: RI<0x63, MRMSrcReg, (ops GR64:$dst, GR32:$src),
212 "movs{lq|xd} {$src, $dst|$dst, $src}",
213 [(set GR64:$dst, (sext GR32:$src))]>;
214 def MOVSX64rm32: RI<0x63, MRMSrcMem, (ops GR64:$dst, i32mem:$src),
215 "movs{lq|xd} {$src, $dst|$dst, $src}",
216 [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
218 def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (ops GR64:$dst, GR8 :$src),
219 "movz{bq|x} {$src, $dst|$dst, $src}",
220 [(set GR64:$dst, (zext GR8:$src))]>, TB;
221 def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (ops GR64:$dst, i8mem :$src),
222 "movz{bq|x} {$src, $dst|$dst, $src}",
223 [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
224 def MOVZX64rr16: RI<0xB7, MRMSrcReg, (ops GR64:$dst, GR16:$src),
225 "movz{wq|x} {$src, $dst|$dst, $src}",
226 [(set GR64:$dst, (zext GR16:$src))]>, TB;
227 def MOVZX64rm16: RI<0xB7, MRMSrcMem, (ops GR64:$dst, i16mem:$src),
228 "movz{wq|x} {$src, $dst|$dst, $src}",
229 [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
// Sign-extension through the accumulator; implicit operands via Imp<>.
231 def CDQE : RI<0x98, RawFrm, (ops),
232 "{cltq|cdqe}", []>, Imp<[EAX],[RAX]>; // RAX = signext(EAX)
234 def CQO : RI<0x99, RawFrm, (ops),
235 "{cqto|cqo}", []>, Imp<[RAX],[RAX,RDX]>; // RDX:RAX = signext(RAX)
237 //===----------------------------------------------------------------------===//
238 // Arithmetic Instructions...
// 64-bit add. Register forms are two-address; rr/ri forms can be
// converted to LEA (isConvertibleToThreeAddress). The ri32/ri8
// immediates are sign-extended to 64 bits by the hardware, matching
// the i64immSExt32/i64immSExt8 predicates.
241 let isTwoAddress = 1 in {
242 let isConvertibleToThreeAddress = 1 in {
243 let isCommutable = 1 in
244 def ADD64rr : RI<0x01, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
245 "add{q} {$src2, $dst|$dst, $src2}",
246 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
248 def ADD64ri32 : RIi32<0x81, MRM0r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
249 "add{q} {$src2, $dst|$dst, $src2}",
250 [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
251 def ADD64ri8 : RIi8<0x83, MRM0r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
252 "add{q} {$src2, $dst|$dst, $src2}",
253 [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
254 } // isConvertibleToThreeAddress
256 def ADD64rm : RI<0x03, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
257 "add{q} {$src2, $dst|$dst, $src2}",
258 [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
// Memory-destination (read-modify-write) forms.
261 def ADD64mr : RI<0x01, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
262 "add{q} {$src2, $dst|$dst, $src2}",
263 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
264 def ADD64mi32 : RIi32<0x81, MRM0m, (ops i64mem:$dst, i64i32imm :$src2),
265 "add{q} {$src2, $dst|$dst, $src2}",
266 [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
267 def ADD64mi8 : RIi8<0x83, MRM0m, (ops i64mem:$dst, i64i8imm :$src2),
268 "add{q} {$src2, $dst|$dst, $src2}",
269 [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
// 64-bit add-with-carry, matched from the adde node (uses the carry
// produced by a preceding addc/adde).
271 let isTwoAddress = 1 in {
272 let isCommutable = 1 in
273 def ADC64rr : RI<0x11, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
274 "adc{q} {$src2, $dst|$dst, $src2}",
275 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
277 def ADC64rm : RI<0x13, MRMSrcMem , (ops GR64:$dst, GR64:$src1, i64mem:$src2),
278 "adc{q} {$src2, $dst|$dst, $src2}",
279 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
281 def ADC64ri32 : RIi32<0x81, MRM2r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
282 "adc{q} {$src2, $dst|$dst, $src2}",
283 [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
284 def ADC64ri8 : RIi8<0x83, MRM2r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
285 "adc{q} {$src2, $dst|$dst, $src2}",
286 [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
// Memory-destination (read-modify-write) form.
289 def ADC64mr : RI<0x11, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
290 "adc{q} {$src2, $dst|$dst, $src2}",
291 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
// ADC64mi32 - [mem64] += sign-extended imm32, with carry.
// Fix: the store pattern must use i64immSExt32 to match the i64i32imm
// operand (as in ADD64mi32/SUB64mi32/etc.); it previously used
// i64immSExt8, which restricted this imm32 form to immediates that
// also fit in 8 bits and duplicated ADC64mi8's pattern.
292 def ADC64mi32 : RIi32<0x81, MRM2m, (ops i64mem:$dst, i64i32imm:$src2),
293 "adc{q} {$src2, $dst|$dst, $src2}",
294 [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
// ADC64mi8 - [mem64] += sign-extended imm8, with carry.
295 def ADC64mi8 : RIi8<0x83, MRM2m, (ops i64mem:$dst, i64i8imm :$src2),
296 "adc{q} {$src2, $dst|$dst, $src2}",
297 [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
// 64-bit subtract: two-address register forms, then memory-destination
// (read-modify-write) forms; not commutable.
299 let isTwoAddress = 1 in {
300 def SUB64rr : RI<0x29, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
301 "sub{q} {$src2, $dst|$dst, $src2}",
302 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
304 def SUB64rm : RI<0x2B, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
305 "sub{q} {$src2, $dst|$dst, $src2}",
306 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
308 def SUB64ri32 : RIi32<0x81, MRM5r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
309 "sub{q} {$src2, $dst|$dst, $src2}",
310 [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
311 def SUB64ri8 : RIi8<0x83, MRM5r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
312 "sub{q} {$src2, $dst|$dst, $src2}",
313 [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
// Memory-destination forms.
316 def SUB64mr : RI<0x29, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
317 "sub{q} {$src2, $dst|$dst, $src2}",
318 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
319 def SUB64mi32 : RIi32<0x81, MRM5m, (ops i64mem:$dst, i64i32imm:$src2),
320 "sub{q} {$src2, $dst|$dst, $src2}",
321 [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
322 def SUB64mi8 : RIi8<0x83, MRM5m, (ops i64mem:$dst, i64i8imm :$src2),
323 "sub{q} {$src2, $dst|$dst, $src2}",
324 [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
// 64-bit subtract-with-borrow, matched from the sube node.
326 let isTwoAddress = 1 in {
327 def SBB64rr : RI<0x19, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
328 "sbb{q} {$src2, $dst|$dst, $src2}",
329 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
331 def SBB64rm : RI<0x1B, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
332 "sbb{q} {$src2, $dst|$dst, $src2}",
333 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
335 def SBB64ri32 : RIi32<0x81, MRM3r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
336 "sbb{q} {$src2, $dst|$dst, $src2}",
337 [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
338 def SBB64ri8 : RIi8<0x83, MRM3r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
339 "sbb{q} {$src2, $dst|$dst, $src2}",
340 [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
// Memory-destination forms.
343 def SBB64mr : RI<0x19, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
344 "sbb{q} {$src2, $dst|$dst, $src2}",
345 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
346 def SBB64mi32 : RIi32<0x81, MRM3m, (ops i64mem:$dst, i64i32imm:$src2),
347 "sbb{q} {$src2, $dst|$dst, $src2}",
348 [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
349 def SBB64mi8 : RIi8<0x83, MRM3m, (ops i64mem:$dst, i64i8imm :$src2),
350 "sbb{q} {$src2, $dst|$dst, $src2}",
351 [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
353 // Unsigned multiplication
354 def MUL64r : RI<0xF7, MRM4r, (ops GR64:$src),
356 Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*GR64
357 def MUL64m : RI<0xF7, MRM4m, (ops i64mem:$src),
359 Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*[mem64]
361 // Signed multiplication
362 def IMUL64r : RI<0xF7, MRM5r, (ops GR64:$src),
364 Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*GR64
365 def IMUL64m : RI<0xF7, MRM5m, (ops i64mem:$src),
367 Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*[mem64]
// Two-operand signed multiply (0F AF, hence TB); result truncated to
// 64 bits, so it also serves as the generic mul selection.
369 let isTwoAddress = 1 in {
370 let isCommutable = 1 in
371 def IMUL64rr : RI<0xAF, MRMSrcReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
372 "imul{q} {$src2, $dst|$dst, $src2}",
373 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
375 def IMUL64rm : RI<0xAF, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
376 "imul{q} {$src2, $dst|$dst, $src2}",
377 [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
380 // Surprisingly enough, these are not two-address instructions!
// Three-operand imul: dst = src1 * sign-extended immediate, with
// reg and mem source forms.
381 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
382 (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
383 "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
384 [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
385 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
386 (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
387 "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
388 [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
389 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
390 (ops GR64:$dst, i64mem:$src1, i64i32imm:$src2),
391 "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
392 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
393 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
394 (ops GR64:$dst, i64mem:$src1, i64i8imm: $src2),
395 "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
396 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
398 // Unsigned division / remainder
// No patterns; the implicit RDX:RAX dividend and RAX/RDX results are
// declared via Imp<> and these are selected manually.
399 def DIV64r : RI<0xF7, MRM6r, (ops GR64:$src), // RDX:RAX/r64 = RAX,RDX
400 "div{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
401 def DIV64m : RI<0xF7, MRM6m, (ops i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
402 "div{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
404 // Signed division / remainder
405 def IDIV64r: RI<0xF7, MRM7r, (ops GR64:$src), // RDX:RAX/r64 = RAX,RDX
406 "idiv{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
407 def IDIV64m: RI<0xF7, MRM7m, (ops i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
408 "idiv{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
410 // Unary instructions
// neg/inc/dec in reg and mem-destination forms; inc/dec reg forms can
// be turned into LEA (isConvertibleToThreeAddress). inc/dec are
// matched as add +1 / add -1.
411 let CodeSize = 2 in {
412 let isTwoAddress = 1 in
413 def NEG64r : RI<0xF7, MRM3r, (ops GR64:$dst, GR64:$src), "neg{q} $dst",
414 [(set GR64:$dst, (ineg GR64:$src))]>;
415 def NEG64m : RI<0xF7, MRM3m, (ops i64mem:$dst), "neg{q} $dst",
416 [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;
418 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
419 def INC64r : RI<0xFF, MRM0r, (ops GR64:$dst, GR64:$src), "inc{q} $dst",
420 [(set GR64:$dst, (add GR64:$src, 1))]>;
421 def INC64m : RI<0xFF, MRM0m, (ops i64mem:$dst), "inc{q} $dst",
422 [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;
424 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
425 def DEC64r : RI<0xFF, MRM1r, (ops GR64:$dst, GR64:$src), "dec{q} $dst",
426 [(set GR64:$dst, (add GR64:$src, -1))]>;
427 def DEC64m : RI<0xFF, MRM1m, (ops i64mem:$dst), "dec{q} $dst",
428 [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
430 // In 64-bit mode, single byte INC and DEC cannot be encoded.
// 16/32-bit inc/dec usable in 64-bit mode, encoded as FF /0 and FF /1
// rather than the short one-byte forms (those bytes are REX prefixes
// in 64-bit mode); gated by Requires<[In64BitMode]>.
431 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
432 // Can transform into LEA.
433 def INC64_16r : I<0xFF, MRM0r, (ops GR16:$dst, GR16:$src), "inc{w} $dst",
434 [(set GR16:$dst, (add GR16:$src, 1))]>,
435 OpSize, Requires<[In64BitMode]>;
436 def INC64_32r : I<0xFF, MRM0r, (ops GR32:$dst, GR32:$src), "inc{l} $dst",
437 [(set GR32:$dst, (add GR32:$src, 1))]>,
438 Requires<[In64BitMode]>;
439 def DEC64_16r : I<0xFF, MRM1r, (ops GR16:$dst, GR16:$src), "dec{w} $dst",
440 [(set GR16:$dst, (add GR16:$src, -1))]>,
441 OpSize, Requires<[In64BitMode]>;
442 def DEC64_32r : I<0xFF, MRM1r, (ops GR32:$dst, GR32:$src), "dec{l} $dst",
443 [(set GR32:$dst, (add GR32:$src, -1))]>,
444 Requires<[In64BitMode]>;
445 } // isConvertibleToThreeAddress
449 // Shift instructions
450 let isTwoAddress = 1 in {
451 def SHL64rCL : RI<0xD3, MRM4r, (ops GR64:$dst, GR64:$src),
452 "shl{q} {%cl, $dst|$dst, %CL}",
453 [(set GR64:$dst, (shl GR64:$src, CL))]>,
// shl reg, imm8
455 def SHL64ri : RIi8<0xC1, MRM4r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
456 "shl{q} {$src2, $dst|$dst, $src2}",
457 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
458 def SHL64r1 : RI<0xD1, MRM4r, (ops GR64:$dst, GR64:$src1),
462 def SHL64mCL : RI<0xD3, MRM4m, (ops i64mem:$dst),
463 "shl{q} {%cl, $dst|$dst, %CL}",
464 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>,
// shl [mem64], imm8 (read-modify-write)
466 def SHL64mi : RIi8<0xC1, MRM4m, (ops i64mem:$dst, i8imm:$src),
467 "shl{q} {$src, $dst|$dst, $src}",
468 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
469 def SHL64m1 : RI<0xC1, MRM4m, (ops i64mem:$dst),
471 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
473 let isTwoAddress = 1 in {
474 def SHR64rCL : RI<0xD3, MRM5r, (ops GR64:$dst, GR64:$src),
475 "shr{q} {%cl, $dst|$dst, %CL}",
476 [(set GR64:$dst, (srl GR64:$src, CL))]>,
// logical shift right reg, imm8
478 def SHR64ri : RIi8<0xC1, MRM5r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
479 "shr{q} {$src2, $dst|$dst, $src2}",
480 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
481 def SHR64r1 : RI<0xD1, MRM5r, (ops GR64:$dst, GR64:$src1),
483 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
486 def SHR64mCL : RI<0xD3, MRM5m, (ops i64mem:$dst),
487 "shr{q} {%cl, $dst|$dst, %CL}",
488 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>,
// logical shift right [mem64], imm8 (read-modify-write)
490 def SHR64mi : RIi8<0xC1, MRM5m, (ops i64mem:$dst, i8imm:$src),
491 "shr{q} {$src, $dst|$dst, $src}",
492 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
493 def SHR64m1 : RI<0xC1, MRM5m, (ops i64mem:$dst),
495 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
497 let isTwoAddress = 1 in {
// arithmetic shift right: by %cl (implicit CL use via Imp) and by imm8
498 def SAR64rCL : RI<0xD3, MRM7r, (ops GR64:$dst, GR64:$src),
499 "sar{q} {%cl, $dst|$dst, %CL}",
500 [(set GR64:$dst, (sra GR64:$src, CL))]>, Imp<[CL],[]>;
501 def SAR64ri : RIi8<0xC1, MRM7r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
502 "sar{q} {$src2, $dst|$dst, $src2}",
503 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
504 def SAR64r1 : RI<0xD1, MRM7r, (ops GR64:$dst, GR64:$src1),
506 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
509 def SAR64mCL : RI<0xD3, MRM7m, (ops i64mem:$dst),
510 "sar{q} {%cl, $dst|$dst, %CL}",
511 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>,
// arithmetic shift right [mem64], imm8 (read-modify-write)
513 def SAR64mi : RIi8<0xC1, MRM7m, (ops i64mem:$dst, i8imm:$src),
514 "sar{q} {$src, $dst|$dst, $src}",
515 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
516 def SAR64m1 : RI<0xC1, MRM7m, (ops i64mem:$dst),
518 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
520 // Rotate instructions
521 let isTwoAddress = 1 in {
// rotate left: by %cl (implicit CL use via Imp) and by imm8
522 def ROL64rCL : RI<0xD3, MRM0r, (ops GR64:$dst, GR64:$src),
523 "rol{q} {%cl, $dst|$dst, %CL}",
524 [(set GR64:$dst, (rotl GR64:$src, CL))]>, Imp<[CL],[]>;
525 def ROL64ri : RIi8<0xC1, MRM0r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
526 "rol{q} {$src2, $dst|$dst, $src2}",
527 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
528 def ROL64r1 : RI<0xC1, MRM0r, (ops GR64:$dst, GR64:$src1),
530 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
533 def ROL64mCL : I<0xD3, MRM0m, (ops i64mem:$dst),
534 "rol{q} {%cl, $dst|$dst, %CL}",
535 [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>,
// rotate left [mem64], imm8 (read-modify-write)
537 def ROL64mi : RIi8<0xC1, MRM0m, (ops i64mem:$dst, i8imm:$src),
538 "rol{q} {$src, $dst|$dst, $src}",
539 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
540 def ROL64m1 : RI<0xD1, MRM0m, (ops i64mem:$dst),
542 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
544 let isTwoAddress = 1 in {
// rotate right: by %cl (implicit CL use via Imp) and by imm8
545 def ROR64rCL : RI<0xD3, MRM1r, (ops GR64:$dst, GR64:$src),
546 "ror{q} {%cl, $dst|$dst, %CL}",
547 [(set GR64:$dst, (rotr GR64:$src, CL))]>, Imp<[CL],[]>;
548 def ROR64ri : RIi8<0xC1, MRM1r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
549 "ror{q} {$src2, $dst|$dst, $src2}",
550 [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
551 def ROR64r1 : RI<0xC1, MRM1r, (ops GR64:$dst, GR64:$src1),
553 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
556 def ROR64mCL : RI<0xD3, MRM1m, (ops i64mem:$dst),
557 "ror{q} {%cl, $dst|$dst, %CL}",
558 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>,
// rotate right [mem64], imm8 (read-modify-write)
560 def ROR64mi : RIi8<0xC1, MRM1m, (ops i64mem:$dst, i8imm:$src),
561 "ror{q} {$src, $dst|$dst, $src}",
562 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
563 def ROR64m1 : RI<0xD1, MRM1m, (ops i64mem:$dst),
565 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
567 // Double shift instructions (generalizations of rotate)
568 let isTwoAddress = 1 in {
569 def SHLD64rrCL : RI<0xA5, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
570 "shld{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
572 def SHRD64rrCL : RI<0xAD, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
573 "shrd{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
576 let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
577 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
578 (ops GR64:$dst, GR64:$src1, GR64:$src2, i8imm:$src3),
579 "shld{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
581 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
582 (ops GR64:$dst, GR64:$src1, GR64:$src2, i8imm:$src3),
583 "shrd{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
588 // Temporary hack: there are no patterns associated with these instructions
589 // so we have to tell tblgen that these do not produce results.
590 let noResults = 1 in {
591 def SHLD64mrCL : RI<0xA5, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
592 "shld{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
594 def SHRD64mrCL : RI<0xAD, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
595 "shrd{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
597 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
598 (ops i64mem:$dst, GR64:$src2, i8imm:$src3),
599 "shld{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
601 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
602 (ops i64mem:$dst, GR64:$src2, i8imm:$src3),
603 "shrd{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
607 //===----------------------------------------------------------------------===//
608 // Logical Instructions...
// Bitwise complement, reg and mem-destination forms.
611 let isTwoAddress = 1 in
612 def NOT64r : RI<0xF7, MRM2r, (ops GR64:$dst, GR64:$src), "not{q} $dst",
613 [(set GR64:$dst, (not GR64:$src))]>;
614 def NOT64m : RI<0xF7, MRM2m, (ops i64mem:$dst), "not{q} $dst",
615 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
// 64-bit and: two-address register/immediate forms.
617 let isTwoAddress = 1 in {
618 let isCommutable = 1 in
619 def AND64rr : RI<0x21, MRMDestReg,
620 (ops GR64:$dst, GR64:$src1, GR64:$src2),
621 "and{q} {$src2, $dst|$dst, $src2}",
622 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
623 def AND64rm : RI<0x23, MRMSrcMem,
624 (ops GR64:$dst, GR64:$src1, i64mem:$src2),
625 "and{q} {$src2, $dst|$dst, $src2}",
626 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
627 def AND64ri32 : RIi32<0x81, MRM4r,
628 (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
629 "and{q} {$src2, $dst|$dst, $src2}",
630 [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
631 def AND64ri8 : RIi8<0x83, MRM4r,
632 (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
633 "and{q} {$src2, $dst|$dst, $src2}",
634 [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
// 64-bit and: memory-destination (read-modify-write) forms.
637 def AND64mr : RI<0x21, MRMDestMem,
638 (ops i64mem:$dst, GR64:$src),
639 "and{q} {$src, $dst|$dst, $src}",
640 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
641 def AND64mi32 : RIi32<0x81, MRM4m,
642 (ops i64mem:$dst, i64i32imm:$src),
643 "and{q} {$src, $dst|$dst, $src}",
644 [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
645 def AND64mi8 : RIi8<0x83, MRM4m,
646 (ops i64mem:$dst, i64i8imm :$src),
647 "and{q} {$src, $dst|$dst, $src}",
648 [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
// 64-bit or: two-address register/immediate forms.
650 let isTwoAddress = 1 in {
651 let isCommutable = 1 in
652 def OR64rr : RI<0x09, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
653 "or{q} {$src2, $dst|$dst, $src2}",
654 [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
655 def OR64rm : RI<0x0B, MRMSrcMem , (ops GR64:$dst, GR64:$src1, i64mem:$src2),
656 "or{q} {$src2, $dst|$dst, $src2}",
657 [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
658 def OR64ri32 : RIi32<0x81, MRM1r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
659 "or{q} {$src2, $dst|$dst, $src2}",
660 [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
661 def OR64ri8 : RIi8<0x83, MRM1r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
662 "or{q} {$src2, $dst|$dst, $src2}",
663 [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
// 64-bit or: memory-destination (read-modify-write) forms.
666 def OR64mr : RI<0x09, MRMDestMem, (ops i64mem:$dst, GR64:$src),
667 "or{q} {$src, $dst|$dst, $src}",
668 [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
669 def OR64mi32 : RIi32<0x81, MRM1m, (ops i64mem:$dst, i64i32imm:$src),
670 "or{q} {$src, $dst|$dst, $src}",
671 [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
672 def OR64mi8 : RIi8<0x83, MRM1m, (ops i64mem:$dst, i64i8imm:$src),
673 "or{q} {$src, $dst|$dst, $src}",
674 [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
// 64-bit xor: two-address register/immediate forms.
676 let isTwoAddress = 1 in {
677 let isCommutable = 1 in
678 def XOR64rr : RI<0x31, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
679 "xor{q} {$src2, $dst|$dst, $src2}",
680 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
681 def XOR64rm : RI<0x33, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
682 "xor{q} {$src2, $dst|$dst, $src2}",
683 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
684 def XOR64ri32 : RIi32<0x81, MRM6r,
685 (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
686 "xor{q} {$src2, $dst|$dst, $src2}",
687 [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
688 def XOR64ri8 : RIi8<0x83, MRM6r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
689 "xor{q} {$src2, $dst|$dst, $src2}",
690 [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
// 64-bit xor: memory-destination (read-modify-write) forms.
693 def XOR64mr : RI<0x31, MRMDestMem, (ops i64mem:$dst, GR64:$src),
694 "xor{q} {$src, $dst|$dst, $src}",
695 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
696 def XOR64mi32 : RIi32<0x81, MRM6m, (ops i64mem:$dst, i64i32imm:$src),
697 "xor{q} {$src, $dst|$dst, $src}",
698 [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
699 def XOR64mi8 : RIi8<0x83, MRM6m, (ops i64mem:$dst, i64i8imm :$src),
700 "xor{q} {$src, $dst|$dst, $src}",
701 [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
703 //===----------------------------------------------------------------------===//
704 // Comparison Instructions...
707 // Integer comparison
// test = and + compare-against-zero, matched as X86cmp of an and.
// The mr/rm patterns are deliberately commented out (not selected).
708 let isCommutable = 1 in
709 def TEST64rr : RI<0x85, MRMDestReg, (ops GR64:$src1, GR64:$src2),
710 "test{q} {$src2, $src1|$src1, $src2}",
711 [(X86cmp (and GR64:$src1, GR64:$src2), 0)]>;
712 def TEST64mr : RI<0x85, MRMDestMem, (ops i64mem:$src1, GR64:$src2),
713 "test{q} {$src2, $src1|$src1, $src2}",
714 [/*(X86cmp (and (loadi64 addr:$src1), GR64:$src2), 0)*/]>;
715 def TEST64rm : RI<0x85, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
716 "test{q} {$src2, $src1|$src1, $src2}",
717 [/*(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0)*/]>;
718 def TEST64ri32 : RIi32<0xF7, MRM0r, (ops GR64:$src1, i64i32imm:$src2),
719 "test{q} {$src2, $src1|$src1, $src2}",
720 [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0)]>;
721 def TEST64mi32 : RIi32<0xF7, MRM0m, (ops i64mem:$src1, i64i32imm:$src2),
722 "test{q} {$src2, $src1|$src1, $src2}",
723 [/*(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2),
// 64-bit compare, matched from the X86cmp node; rr, reg/mem, and
// sign-extended imm32/imm8 forms.
726 def CMP64rr : RI<0x39, MRMDestReg, (ops GR64:$src1, GR64:$src2),
727 "cmp{q} {$src2, $src1|$src1, $src2}",
728 [(X86cmp GR64:$src1, GR64:$src2)]>;
729 def CMP64mr : RI<0x39, MRMDestMem, (ops i64mem:$src1, GR64:$src2),
730 "cmp{q} {$src2, $src1|$src1, $src2}",
731 [(X86cmp (loadi64 addr:$src1), GR64:$src2)]>;
732 def CMP64rm : RI<0x3B, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
733 "cmp{q} {$src2, $src1|$src1, $src2}",
734 [(X86cmp GR64:$src1, (loadi64 addr:$src2))]>;
735 def CMP64ri32 : RIi32<0x81, MRM7r, (ops GR64:$src1, i64i32imm:$src2),
736 "cmp{q} {$src2, $src1|$src1, $src2}",
737 [(X86cmp GR64:$src1, i64immSExt32:$src2)]>;
738 def CMP64mi32 : RIi32<0x81, MRM7m, (ops i64mem:$src1, i64i32imm:$src2),
739 "cmp{q} {$src2, $src1|$src1, $src2}",
740 [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2)]>;
741 def CMP64mi8 : RIi8<0x83, MRM7m, (ops i64mem:$src1, i64i8imm:$src2),
742 "cmp{q} {$src2, $src1|$src1, $src2}",
743 [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2)]>;
744 def CMP64ri8 : RIi8<0x83, MRM7r, (ops GR64:$src1, i64i8imm:$src2),
745 "cmp{q} {$src2, $src1|$src1, $src2}",
746 [(X86cmp GR64:$src1, i64immSExt8:$src2)]>;
// Conditional moves (64-bit).  Two-address form: $dst must equal $src1;
// $src2 (register or memory) is copied into $dst only when the condition
// encoded by the X86_COND_* operand holds.  CMOVcc lives in the two-byte
// opcode map (0F 4x), hence the TB prefix on every def.
// NOTE(review): the X86_COND_* terminator lines and the closing brace were
// truncated in this copy and have been restored from the parallel structure
// of each mnemonic — confirm against upstream.
let isTwoAddress = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovb {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B))]>, TB;
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovb {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovae {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovae {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmove {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmove {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovne {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovne {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovbe {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovbe {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmova {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmova {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovl {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovl {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovge {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovge {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovle {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovle {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovg {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovg {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovs {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovs {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovns {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovns {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovnp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovnp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP))]>, TB;
} // isTwoAddress
892 //===----------------------------------------------------------------------===//
893 // Conversion Instructions...
// f64 -> signed i64 conversions.
// Int_* forms take the value in a VR128; their patterns stay empty until
// the corresponding intrinsics are wired up.
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                           "cvtsd2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (ops GR64:$dst, f128mem:$src),
                           "cvtsd2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
// Truncating forms (cvtt*) select the fp_to_sint node.
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (ops GR64:$dst, FR64:$src),
                        "cvttsd2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (ops GR64:$dst, f64mem:$src),
                        "cvttsd2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                            "cvttsd2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (ops GR64:$dst, f128mem:$src),
                            "cvttsd2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
// Signed i64 -> f64 conversion, from register or from memory.
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (ops FR64:$dst, GR64:$src),
                       "cvtsi2sd{q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (ops FR64:$dst, i64mem:$src),
                       "cvtsi2sd{q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
// Intrinsic forms of cvtsi2sd (destination is a VR128; two-address, so
// $dst must equal $src1).  Patterns stay empty until the intrinsics exist.
// The closing brace of this let-group was missing and is restored here.
let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (ops VR128:$dst, VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (ops VR128:$dst, VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
} // isTwoAddress
// Signed i64 -> f32 conversion, from register or from memory.
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (ops FR32:$dst, GR64:$src),
                       "cvtsi2ss{q} {$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (ops FR32:$dst, i64mem:$src),
                       "cvtsi2ss{q} {$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
// Intrinsic forms of cvtsi2ss (destination is a VR128; two-address, so
// $dst must equal $src1).  Patterns stay empty until the intrinsics exist.
// The closing brace of this let-group was missing and is restored here.
let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg,
                           (ops VR128:$dst, VR128:$src1, GR64:$src2),
                           "cvtsi2ss{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
def Int_CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem,
                           (ops VR128:$dst, VR128:$src1, i64mem:$src2),
                           "cvtsi2ss{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
} // isTwoAddress
// f32 -> signed i64 conversions.
// Int_* forms take the value in a VR128; patterns stay empty until the
// corresponding intrinsics are wired up.
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                           "cvtss2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                           "cvtss2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
// Truncating forms (cvtt*) select the fp_to_sint node.
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (ops GR64:$dst, FR32:$src),
                        "cvttss2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                        "cvttss2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                            "cvttss2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                            "cvttss2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
972 //===----------------------------------------------------------------------===//
973 // Alias Instructions
974 //===----------------------------------------------------------------------===//
977 // In 64-mode, each 64-bit and 32-bit registers has a low 8-bit sub-register.
// Truncate i64 -> i8 by moving the low 8-bit subregister.
// FIX: the asm string's {AT&T|Intel} alternative group was missing its
// closing brace (compare the balanced TRUNC_16to8 string below).
def TRUNC_64to8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR64:$src),
                    "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                    [(set GR8:$dst, (trunc GR64:$src))]>;
// Truncate i32 -> i8 by moving the low 8-bit subregister; only valid in
// 64-bit mode (every GR32 has an 8-bit subregister there).
// FIX: the asm string's {AT&T|Intel} alternative group was missing its
// closing brace (compare the balanced TRUNC_16to8 string below).
def TRUNC_32to8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR32:$src),
                    "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                    [(set GR8:$dst, (trunc GR32:$src))]>,
                  Requires<[In64BitMode]>;
// Remaining truncations: plain moves of the appropriate subregister.
def TRUNC_16to8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR16:$src),
                    "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                    [(set GR8:$dst, (trunc GR16:$src))]>,
                  Requires<[In64BitMode]>;
def TRUNC_64to16 : I<0x89, MRMDestReg, (ops GR16:$dst, GR64:$src),
                     "mov{w} {${src:subreg16}, $dst|$dst, ${src:subreg16}}",
                     [(set GR16:$dst, (trunc GR64:$src))]>;
def TRUNC_64to32 : I<0x89, MRMDestReg, (ops GR32:$dst, GR64:$src),
                     "mov{l} {${src:subreg32}, $dst|$dst, ${src:subreg32}}",
                     [(set GR32:$dst, (trunc GR64:$src))]>;
999 // TODO: Remove this after proper i32 -> i64 zext support.
// Pseudo zero-extension i32 -> i64: a 32-bit mov into the low subregister
// clears the upper 32 bits in 64-bit mode, so a plain mov{l} implements zext.
def PsMOVZX64rr32: I<0x89, MRMDestReg, (ops GR64:$dst, GR32:$src),
                     "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zext GR32:$src))]>;
def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (ops GR64:$dst, i32mem:$src),
                     "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
1008 // Alias instructions that map movr0 to xor.
1009 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
1010 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
1011 // when we have a better way to specify isel priority.
// Materialize zero with xor reg,reg (shorter than mov reg,0).
// AddedComplexity makes this win over MOV64ri32 for the constant 0.
let AddedComplexity = 1 in
def MOV64r0 : RI<0x31, MRMInitReg, (ops GR64:$dst),
                 "xor{q} $dst, $dst",
                 [(set GR64:$dst, 0)]>;
1017 // Materialize i64 constant where top 32-bits are zero.
// Materialize an i64 constant whose top 32 bits are zero using a 32-bit
// mov into the low subregister (Ii32, no REX.W — the shorter encoding).
let AddedComplexity = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (ops GR64:$dst, i64i32imm:$src),
                        "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;
1023 //===----------------------------------------------------------------------===//
1024 // Non-Instruction Patterns
1025 //===----------------------------------------------------------------------===//
1028 // Direct PC relative function call for small code model. 32-bit displacement
1029 // sign extended to 64-bit.
// Direct PC-relative call for the small code model: 32-bit displacement
// sign-extended to 64 bits.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tail calls are lowered to ordinary calls here.
// NOTE(review): no dedicated tail-call sequence is emitted — confirm intended.
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;
1043 // {s|z}extload bool -> {s|z}extload byte
// {s|z}extload of bool -> {s|z}extload of a byte.
def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload (extension bits unspecified): implement as zero-extending loads.
def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;

// anyext: any value is acceptable in the high bits, so zero-extend.
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
def : Pat<(i64 (anyext (loadi8 addr:$src))), (MOVZX64rm8 addr:$src)>;
def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;
1061 //===----------------------------------------------------------------------===//
1063 //===----------------------------------------------------------------------===//
// (shl x, 1) ==> (add x, x): add is preferred over a shift by one.
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (or (srl x, c), (shl y, (64 - c))) ==> double-shift right, count in CL.
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

// Same funnel-shift-right, with the first operand loaded from and the
// result stored back to the same address.
def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// (or (shl x, c), (srl y, (64 - c))) ==> double-shift left, count in CL.
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

// Memory form of the funnel-shift-left, read-modify-write on addr:$dst.
def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;