//====- X86InstrX86-64.td - Describe the X86 Instruction Set ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand Definitions...
//

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64>;

def lea64mem : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}

//===----------------------------------------------------------------------===//
// Complex Pattern Definitions...
//
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;
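
// For example, an address expression like (add GR64:$base, (shl GR64:$index, 3))
// can be matched by SelectLEAAddr into a single base + 8*index operand and then
// selected as LEA64r below (an illustrative case, not an exhaustive list).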

//===----------------------------------------------------------------------===//
// Instruction templates...
//

class RI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : I<o, F, ops, asm, pattern>, REX_W;
class RIi8 <bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : Ii8<o, F, ops, asm, pattern>, REX_W;
class RIi32 <bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : Ii32<o, F, ops, asm, pattern>, REX_W;

class RIi64<bits<8> o, Format f, dag ops, string asm, list<dag> pattern>
      : X86Inst<o, f, Imm64, ops, asm>, REX_W {
  let Pattern = pattern;
}

class RSSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : SSI<o, F, ops, asm, pattern>, REX_W;
class RSDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : SDI<o, F, ops, asm, pattern>, REX_W;
class RPDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : PDI<o, F, ops, asm, pattern>, REX_W;

//===----------------------------------------------------------------------===//
// Pattern fragments...
//

def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;
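
// For example, -1 and 0x7FFFFFFF satisfy i64immSExt32, while 0x80000000 (2^31)
// does not, since sign-extending its low 32 bits yields 0xFFFFFFFF80000000.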

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;

def i64immSExt8   : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;
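
// These predicates let the compact imm8 and imm32 forms below (e.g. ADD64ri8,
// ADD64ri32) be selected whenever the constant fits, instead of materializing
// a full 64-bit immediate with MOV64ri first.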

def sextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (sextloadi1 node:$ptr))>;
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;

//===----------------------------------------------------------------------===//
// Instruction list...
//

def IMPLICIT_DEF_GR64 : I<0, Pseudo, (ops GR64:$dst),
                          "#IMPLICIT_DEF $dst",
                          [(set GR64:$dst, (undef))]>;

//===----------------------------------------------------------------------===//
// Call Instructions...
//
let isCall = 1, noResults = 1 in
  // All calls clobber the non-callee saved registers...
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15] in {
    def CALL64pcrel32 : I<0xE8, RawFrm, (ops i64imm:$dst, variable_ops),
                          "call ${dst:call}", []>;
    def CALL64r       : I<0xFF, MRM2r, (ops GR64:$dst, variable_ops),
                          "call {*}$dst", [(X86call GR64:$dst)]>;
    def CALL64m       : I<0xFF, MRM2m, (ops i64mem:$dst, variable_ops),
                          "call {*}$dst", [(X86call (loadi64 addr:$dst))]>;
  }

let isBranch = 1, isTerminator = 1, noResults = 1, isBarrier = 1 in {
  def JMP64r : I<0xFF, MRM4r, (ops GR64:$dst), "jmp{q} {*}$dst",
                 [(brind GR64:$dst)]>;
  def JMP64m : I<0xFF, MRM4m, (ops i64mem:$dst), "jmp{q} {*}$dst",
                 [(brind (loadi64 addr:$dst))]>;
}

//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//
def LEAVE64  : I<0xC9, RawFrm,
                 (ops), "leave", []>, Imp<[RBP,RSP],[RBP,RSP]>;
def POP64r   : I<0x58, AddRegFrm,
                 (ops GR64:$reg), "pop{q} $reg", []>, Imp<[RSP],[RSP]>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (ops GR32:$dst, lea64_32mem:$src),
                  "lea{l} {$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

def LEA64r   : RI<0x8D, MRMSrcMem, (ops GR64:$dst, lea64mem:$src),
                  "lea{q} {$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;
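
// For example, "leaq 8(%rdi,%rsi,4), %rax" computes RDI + 4*RSI + 8 in a single
// instruction without touching EFLAGS, which is why lea64addr folds add, shl
// and or nodes into the LEA addressing mode.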

let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (ops GR64:$dst, GR64:$src),
                  "bswap{q} $dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

def XCHG64rr : RI<0x87, MRMDestReg, (ops GR64:$src1, GR64:$src2),
                  "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG64mr : RI<0x87, MRMDestMem, (ops i64mem:$src1, GR64:$src2),
                  "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG64rm : RI<0x87, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
                  "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;

def REP_MOVSQ : RI<0xA5, RawFrm, (ops), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>,
                   Imp<[RCX,RDI,RSI], [RCX,RDI,RSI]>, REP;
def REP_STOSQ : RI<0xAB, RawFrm, (ops), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>,
                   Imp<[RAX,RCX,RDI], [RCX,RDI]>, REP;

//===----------------------------------------------------------------------===//
// Move Instructions...
//

def MOV64rr : RI<0x89, MRMDestReg, (ops GR64:$dst, GR64:$src),
                 "mov{q} {$src, $dst|$dst, $src}", []>;

def MOV64ri : RIi64<0xB8, AddRegFrm, (ops GR64:$dst, i64imm:$src),
                    "movabs{q} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (ops GR64:$dst, i64i32imm:$src),
                      "mov{q} {$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;

def MOV64rm : RI<0x8B, MRMSrcMem, (ops GR64:$dst, i64mem:$src),
                 "mov{q} {$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (ops i64mem:$dst, GR64:$src),
                 "mov{q} {$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (ops i64mem:$dst, i64i32imm:$src),
                      "mov{q} {$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;
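
// Note that there is no move of a full 64-bit immediate to memory; a constant
// that does not sign-extend from 32 bits has to be materialized in a register
// with MOV64ri (movabs) and then stored with MOV64mr.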

// Sign/Zero extenders

def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (ops GR64:$dst, GR8 :$src),
                    "movs{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (ops GR64:$dst, i8mem :$src),
                    "movs{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (ops GR64:$dst, GR16:$src),
                    "movs{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (ops GR64:$dst, i16mem:$src),
                    "movs{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (ops GR64:$dst, GR32:$src),
                    "movs{lq|xd} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (ops GR64:$dst, i32mem:$src),
                    "movs{lq|xd} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (ops GR64:$dst, GR8 :$src),
                    "movz{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (ops GR64:$dst, i8mem :$src),
                    "movz{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
def MOVZX64rr16: RI<0xB7, MRMSrcReg, (ops GR64:$dst, GR16:$src),
                    "movz{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: RI<0xB7, MRMSrcMem, (ops GR64:$dst, i16mem:$src),
                    "movz{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;

def CDQE : RI<0x98, RawFrm, (ops),
              "{cltq|cdqe}", []>, Imp<[EAX],[RAX]>;     // RAX = signext(EAX)

def CQO  : RI<0x99, RawFrm, (ops),
              "{cqto|cqo}", []>, Imp<[RAX],[RAX,RDX]>;  // RDX:RAX = signext(RAX)

//===----------------------------------------------------------------------===//
// Arithmetic Instructions...
//

let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
def ADD64rr  : RI<0x01, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "add{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;

def ADD64ri32 : RIi32<0x81, MRM0r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "add{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
def ADD64ri8 : RIi8<0x83, MRM0r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "add{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
} // isConvertibleToThreeAddress

def ADD64rm  : RI<0x03, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "add{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress

def ADD64mr  : RI<0x01, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                  "add{q} {$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (ops i64mem:$dst, i64i32imm :$src2),
                      "add{q} {$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (ops i64mem:$dst, i64i8imm :$src2),
                    "add{q} {$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "adc{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rm  : RI<0x13, MRMSrcMem , (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "adc{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri32 : RIi32<0x81, MRM2r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "adc{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
def ADC64ri8 : RIi8<0x83, MRM2r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "adc{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def ADC64mr  : RI<0x11, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                  "adc{q} {$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (ops i64mem:$dst, i64i32imm:$src2),
                      "adc{q} {$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (ops i64mem:$dst, i64i8imm :$src2),
                    "adc{q} {$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let isTwoAddress = 1 in {
def SUB64rr  : RI<0x29, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "sub{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;

def SUB64rm  : RI<0x2B, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "sub{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;

def SUB64ri32 : RIi32<0x81, MRM5r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "sub{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "sub{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SUB64mr  : RI<0x29, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                  "sub{q} {$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (ops i64mem:$dst, i64i32imm:$src2),
                      "sub{q} {$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (ops i64mem:$dst, i64i8imm :$src2),
                    "sub{q} {$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let isTwoAddress = 1 in {
def SBB64rr  : RI<0x19, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "sbb{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rm  : RI<0x1B, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "sbb{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri32 : RIi32<0x81, MRM3r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "sbb{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
def SBB64ri8 : RIi8<0x83, MRM3r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "sbb{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SBB64mr  : RI<0x19, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                  "sbb{q} {$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (ops i64mem:$dst, i64i32imm:$src2),
                      "sbb{q} {$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (ops i64mem:$dst, i64i8imm :$src2),
                    "sbb{q} {$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

// Unsigned multiplication
def MUL64r : RI<0xF7, MRM4r, (ops GR64:$src),
                "mul{q} $src", []>,
                Imp<[RAX],[RAX,RDX]>;         // RAX,RDX = RAX*GR64
def MUL64m : RI<0xF7, MRM4m, (ops i64mem:$src),
                "mul{q} $src", []>,
                Imp<[RAX],[RAX,RDX]>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (ops GR64:$src),
                 "imul{q} $src", []>,
                 Imp<[RAX],[RAX,RDX]>;        // RAX,RDX = RAX*GR64
def IMUL64m : RI<0xF7, MRM5m, (ops i64mem:$src),
                 "imul{q} $src", []>,
                 Imp<[RAX],[RAX,RDX]>;        // RAX,RDX = RAX*[mem64]

let isTwoAddress = 1 in {
let isCommutable = 1 in
def IMUL64rr : RI<0xAF, MRMSrcReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "imul{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;

def IMUL64rm : RI<0xAF, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "imul{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two-address instructions!
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                        "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                      "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (ops GR64:$dst, i64mem:$src1, i64i32imm:$src2),
                        "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (ops GR64:$dst, i64mem:$src1, i64i8imm: $src2),
                      "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
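
// For example, "imulq $40, %rsi, %rax" computes RAX = RSI * 40 in one
// instruction while leaving RSI untouched, which is why these forms take a
// separate destination instead of being two-address.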

// Unsigned division / remainder
def DIV64r : RI<0xF7, MRM6r, (ops GR64:$src),        // RDX:RAX/r64 = RAX,RDX
                "div{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
def DIV64m : RI<0xF7, MRM6m, (ops i64mem:$src),      // RDX:RAX/[mem64] = RAX,RDX
                "div{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;

// Signed division / remainder
def IDIV64r: RI<0xF7, MRM7r, (ops GR64:$src),        // RDX:RAX/r64 = RAX,RDX
                "idiv{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
def IDIV64m: RI<0xF7, MRM7m, (ops i64mem:$src),      // RDX:RAX/[mem64] = RAX,RDX
                "idiv{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;

// Unary instructions
let CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (ops GR64:$dst, GR64:$src), "neg{q} $dst",
                [(set GR64:$dst, (ineg GR64:$src))]>;
def NEG64m : RI<0xF7, MRM3m, (ops i64mem:$dst), "neg{q} $dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (ops GR64:$dst, GR64:$src), "inc{q} $dst",
                [(set GR64:$dst, (add GR64:$src, 1))]>;
def INC64m : RI<0xFF, MRM0m, (ops i64mem:$dst), "inc{q} $dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (ops GR64:$dst, GR64:$src), "dec{q} $dst",
                [(set GR64:$dst, (add GR64:$src, -1))]>;
def DEC64m : RI<0xFF, MRM1m, (ops i64mem:$dst), "dec{q} $dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;

// In 64-bit mode, single byte INC and DEC cannot be encoded.
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (ops GR16:$dst, GR16:$src), "inc{w} $dst",
                  [(set GR16:$dst, (add GR16:$src, 1))]>,
                  OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (ops GR32:$dst, GR32:$src), "inc{l} $dst",
                  [(set GR32:$dst, (add GR32:$src, 1))]>,
                  Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (ops GR16:$dst, GR16:$src), "dec{w} $dst",
                  [(set GR16:$dst, (add GR16:$src, -1))]>,
                  OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (ops GR32:$dst, GR32:$src), "dec{l} $dst",
                  [(set GR32:$dst, (add GR32:$src, -1))]>,
                  Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress
} // CodeSize

// Shift instructions
let isTwoAddress = 1 in {
def SHL64rCL : RI<0xD3, MRM4r, (ops GR64:$dst, GR64:$src),
                  "shl{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>,
                  Imp<[CL],[]>;
def SHL64ri  : RIi8<0xC1, MRM4r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "shl{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
def SHL64r1  : RI<0xD1, MRM4r, (ops GR64:$dst, GR64:$src1),
                  "shl{q} $dst",
                  [(set GR64:$dst, (shl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SHL64mCL : RI<0xD3, MRM4m, (ops i64mem:$dst),
                  "shl{q} {%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def SHL64mi  : RIi8<0xC1, MRM4m, (ops i64mem:$dst, i8imm:$src),
                    "shl{q} {$src, $dst|$dst, $src}",
                    [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1  : RI<0xD1, MRM4m, (ops i64mem:$dst),
                  "shl{q} $dst",
                  [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
def SHR64rCL : RI<0xD3, MRM5r, (ops GR64:$dst, GR64:$src),
                  "shr{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>,
                  Imp<[CL],[]>;
def SHR64ri  : RIi8<0xC1, MRM5r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "shr{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (ops GR64:$dst, GR64:$src1),
                  "shr{q} $dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SHR64mCL : RI<0xD3, MRM5m, (ops i64mem:$dst),
                  "shr{q} {%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def SHR64mi  : RIi8<0xC1, MRM5m, (ops i64mem:$dst, i8imm:$src),
                    "shr{q} {$src, $dst|$dst, $src}",
                    [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1  : RI<0xD1, MRM5m, (ops i64mem:$dst),
                  "shr{q} $dst",
                  [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
def SAR64rCL : RI<0xD3, MRM7r, (ops GR64:$dst, GR64:$src),
                  "sar{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>, Imp<[CL],[]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "sar{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (ops GR64:$dst, GR64:$src1),
                  "sar{q} $dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SAR64mCL : RI<0xD3, MRM7m, (ops i64mem:$dst),
                  "sar{q} {%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (ops i64mem:$dst, i8imm:$src),
                    "sar{q} {$src, $dst|$dst, $src}",
                    [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1  : RI<0xD1, MRM7m, (ops i64mem:$dst),
                  "sar{q} $dst",
                  [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Rotate instructions
let isTwoAddress = 1 in {
def ROL64rCL : RI<0xD3, MRM0r, (ops GR64:$dst, GR64:$src),
                  "rol{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>, Imp<[CL],[]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "rol{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (ops GR64:$dst, GR64:$src1),
                  "rol{q} $dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def ROL64mCL : RI<0xD3, MRM0m, (ops i64mem:$dst),
                  "rol{q} {%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (ops i64mem:$dst, i8imm:$src),
                    "rol{q} {$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (ops i64mem:$dst),
                  "rol{q} $dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
def ROR64rCL : RI<0xD3, MRM1r, (ops GR64:$dst, GR64:$src),
                  "ror{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>, Imp<[CL],[]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "ror{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (ops GR64:$dst, GR64:$src1),
                  "ror{q} $dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def ROR64mCL : RI<0xD3, MRM1m, (ops i64mem:$dst),
                  "ror{q} {%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (ops i64mem:$dst, i8imm:$src),
                    "ror{q} {$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (ops i64mem:$dst),
                  "ror{q} $dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                    "shld{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                    "shrd{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (ops GR64:$dst, GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (ops GR64:$dst, GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
} // isCommutable
} // isTwoAddress

// Temporary hack: there are no patterns associated with these instructions
// so we have to tell tblgen that these do not produce results.
let noResults = 1 in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                    "shld{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                    "shrd{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;
def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (ops i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (ops i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
} // noResults

//===----------------------------------------------------------------------===//
// Logical Instructions...
//

let isTwoAddress = 1 in
def NOT64r : RI<0xF7, MRM2r, (ops GR64:$dst, GR64:$src), "not{q} $dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (ops i64mem:$dst), "not{q} $dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "and{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
def AND64rm  : RI<0x23, MRMSrcMem,
                  (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "and{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "and{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "and{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def AND64mr  : RI<0x21, MRMDestMem,
                  (ops i64mem:$dst, GR64:$src),
                  "and{q} {$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
                      (ops i64mem:$dst, i64i32imm:$src),
                      "and{q} {$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (ops i64mem:$dst, i64i8imm :$src),
                    "and{q} {$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "or{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
def OR64rm   : RI<0x0B, MRMSrcMem , (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "or{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                     "or{q} {$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "or{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def OR64mr   : RI<0x09, MRMDestMem, (ops i64mem:$dst, GR64:$src),
                  "or{q} {$src, $dst|$dst, $src}",
                  [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (ops i64mem:$dst, i64i32imm:$src),
                     "or{q} {$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (ops i64mem:$dst, i64i8imm:$src),
                    "or{q} {$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "xor{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
def XOR64rm  : RI<0x33, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "xor{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "xor{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "xor{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def XOR64mr  : RI<0x31, MRMDestMem, (ops i64mem:$dst, GR64:$src),
                  "xor{q} {$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (ops i64mem:$dst, i64i32imm:$src),
                      "xor{q} {$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (ops i64mem:$dst, i64i8imm :$src),
                    "xor{q} {$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

//===----------------------------------------------------------------------===//
// Comparison Instructions...
//

// Integer comparison
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (ops GR64:$src1, GR64:$src2),
                  "test{q} {$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
                  "test{q} {$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (ops GR64:$src1, i64i32imm:$src2),
                       "test{q} {$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (ops i64mem:$src1, i64i32imm:$src2),
                       "test{q} {$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0)]>;

def CMP64rr : RI<0x39, MRMDestReg, (ops GR64:$src1, GR64:$src2),
                 "cmp{q} {$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2)]>;
def CMP64mr : RI<0x39, MRMDestMem, (ops i64mem:$src1, GR64:$src2),
                 "cmp{q} {$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
                 "cmp{q} {$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (ops GR64:$src1, i64i32imm:$src2),
                      "cmp{q} {$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (ops i64mem:$src1, i64i32imm:$src2),
                      "cmp{q} {$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (ops i64mem:$src1, i64i8imm:$src2),
                    "cmp{q} {$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (ops GR64:$src1, i64i8imm:$src2),
                    "cmp{q} {$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2)]>;

let isTwoAddress = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovb {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B))]>, TB;
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovb {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovae {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovae {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmove {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmove {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovne {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovne {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovbe {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovbe {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmova {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmova {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovl {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovl {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovge {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovge {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovle {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovle {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovg {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovg {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovs {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovs {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovns {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovns {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                    (ops GR64:$dst, GR64:$src1, GR64:$src2),
                    "cmovnp {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                    (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                    "cmovnp {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NP))]>, TB;
} // isTwoAddress

//===----------------------------------------------------------------------===//
// Conversion Instructions...
//

def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                           "cvtsd2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (ops GR64:$dst, f128mem:$src),
                           "cvtsd2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (ops GR64:$dst, FR64:$src),
                        "cvttsd2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (ops GR64:$dst, f64mem:$src),
                        "cvttsd2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                            "cvttsd2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (ops GR64:$dst, f128mem:$src),
                            "cvttsd2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic

def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (ops FR64:$dst, GR64:$src),
                       "cvtsi2sd{q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (ops FR64:$dst, i64mem:$src),
                       "cvtsi2sd{q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (ops VR128:$dst, VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (ops VR128:$dst, VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
} // isTwoAddress

def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (ops FR32:$dst, GR64:$src),
                       "cvtsi2ss{q} {$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (ops FR32:$dst, i64mem:$src),
                       "cvtsi2ss{q} {$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg,
                           (ops VR128:$dst, VR128:$src1, GR64:$src2),
                           "cvtsi2ss{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
def Int_CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem,
                           (ops VR128:$dst, VR128:$src1, i64mem:$src2),
                           "cvtsi2ss{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
} // isTwoAddress

def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                           "cvtss2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                           "cvtss2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (ops GR64:$dst, FR32:$src),
                        "cvttss2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                        "cvttss2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                            "cvttss2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                            "cvttss2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// In 64-bit mode, each 64-bit and 32-bit register has a low 8-bit sub-register.
def TRUNC_64to8  : I<0x88, MRMDestReg, (ops GR8:$dst, GR64:$src),
                     "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                     [(set GR8:$dst, (trunc GR64:$src))]>;
def TRUNC_32to8  : I<0x88, MRMDestReg, (ops GR8:$dst, GR32:$src),
                     "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                     [(set GR8:$dst, (trunc GR32:$src))]>,
                     Requires<[In64BitMode]>;
def TRUNC_16to8  : I<0x88, MRMDestReg, (ops GR8:$dst, GR16:$src),
                     "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                     [(set GR8:$dst, (trunc GR16:$src))]>,
                     Requires<[In64BitMode]>;

def TRUNC_64to16 : I<0x89, MRMDestReg, (ops GR16:$dst, GR64:$src),
                     "mov{w} {${src:subreg16}, $dst|$dst, ${src:subreg16}}",
                     [(set GR16:$dst, (trunc GR64:$src))]>;

def TRUNC_64to32 : I<0x89, MRMDestReg, (ops GR32:$dst, GR64:$src),
                     "mov{l} {${src:subreg32}, $dst|$dst, ${src:subreg32}}",
                     [(set GR32:$dst, (trunc GR64:$src))]>;

// TODO: Remove this after proper i32 -> i64 zext support.
def PsMOVZX64rr32: I<0x89, MRMDestReg, (ops GR64:$dst, GR32:$src),
                     "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zext GR32:$src))]>;
def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (ops GR64:$dst, i32mem:$src),
                     "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
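
// These pseudos rely on the fact that, in 64-bit mode, a 32-bit mov implicitly
// zeroes bits 63:32 of the destination register, so a plain movl already
// performs the i32 -> i64 zero-extension.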

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let AddedComplexity = 1 in
def MOV64r0 : RI<0x31, MRMInitReg, (ops GR64:$dst),
                 "xor{q} $dst, $dst",
                 [(set GR64:$dst, 0)]>;

// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (ops GR64:$dst, i64i32imm:$src),
                        "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;

def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[SmallCode, IsStatic]>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;

// {s|z}extload bool -> {s|z}extload byte
def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
def : Pat<(i64 (anyext (loadi8  addr:$src))), (MOVZX64rm8  addr:$src)>;
def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;

//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (ops VR128:$dst, GR64:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOV64toPQIrm : RPDI<0x6E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>;

def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (ops GR64:$dst, VR128:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;
def MOVPQIto64mr : RPDI<0x7E, MRMDestMem, (ops i64mem:$dst, VR128:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(store (i64 (vector_extract (v2i64 VR128:$src),
                                      (iPTR 0))), addr:$dst)]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (ops FR64:$dst, GR64:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (ops FR64:$dst, i64mem:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (ops GR64:$dst, FR64:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (ops i64mem:$dst, FR64:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;