//====- X86InstrX86-64.td - Describe the X86 Instruction Set ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions
// and the instruction properties needed for code generation, machine code
// emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand Definitions...
//

// 64 bits, but only the low 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64 bits, but only the low 8 bits are significant.
def i64i8imm   : Operand<i64>;

def lea64mem : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
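
// Note: the four MIOperandInfo operands above follow the x86 addressing-mode
// layout (base, scale, index, disp); e.g. the address 8(%rax,%rbx,4) has
// base = RAX, scale = 4, index = RBX, disp = 8.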

//===----------------------------------------------------------------------===//
// Complex Pattern Definitions...
//
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;

//===----------------------------------------------------------------------===//
// Instruction templates...
//

class RI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : I<o, F, ops, asm, pattern>, REX_W;
class RIi8 <bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : Ii8<o, F, ops, asm, pattern>, REX_W;
class RIi32 <bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : Ii32<o, F, ops, asm, pattern>, REX_W;

class RIi64<bits<8> o, Format f, dag ops, string asm, list<dag> pattern>
      : X86Inst<o, f, Imm64, ops, asm>, REX_W {
  let Pattern = pattern;
  let CodeSize = 3;
}

class RSSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : SSI<o, F, ops, asm, pattern>, REX_W;
class RSDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : SDI<o, F, ops, asm, pattern>, REX_W;
class RPDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
      : PDI<o, F, ops, asm, pattern>, REX_W;

//===----------------------------------------------------------------------===//
// Pattern fragments...
//

def i64immSExt32 : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;

def i64immZExt32 : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;

def i64immSExt8 : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;
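
// For example, 0x000000007FFFFFFF matches i64immSExt32 because sign
// extension from 32 bits leaves it unchanged, while 0x00000000FFFFFFFF does
// not (its 32-bit sign extension is 0xFFFFFFFFFFFFFFFF) and is instead
// matched by i64immZExt32.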

def sextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (sextloadi1 node:$ptr))>;
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
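
// These fragments just wrap the extending loads with an explicit i64 result
// type so that the extending-load instructions below (e.g. MOVSX64rm8,
// MOVZX64rm16) can spell out the memory width in their patterns.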

//===----------------------------------------------------------------------===//
// Instruction list...
//

def IMPLICIT_DEF_GR64 : I<0, Pseudo, (ops GR64:$dst),
                          "#IMPLICIT_DEF $dst",
                          [(set GR64:$dst, (undef))]>;

//===----------------------------------------------------------------------===//
// Call Instructions...
//
let isCall = 1, noResults = 1 in
  // All calls clobber the non-callee saved registers...
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15] in {
    def CALL64pcrel32 : I<0xE8, RawFrm, (ops i64imm:$dst, variable_ops),
                          "call ${dst:call}", []>;
    def CALL64r       : I<0xFF, MRM2r, (ops GR64:$dst, variable_ops),
                          "call {*}$dst", [(X86call GR64:$dst)]>;
    def CALL64m       : I<0xFF, MRM2m, (ops i64mem:$dst, variable_ops),
                          "call {*}$dst", [(X86call (loadi64 addr:$dst))]>;
  }

let isBranch = 1, isTerminator = 1, noResults = 1, isBarrier = 1 in {
  def JMP64r : I<0xFF, MRM4r, (ops GR64:$dst), "jmp{q} {*}$dst",
                 [(brind GR64:$dst)]>;
  def JMP64m : I<0xFF, MRM4m, (ops i64mem:$dst), "jmp{q} {*}$dst",
                 [(brind (loadi64 addr:$dst))]>;
}

//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//
def LEAVE64 : I<0xC9, RawFrm,
                (ops), "leave", []>, Imp<[RBP,RSP],[RBP,RSP]>;
def POP64r  : I<0x58, AddRegFrm,
                (ops GR64:$reg), "pop{q} $reg", []>, Imp<[RSP],[RSP]>;
def PUSH64r : I<0x50, AddRegFrm,
                (ops GR64:$reg), "push{q} $reg", []>, Imp<[RSP],[RSP]>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (ops GR32:$dst, lea64_32mem:$src),
                  "lea{l} {$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

def LEA64r : RI<0x8D, MRMSrcMem, (ops GR64:$dst, lea64mem:$src),
                "lea{q} {$src|$dst}, {$dst|$src}",
                [(set GR64:$dst, lea64addr:$src)]>;
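
// lea64addr folds a whole base + scale*index + disp computation into one
// flag-preserving instruction; e.g. "leaq 8(%rdi,%rsi,4), %rax" computes
// RAX = RDI + RSI*4 + 8 without touching EFLAGS.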

let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (ops GR64:$dst, GR64:$src),
                  "bswap{q} $dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

def XCHG64rr : RI<0x87, MRMDestReg, (ops GR64:$src1, GR64:$src2),
                  "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG64mr : RI<0x87, MRMDestMem, (ops i64mem:$src1, GR64:$src2),
                  "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;
def XCHG64rm : RI<0x87, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
                  "xchg{q} {$src2|$src1}, {$src1|$src2}", []>;

def REP_MOVSQ : RI<0xA5, RawFrm, (ops), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>,
                   Imp<[RCX,RDI,RSI], [RCX,RDI,RSI]>, REP;
def REP_STOSQ : RI<0xAB, RawFrm, (ops), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>,
                   Imp<[RAX,RCX,RDI], [RCX,RDI]>, REP;

//===----------------------------------------------------------------------===//
// Move Instructions...
//

def MOV64rr : RI<0x89, MRMDestReg, (ops GR64:$dst, GR64:$src),
                 "mov{q} {$src, $dst|$dst, $src}", []>;

def MOV64ri : RIi64<0xB8, AddRegFrm, (ops GR64:$dst, i64imm:$src),
                    "movabs{q} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (ops GR64:$dst, i64i32imm:$src),
                      "mov{q} {$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;

def MOV64rm : RI<0x8B, MRMSrcMem, (ops GR64:$dst, i64mem:$src),
                 "mov{q} {$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (ops i64mem:$dst, GR64:$src),
                 "mov{q} {$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (ops i64mem:$dst, i64i32imm:$src),
                      "mov{q} {$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;

// Sign/Zero extenders

def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (ops GR64:$dst, GR8 :$src),
                    "movs{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (ops GR64:$dst, i8mem :$src),
                    "movs{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (ops GR64:$dst, GR16:$src),
                    "movs{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (ops GR64:$dst, i16mem:$src),
                    "movs{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (ops GR64:$dst, GR32:$src),
                    "movs{lq|xd} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (ops GR64:$dst, i32mem:$src),
                    "movs{lq|xd} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (ops GR64:$dst, GR8 :$src),
                    "movz{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (ops GR64:$dst, i8mem :$src),
                    "movz{bq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
def MOVZX64rr16: RI<0xB7, MRMSrcReg, (ops GR64:$dst, GR16:$src),
                    "movz{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: RI<0xB7, MRMSrcMem, (ops GR64:$dst, i16mem:$src),
                    "movz{wq|x} {$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;

def CDQE : RI<0x98, RawFrm, (ops),
              "{cltq|cdqe}", []>, Imp<[EAX],[RAX]>; // RAX = signext(EAX)

def CQO  : RI<0x99, RawFrm, (ops),
              "{cqto|cqo}", []>, Imp<[RAX],[RAX,RDX]>; // RDX:RAX = signext(RAX)
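
// CQO is the usual setup for 64-bit signed division: it sign-extends RAX
// into RDX so that RDX:RAX forms a valid 128-bit dividend for the IDIV64r
// and IDIV64m instructions defined below.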

//===----------------------------------------------------------------------===//
// Arithmetic Instructions...
//

let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
def ADD64rr : RI<0x01, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                 "add{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;

def ADD64ri32 : RIi32<0x81, MRM0r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "add{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
def ADD64ri8 : RIi8<0x83, MRM0r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "add{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
} // isConvertibleToThreeAddress
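
// isConvertibleToThreeAddress lets the two-address pass rewrite these adds
// as LEA when the tied destination must differ from the source; e.g.
// "addq $8, %rax" can become "leaq 8(%rax), %rcx" when the result needs to
// live in RCX while RAX stays live.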

def ADD64rm : RI<0x03, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                 "add{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress

def ADD64mr : RI<0x01, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                 "add{q} {$src2, $dst|$dst, $src2}",
                 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (ops i64mem:$dst, i64i32imm :$src2),
                      "add{q} {$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (ops i64mem:$dst, i64i8imm :$src2),
                    "add{q} {$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr : RI<0x11, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                 "adc{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rm : RI<0x13, MRMSrcMem , (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                 "adc{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri32 : RIi32<0x81, MRM2r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "adc{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
def ADC64ri8 : RIi8<0x83, MRM2r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "adc{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def ADC64mr : RI<0x11, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                 "adc{q} {$src2, $dst|$dst, $src2}",
                 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (ops i64mem:$dst, i64i32imm:$src2),
                      "adc{q} {$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (ops i64mem:$dst, i64i8imm :$src2),
                    "adc{q} {$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let isTwoAddress = 1 in {
def SUB64rr : RI<0x29, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                 "sub{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;

def SUB64rm : RI<0x2B, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                 "sub{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;

def SUB64ri32 : RIi32<0x81, MRM5r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "sub{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "sub{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SUB64mr : RI<0x29, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                 "sub{q} {$src2, $dst|$dst, $src2}",
                 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (ops i64mem:$dst, i64i32imm:$src2),
                      "sub{q} {$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (ops i64mem:$dst, i64i8imm :$src2),
                    "sub{q} {$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let isTwoAddress = 1 in {
def SBB64rr : RI<0x19, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                 "sbb{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rm : RI<0x1B, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                 "sbb{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri32 : RIi32<0x81, MRM3r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "sbb{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
def SBB64ri8 : RIi8<0x83, MRM3r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "sbb{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SBB64mr : RI<0x19, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                 "sbb{q} {$src2, $dst|$dst, $src2}",
                 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (ops i64mem:$dst, i64i32imm:$src2),
                      "sbb{q} {$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (ops i64mem:$dst, i64i8imm :$src2),
                    "sbb{q} {$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

// Unsigned multiplication
def MUL64r : RI<0xF7, MRM4r, (ops GR64:$src),
                "mul{q} $src", []>,
                Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*GR64
def MUL64m : RI<0xF7, MRM4m, (ops i64mem:$src),
                "mul{q} $src", []>,
                Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (ops GR64:$src),
                 "imul{q} $src", []>,
                 Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*GR64
def IMUL64m : RI<0xF7, MRM5m, (ops i64mem:$src),
                 "imul{q} $src", []>,
                 Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*[mem64]

let isTwoAddress = 1 in {
let isCommutable = 1 in
def IMUL64rr : RI<0xAF, MRMSrcReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                  "imul{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;

def IMUL64rm : RI<0xAF, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                  "imul{q} {$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two address instructions!
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                        "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                      "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (ops GR64:$dst, i64mem:$src1, i64i32imm:$src2),
                        "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (ops GR64:$dst, i64mem:$src1, i64i8imm: $src2),
                      "imul{q} {$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;

// Unsigned division / remainder. RAX holds the quotient and RDX the
// remainder of RDX:RAX divided by the operand.
def DIV64r : RI<0xF7, MRM6r, (ops GR64:$src),   // RAX,RDX = RDX:RAX / r64
                "div{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
def DIV64m : RI<0xF7, MRM6m, (ops i64mem:$src), // RAX,RDX = RDX:RAX / [mem64]
                "div{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;

// Signed division / remainder
def IDIV64r: RI<0xF7, MRM7r, (ops GR64:$src),   // RAX,RDX = RDX:RAX / r64
                "idiv{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;
def IDIV64m: RI<0xF7, MRM7m, (ops i64mem:$src), // RAX,RDX = RDX:RAX / [mem64]
                "idiv{q} $src", []>, Imp<[RAX,RDX],[RAX,RDX]>;

// Unary instructions
let CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (ops GR64:$dst, GR64:$src), "neg{q} $dst",
                [(set GR64:$dst, (ineg GR64:$src))]>;
def NEG64m : RI<0xF7, MRM3m, (ops i64mem:$dst), "neg{q} $dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (ops GR64:$dst, GR64:$src), "inc{q} $dst",
                [(set GR64:$dst, (add GR64:$src, 1))]>;
def INC64m : RI<0xFF, MRM0m, (ops i64mem:$dst), "inc{q} $dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (ops GR64:$dst, GR64:$src), "dec{q} $dst",
                [(set GR64:$dst, (add GR64:$src, -1))]>;
def DEC64m : RI<0xFF, MRM1m, (ops i64mem:$dst), "dec{q} $dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;

// In 64-bit mode, single byte INC and DEC cannot be encoded: their one-byte
// opcodes (0x40-0x4F) are reused as REX prefixes.
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (ops GR16:$dst, GR16:$src), "inc{w} $dst",
                  [(set GR16:$dst, (add GR16:$src, 1))]>,
                  OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (ops GR32:$dst, GR32:$src), "inc{l} $dst",
                  [(set GR32:$dst, (add GR32:$src, 1))]>,
                  Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (ops GR16:$dst, GR16:$src), "dec{w} $dst",
                  [(set GR16:$dst, (add GR16:$src, -1))]>,
                  OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (ops GR32:$dst, GR32:$src), "dec{l} $dst",
                  [(set GR32:$dst, (add GR32:$src, -1))]>,
                  Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress
} // CodeSize

// Shift instructions
let isTwoAddress = 1 in {
def SHL64rCL : RI<0xD3, MRM4r, (ops GR64:$dst, GR64:$src),
                  "shl{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>,
                  Imp<[CL],[]>;
def SHL64ri  : RIi8<0xC1, MRM4r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "shl{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
def SHL64r1  : RI<0xD1, MRM4r, (ops GR64:$dst, GR64:$src1),
                  "shl{q} $dst",
                  [(set GR64:$dst, (shl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SHL64mCL : RI<0xD3, MRM4m, (ops i64mem:$dst),
                  "shl{q} {%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def SHL64mi  : RIi8<0xC1, MRM4m, (ops i64mem:$dst, i8imm:$src),
                    "shl{q} {$src, $dst|$dst, $src}",
                    [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1  : RI<0xD1, MRM4m, (ops i64mem:$dst),
                  "shl{q} $dst",
                  [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
def SHR64rCL : RI<0xD3, MRM5r, (ops GR64:$dst, GR64:$src),
                  "shr{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>,
                  Imp<[CL],[]>;
def SHR64ri  : RIi8<0xC1, MRM5r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "shr{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (ops GR64:$dst, GR64:$src1),
                  "shr{q} $dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SHR64mCL : RI<0xD3, MRM5m, (ops i64mem:$dst),
                  "shr{q} {%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def SHR64mi  : RIi8<0xC1, MRM5m, (ops i64mem:$dst, i8imm:$src),
                    "shr{q} {$src, $dst|$dst, $src}",
                    [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1  : RI<0xD1, MRM5m, (ops i64mem:$dst),
                  "shr{q} $dst",
                  [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
def SAR64rCL : RI<0xD3, MRM7r, (ops GR64:$dst, GR64:$src),
                  "sar{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>, Imp<[CL],[]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "sar{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (ops GR64:$dst, GR64:$src1),
                  "sar{q} $dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SAR64mCL : RI<0xD3, MRM7m, (ops i64mem:$dst),
                  "sar{q} {%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (ops i64mem:$dst, i8imm:$src),
                    "sar{q} {$src, $dst|$dst, $src}",
                    [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1  : RI<0xD1, MRM7m, (ops i64mem:$dst),
                  "sar{q} $dst",
                  [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Rotate instructions
let isTwoAddress = 1 in {
def ROL64rCL : RI<0xD3, MRM0r, (ops GR64:$dst, GR64:$src),
                  "rol{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>, Imp<[CL],[]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "rol{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (ops GR64:$dst, GR64:$src1),
                  "rol{q} $dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def ROL64mCL : RI<0xD3, MRM0m, (ops i64mem:$dst),
                  "rol{q} {%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (ops i64mem:$dst, i8imm:$src),
                    "rol{q} {$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (ops i64mem:$dst),
                  "rol{q} $dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
def ROR64rCL : RI<0xD3, MRM1r, (ops GR64:$dst, GR64:$src),
                  "ror{q} {%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>, Imp<[CL],[]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (ops GR64:$dst, GR64:$src1, i8imm:$src2),
                    "ror{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (ops GR64:$dst, GR64:$src1),
                  "ror{q} $dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def ROR64mCL : RI<0xD3, MRM1m, (ops i64mem:$dst),
                  "ror{q} {%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Imp<[CL],[]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (ops i64mem:$dst, i8imm:$src),
                    "ror{q} {$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (ops i64mem:$dst),
                  "ror{q} $dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                    "shld{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                    "shrd{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (ops GR64:$dst, GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (ops GR64:$dst, GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
} // isCommutable
} // isTwoAddress

// Temporary hack: there are no patterns associated with these instructions,
// so we have to tell tblgen that they do not produce results.
let noResults = 1 in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                    "shld{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (ops i64mem:$dst, GR64:$src2),
                    "shrd{q} {%cl, $src2, $dst|$dst, $src2, %CL}", []>,
                    Imp<[CL],[]>, TB;
def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (ops i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (ops i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q} {$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                      TB;
} // noResults
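
// SHLD/SHRD shift the destination by the count while filling the vacated
// bits from the second register operand: after "shldq %cl, %rbx, %rax",
// RAX holds (RAX << CL) | (RBX >> (64 - CL)). This funnel-shift behavior is
// what the peephole patterns near the end of this file select.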

//===----------------------------------------------------------------------===//
// Logical Instructions...
//

let isTwoAddress = 1 in
def NOT64r : RI<0xF7, MRM2r, (ops GR64:$dst, GR64:$src), "not{q} $dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (ops i64mem:$dst), "not{q} $dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr : RI<0x21, MRMDestReg,
                 (ops GR64:$dst, GR64:$src1, GR64:$src2),
                 "and{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
def AND64rm : RI<0x23, MRMSrcMem,
                 (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                 "and{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "and{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "and{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def AND64mr : RI<0x21, MRMDestMem,
                 (ops i64mem:$dst, GR64:$src),
                 "and{q} {$src, $dst|$dst, $src}",
                 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
                      (ops i64mem:$dst, i64i32imm:$src),
                      "and{q} {$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (ops i64mem:$dst, i64i8imm :$src),
                    "and{q} {$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr : RI<0x09, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                "or{q} {$src2, $dst|$dst, $src2}",
                [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
def OR64rm : RI<0x0B, MRMSrcMem , (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                "or{q} {$src2, $dst|$dst, $src2}",
                [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                     "or{q} {$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
def OR64ri8 : RIi8<0x83, MRM1r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                   "or{q} {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def OR64mr : RI<0x09, MRMDestMem, (ops i64mem:$dst, GR64:$src),
                "or{q} {$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (ops i64mem:$dst, i64i32imm:$src),
                     "or{q} {$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def OR64mi8 : RIi8<0x83, MRM1m, (ops i64mem:$dst, i64i8imm:$src),
                   "or{q} {$src, $dst|$dst, $src}",
                   [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr : RI<0x31, MRMDestReg, (ops GR64:$dst, GR64:$src1, GR64:$src2),
                 "xor{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
def XOR64rm : RI<0x33, MRMSrcMem, (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                 "xor{q} {$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (ops GR64:$dst, GR64:$src1, i64i32imm:$src2),
                      "xor{q} {$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (ops GR64:$dst, GR64:$src1, i64i8imm:$src2),
                    "xor{q} {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def XOR64mr : RI<0x31, MRMDestMem, (ops i64mem:$dst, GR64:$src),
                 "xor{q} {$src, $dst|$dst, $src}",
                 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (ops i64mem:$dst, i64i32imm:$src),
                      "xor{q} {$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (ops i64mem:$dst, i64i8imm :$src),
                    "xor{q} {$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

//===----------------------------------------------------------------------===//
// Comparison Instructions...
//

// Integer comparison
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (ops GR64:$src1, GR64:$src2),
                  "test{q} {$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
                  "test{q} {$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (ops GR64:$src1, i64i32imm:$src2),
                       "test{q} {$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (ops i64mem:$src1, i64i32imm:$src2),
                       "test{q} {$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0)]>;

def CMP64rr : RI<0x39, MRMDestReg, (ops GR64:$src1, GR64:$src2),
                 "cmp{q} {$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2)]>;
def CMP64mr : RI<0x39, MRMDestMem, (ops i64mem:$src1, GR64:$src2),
                 "cmp{q} {$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (ops GR64:$src1, i64mem:$src2),
                 "cmp{q} {$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (ops GR64:$src1, i64i32imm:$src2),
                      "cmp{q} {$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (ops i64mem:$src1, i64i32imm:$src2),
                      "cmp{q} {$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (ops i64mem:$src1, i64i8imm:$src2),
                    "cmp{q} {$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (ops GR64:$src1, i64i8imm:$src2),
                    "cmp{q} {$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2)]>;

// Conditional moves
let isTwoAddress = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovb {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B))]>, TB;
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovb {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovae {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovae {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmove {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmove {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovne {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovne {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovbe {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovbe {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmova {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmova {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovl {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovl {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovge {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovge {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovle {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovle {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovg {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovg {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovs {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovs {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovns {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovns {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (ops GR64:$dst, GR64:$src1, GR64:$src2),
                   "cmovp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                   "cmovp {$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,       // if !parity, GR64 = GR64
                    (ops GR64:$dst, GR64:$src1, GR64:$src2),
                    "cmovnp {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,       // if !parity, GR64 = [mem64]
                    (ops GR64:$dst, GR64:$src1, i64mem:$src2),
                    "cmovnp {$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NP))]>, TB;
} // isTwoAddress

//===----------------------------------------------------------------------===//
// Conversion Instructions...
//

def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                           "cvtsd2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (ops GR64:$dst, f128mem:$src),
                           "cvtsd2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (ops GR64:$dst, FR64:$src),
                        "cvttsd2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (ops GR64:$dst, f64mem:$src),
                        "cvttsd2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                            "cvttsd2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (ops GR64:$dst, f128mem:$src),
                            "cvttsd2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic

def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (ops FR64:$dst, GR64:$src),
                       "cvtsi2sd{q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (ops FR64:$dst, i64mem:$src),
                       "cvtsi2sd{q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (ops VR128:$dst, VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (ops VR128:$dst, VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
} // isTwoAddress

def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (ops FR32:$dst, GR64:$src),
                       "cvtsi2ss{q} {$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (ops FR32:$dst, i64mem:$src),
                       "cvtsi2ss{q} {$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg,
                           (ops VR128:$dst, VR128:$src1, GR64:$src2),
                           "cvtsi2ss{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
def Int_CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem,
                           (ops VR128:$dst, VR128:$src1, i64mem:$src2),
                           "cvtsi2ss{q} {$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
} // isTwoAddress

def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                           "cvtss2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                           "cvtss2si{q} {$src, $dst|$dst, $src}",
                           []>; // TODO: add intrinsic
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (ops GR64:$dst, FR32:$src),
                        "cvttss2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                        "cvttss2si{q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (ops GR64:$dst, VR128:$src),
                            "cvttss2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (ops GR64:$dst, f32mem:$src),
                            "cvttss2si{q} {$src, $dst|$dst, $src}",
                            []>; // TODO: add intrinsic

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// In 64-bit mode, each 64-bit and 32-bit register has a low 8-bit
// sub-register.
def TRUNC_64to8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR64:$src),
                    "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                    [(set GR8:$dst, (trunc GR64:$src))]>;
def TRUNC_32to8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR32:$src),
                    "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                    [(set GR8:$dst, (trunc GR32:$src))]>,
                    Requires<[In64BitMode]>;
def TRUNC_16to8 : I<0x88, MRMDestReg, (ops GR8:$dst, GR16:$src),
                    "mov{b} {${src:subreg8}, $dst|$dst, ${src:subreg8}}",
                    [(set GR8:$dst, (trunc GR16:$src))]>,
                    Requires<[In64BitMode]>;

def TRUNC_64to16 : I<0x89, MRMDestReg, (ops GR16:$dst, GR64:$src),
                     "mov{w} {${src:subreg16}, $dst|$dst, ${src:subreg16}}",
                     [(set GR16:$dst, (trunc GR64:$src))]>;

def TRUNC_64to32 : I<0x89, MRMDestReg, (ops GR32:$dst, GR64:$src),
                     "mov{l} {${src:subreg32}, $dst|$dst, ${src:subreg32}}",
                     [(set GR32:$dst, (trunc GR64:$src))]>;
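
// Illustrative note: each TRUNC_* above is an ordinary 8/16/32-bit
// register-to-register move of the source's low sub-register; e.g.
// truncating RAX to i8 prints as a "movb" of %al. No dedicated truncate
// instruction exists or is needed.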

// TODO: Remove this after proper i32 -> i64 zext support.
def PsMOVZX64rr32: I<0x89, MRMDestReg, (ops GR64:$dst, GR32:$src),
                     "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zext GR32:$src))]>;
def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (ops GR64:$dst, i32mem:$src),
                     "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
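
// These work because, in 64-bit mode, any write to a 32-bit register
// implicitly zeroes the upper 32 bits of the containing 64-bit register, so
// a plain "movl" already performs the i32 -> i64 zero extension (the same
// fact makes MOV64ri64i32 below correct).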

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32.
// Remove when we have a better way to specify isel priority.
let AddedComplexity = 1 in
def MOV64r0 : RI<0x31, MRMInitReg, (ops GR64:$dst),
                 "xor{q} $dst, $dst",
                 [(set GR64:$dst, 0)]>;
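
// The xor form is preferred for materializing zero: "xorq %reg, %reg" is a
// 3-byte encoding versus 7 bytes for "movq $0, %reg" (MOV64ri32), and many
// processors treat it as having no input dependence on the register.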

// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (ops GR64:$dst, i64i32imm:$src),
                        "mov{l} {$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;

def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[SmallCode, IsStatic]>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;

// {s|z}extload bool -> {s|z}extload byte
def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
def : Pat<(i64 (anyext (loadi8  addr:$src))), (MOVZX64rm8  addr:$src)>;
def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;
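
// anyext leaves the high bits unspecified, so any of the extending moves is
// a legal implementation; the zero-extending forms are used here because
// they are as cheap as any alternative and avoid partial-register updates.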

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;
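
// For example, the low half of a 128-bit logical shift right by CL has
// exactly the form (lo >> cl) | (hi << (64 - cl)) for counts 1-63, so it is
// selected as a single SHRD64rrCL by the first pattern above.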

// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
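
// addc/adde (and subc/sube) are how multi-word arithmetic reaches ADC/SBB:
// a 128-bit add is selected as an ADD64rr for the low words whose carry is
// then consumed by an ADC64rr for the high words.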

//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (ops VR128:$dst, GR64:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOV64toPQIrm : RPDI<0x6E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>;

def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (ops GR64:$dst, VR128:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                          (iPTR 0)))]>;
def MOVPQIto64mr : RPDI<0x7E, MRMDestMem, (ops i64mem:$dst, VR128:$src),
                        "mov{d|q} {$src, $dst|$dst, $src}",
                        [(store (i64 (vector_extract (v2i64 VR128:$src),
                                      (iPTR 0))), addr:$dst)]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (ops FR64:$dst, GR64:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (ops FR64:$dst, i64mem:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (ops GR64:$dst, FR64:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (ops i64mem:$dst, FR64:$src),
                       "mov{d|q} {$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
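
// Note: these use the same 0x6E/0x7E opcodes as the 32-bit movd forms; with
// the REX.W prefix supplied by RPDI they assemble as the 64-bit movq
// variants, moving a full GR64 to or from the low quadword of an XMM
// register.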