//===- X86InstrX86-64.td - Describe the X86 Instruction Set -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Operand Definitions...
//

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64>;

// Memory operand for 64-bit LEA: (base, scale, index, displacement).
def lea64mem : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}

// Same shape with 32-bit registers; used by LEA64_32r below.
def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions...
//
// Matches an i64 addressing-mode expression, producing the 4 LEA operands
// (base, scale, index, disp); selection is done by SelectLEAAddr in C++.
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;
//===----------------------------------------------------------------------===//
// Pattern fragments...
//

def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // unsigned (zero extended) field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;

def i64immSExt8   : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in a 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;
// Extending-load fragments: an i1/i8/i16/i32 load whose result is sign-,
// zero-, or any-extended to i64.
def sextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (sextloadi1 node:$ptr))>;
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
//===----------------------------------------------------------------------===//
// Instruction list...
//

// Pseudo that marks $dst as containing an undefined value.
def IMPLICIT_DEF_GR64 : I<0, Pseudo, (outs GR64:$dst), (ins),
                          "#IMPLICIT_DEF $dst",
                          [(set GR64:$dst, (undef))]>;
//===----------------------------------------------------------------------===//
//  Call Instructions...
//
// NOTE(review): a "let isCall = 1, ..." wrapper appears to have been lost
// here; without it these are not marked as calls — confirm against history.
let isCall = 1 in
  // All calls clobber the non-callee saved registers...
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15] in {
    def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
                          "call\t${dst:call}", []>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call\t{*}$dst", [(X86call GR64:$dst)]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call\t{*}$dst", []>;
  }

// Indirect branches (register and memory forms).
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                     [(brind GR64:$dst)]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>;
}
//===----------------------------------------------------------------------===//
//  Miscellaneous Instructions...
//
let Defs = [RBP,RSP], Uses = [RBP,RSP] in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>;
let Defs = [RSP], Uses = [RSP] in {
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}
// 32-bit LEA that uses 64-bit registers in the address (64-bit mode only).
def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;
let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

// Exchange (no selection patterns; emitted by hand where needed).
def XCHG64rr : RI<0x87, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "xchg{q}\t{$src2|$src1}, {$src1|$src2}", []>;
def XCHG64mr : RI<0x87, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                  "xchg{q}\t{$src2|$src1}, {$src1|$src2}", []>;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "xchg{q}\t{$src2|$src1}, {$src1|$src2}", []>;
// Repeated string operations (used for memcpy / memset lowering).
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;
//===----------------------------------------------------------------------===//
//  Move Instructions...
//

def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}

// Loads and stores.
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;
// Sign/Zero extenders

def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// Note: no MOVZX for 32->64; a plain 32-bit MOV already zero-extends.
def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;

let Defs = [RAX], Uses = [EAX] in
def CDQE : RI<0x98, RawFrm, (outs), (ins),
              "{cltq|cdqe}", []>;     // RAX = signext(EAX)

let Defs = [RAX,RDX], Uses = [RAX] in
def CQO  : RI<0x99, RawFrm, (outs), (ins),
              "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
def ADD64rr  : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;

def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
} // isConvertibleToThreeAddress

def ADD64rm  : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress

// Memory-destination forms.
def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
// BUG FIX: this 32-bit-immediate form previously matched i64immSExt8,
// which would mis-select 8-bit-range immediates into the i32 encoding
// and reject valid 32-bit immediates.
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
let isTwoAddress = 1 in {
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;

def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;

def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
let isTwoAddress = 1 in {
def SBB64rr  : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
// Unsigned multiplication
let Defs = [RAX,RDX], Uses = [RAX] in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*GR64
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*[mem64]
}
let isTwoAddress = 1 in {
let isCommutable = 1 in
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;

def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two address instructions!
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
// Unsigned division / remainder
let Defs = [RAX,RDX], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),   // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;

// Signed division / remainder
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),   // RDX:RAX/r64 = RAX,RDX
                "idiv{q}\t$src", []>;
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "idiv{q}\t$src", []>;
}
// Unary instructions
let CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src))]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, 1))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, -1))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;

// In 64-bit mode, single byte INC and DEC cannot be encoded.
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, 1))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, 1))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, -1))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, -1))]>,
                Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress
} // CodeSize
// Shift instructions
let isTwoAddress = 1 in {
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>;
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t$dst",
                  [(set GR64:$dst, (shl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi  : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "shl{q}\t{$src, $dst|$dst, $src}",
                    [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1  : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t$dst",
                  [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>;
def SHR64ri  : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "shr{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi  : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "shr{q}\t{$src, $dst|$dst, $src}",
                    [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1  : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t$dst",
                  [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "sar{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "sar{q}\t{$src, $dst|$dst, $src}",
                    [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1  : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t$dst",
                  [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Rotate instructions
let isTwoAddress = 1 in {
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

// BUG FIX: was declared with the plain "I" format, which omits the REX.W
// prefix and would have encoded a 32-bit rotate; all other 64-bit shift
// and rotate memory forms use "RI".
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "rol{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t$dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t$dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB;

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                 TB;
}
} // isTwoAddress

// Temporary hack: there is no patterns associated with these instructions
// so we have to tell tblgen that these do not produce results.
let noResults = 1 in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB;
def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
                 TB;
}
//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let isTwoAddress = 1 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "and{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
                      (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "and{q}\t{$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg,  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
def XOR64ri8 : RIi8<0x83, MRM6r,  (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs), (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0)]>;

def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2)]>;
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs), (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2)]>;
// Conditional moves.  All forms are two-address: $dst is tied to $src1, and
// $src2 (register or memory) is copied in only when the condition holds.
// NOTE(review): the X86_COND_* operand and ", TB" (0F-prefix) suffix on each
// pattern were reconstructed from the per-def condition comments and the
// 0F 40-4F CMOVcc opcode map -- confirm against the original file.
let isTwoAddress = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B))]>, TB;
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NP))]>, TB;
} // isTwoAddress
//===----------------------------------------------------------------------===//
//  Conversion Instructions...
//

// f64 -> i64 conversions.  CVTSD2SI rounds per MXCSR; CVTTSD2SI truncates.
// The Int_* forms take a full XMM operand and map the SSE2 intrinsics.
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
                                             (load addr:$src)))]>;
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst),
                            (ins f128mem:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64
                               (load addr:$src)))]>;
// Signed i64 -> f64 conversions.
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
// Intrinsic forms: the upper element of $src1 is passed through, so these are
// two-address ($dst tied to $src1).
let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              GR64:$src2))]>;
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              (loadi64 addr:$src2)))]>;
} // isTwoAddress
// Signed i64 -> f32 conversions.
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
// Intrinsic forms with pass-through of the upper XMM elements; no selection
// pattern yet.
let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
def Int_CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                           []>; // TODO: add intrinsic
} // isTwoAddress
// f32 -> i64 conversions.  CVTSS2SI rounds per MXCSR; CVTTSS2SI truncates.
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse_cvtss2si64 VR128:$src))]>;
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse_cvtss2si64
                                             (load addr:$src)))]>;
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 VR128:$src))]>;
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst),
                            (ins f32mem:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
// Intrinsic i64 -> f32 conversions; two-address because the upper elements of
// $src1 pass through to $dst.
let isTwoAddress = 1 in {
def Int_CVTSI642SSrr : RSSI<0x2A, MRMSrcReg,
                            (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               GR64:$src2))]>;
def Int_CVTSI642SSrm : RSSI<0x2A, MRMSrcMem,
                            (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               (loadi64 addr:$src2)))]>;
} // isTwoAddress
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Pseudo 32->64 zero-extension: a plain "movl" writes the low 32 bits of the
// destination (printed via ${dst:subreg32}) and the zext falls out of that.
// TODO: Remove this after proper i32 -> i64 zext support.
def PsMOVZX64rr32: I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                     "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zext GR32:$src))]>;
def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                     "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let AddedComplexity = 1, isReMaterializable = 1 in
def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
                 "xor{q}\t$dst, $dst",
                 [(set GR64:$dst, 0)]>;

// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1, isReMaterializable = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable: outside the small
// code model the address may not fit in 32 bits, so use the 64-bit move.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;

// Store a symbol address directly with a 32-bit-immediate move; valid only
// when the whole image lives in the low 4G of the small static code model.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;

// {s|z}extload bool -> {s|z}extload byte
def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload: any-extending loads are implemented as zero-extending loads.
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;

// anyext: lower to zero-extension.
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
def : Pat<(i64 (anyext (loadi8  addr:$src))), (MOVZX64rm8  addr:$src)>;
def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;
// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, imm:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
// Match only immediates that fit a sign-extended 32-bit field (mirrors the
// addc pattern above); a bare 'imm' here would let SUB64ri32 be selected for
// 64-bit immediates it cannot encode.
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

// GR64 <-> low quadword of an XMM register.
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOV64toPQIrm : RPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>;

def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                          (iPTR 0)))]>;
def MOVPQIto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(store (i64 (vector_extract (v2i64 VR128:$src),
                                      (iPTR 0))), addr:$dst)]>;
// Bit-pattern moves between GR64 and scalar-double (FR64) registers; no
// conversion is performed, only a bitcast.
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;