//====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Operand Definitions.
//===----------------------------------------------------------------------===//

// 64 bits, but only 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64 bits, but only 8 bits are significant.
def i64i8imm   : Operand<i64>;
def lea64mem : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//===----------------------------------------------------------------------===//

def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;

//===----------------------------------------------------------------------===//
// Pattern Fragments.
//===----------------------------------------------------------------------===//
def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;

def i64immSExt8   : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;

def i64immFFFFFFFF  : PatLeaf<(i64 imm), [{
  // i64immFFFFFFFF - True if this is a specific constant we can't write in
  // tblgen files.
  return N->getValue() == 0x00000000FFFFFFFFULL;
}]>;
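
// Editorial examples (not in the original source): -1 matches i64immSExt32,
// since truncating to 32 bits and sign-extending recovers the value, while
// 0x100000000 does not; 0x7F matches i64immSExt8, but 0x80 does not, because
// (int8_t)0x80 sign-extends to -128 rather than 128.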

def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
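
// Editorial note (not in the original source): these fragments give the
// extending loads an explicit i64 result type; e.g. sextloadi64i8 matches
// (i64 (sextloadi8 addr)) and is selected by the MOVSX64rm8 pattern below
// into a single movsbq instruction.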

//===----------------------------------------------------------------------===//
// Instruction list...
//

//===----------------------------------------------------------------------===//
//  Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers...
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in {
    def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
                          "call\t${dst:call}", []>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call\t{*}$dst", [(X86call GR64:$dst)]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>;
  }

// Tail call stuff.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset",
                       []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset",
                       []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst  # TAILCALL",
                     []>;

// Branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                     [(brind GR64:$dst)]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>;
}

//===----------------------------------------------------------------------===//
//  Miscellaneous Instructions...
//
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
let mayStore = 1 in
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}

let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
def POPFQ    : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
def PUSHFQ   : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;

let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;
def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;

//===----------------------------------------------------------------------===//
//  Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}

let isSimpleLoad = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;

// Sign/Zero extenders

def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;

// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. We need this because the seeming alternative for
// implementing zext from 32 to 64, an EXTRACT_SUBREG/SUBREG_TO_REG pair, isn't
// safe because both instructions could be optimized away in the
// register-to-register case, leaving nothing behind to do the zero extension.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                    [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                    [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
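
// Editorial example (not in the original source): in 64-bit mode, writing a
// 32-bit register always zeroes bits 63:32 of the containing 64-bit register,
// so "movl %eax, %eax" by itself implements the zext from EAX to RAX.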

let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
                "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
                "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}

//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
def ADD64rr  : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;

def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
} // isConvertibleToThreeAddress
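
// Editorial note (not in the original source): isConvertibleToThreeAddress
// lets the two-address pass rewrite e.g. "addq $8, %rax" (when RAX is still
// live afterward) into "leaq 8(%rax), %rcx", avoiding an extra copy.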

def ADD64rm  : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress

def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]

let isTwoAddress = 1 in {
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;

def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;

def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;

let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
def SBB64rr  : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]

// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*[mem64]
}

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;

def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two address instructions!
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
} // Defs = [EFLAGS]

// Unsigned division / remainder
let neverHasSideEffects = 1 in {
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),       // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
// Signed division / remainder
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),       // RDX:RAX/r64 = RAX,RDX
                "idiv{q}\t$src", []>;
let mayLoad = 1 in {
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),     // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),     // RDX:RAX/[mem64] = RAX,RDX
                "idiv{q}\t$src", []>;
}
}
}

// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src))]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, 1))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, -1))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;

// In 64-bit mode, single byte INC and DEC cannot be encoded: the one-byte
// 0x40-0x4F opcodes are reused as REX prefixes, so the two-byte 0xFF forms
// must be used instead.
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, 1))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, 1))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, -1))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, -1))]>,
                Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress

// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
let isTwoAddress = 0, CodeSize = 2 in {
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
                Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
                Requires<[In64BitMode]>;
}
} // Defs = [EFLAGS], CodeSize

let Defs = [EFLAGS] in {
// Shift instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
// cheaper.
} // isTwoAddress

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "sar{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t$dst",
                 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "sar{q}\t{$src, $dst|$dst, $src}",
                   [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Rotate instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "rol{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                 "rol{q}\t$dst",
                 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "rol{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "ror{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                 "ror{q}\t$dst",
                 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "ror{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
}

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let isTwoAddress = 1 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "and{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
                      (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "and{q}\t{$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;

let isTwoAddress = 1 in {
let isCommutable = 1, isAsCheapAsAMove = 1 in
def XOR64rr  : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                   (implicit EFLAGS)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                   (implicit EFLAGS)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;

def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                  (implicit EFLAGS)]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Conditional moves
let Uses = [EFLAGS], isTwoAddress = 1 in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP, EFLAGS))]>, TB;
} // isCommutable = 1

def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NP, EFLAGS))]>, TB;
} // isTwoAddress

//===----------------------------------------------------------------------===//
//  Conversion Instructions...
//

// f64 -> signed i64
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
                                             (load addr:$src)))]>;
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64
                               (load addr:$src)))]>;

// Signed i64 -> f64
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              GR64:$src2))]>;
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              (loadi64 addr:$src2)))]>;
} // isTwoAddress

// Signed i64 -> f32
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
                            (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               GR64:$src2))]>;
def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
                            (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               (loadi64 addr:$src2)))]>;
} // isTwoAddress

// f32 -> signed i64
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse_cvtss2si64 VR128:$src))]>;
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse_cvtss2si64
                                             (load addr:$src)))]>;
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 VR128:$src))]>;
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 (load addr:$src)))]>;

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], AddedComplexity = 1,
    isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0  : I<0x31, MRMInitReg, (outs GR64:$dst), (ins),
                 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
                 [(set GR64:$dst, 0)]>;

// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1, isReMaterializable = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

def TLS_addr64 : I<0, Pseudo, (outs GR64:$dst), (ins i64imm:$sym),
                   ".byte\t0x66; leaq\t${sym:mem}(%rip), $dst; .word\t0x6666; rex64",
                   [(set GR64:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
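
// Editorial note (not in the original source): the extra prefix and padding
// bytes make this expand to the fixed-length instruction sequence that the
// x86-64 ELF TLS ABI prescribes for general-dynamic accesses, so the linker
// can recognize and relax the sequence.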

//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\tcmpxchgq $swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}

let Constraints = "$val = $dst" in {
let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                 "lock\n\txadd $val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
              TB, LOCK;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                  "xchg\t$val, $ptr",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
}

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;

def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[SmallCode, IsStatic]>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;

// Tailcall stuff.
def : Pat<(X86tailcall GR32:$dst),
          (TAILJMPr GR32:$dst)>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (TAILJMPd tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (TAILJMPd texternalsym:$dst)>;

def : Pat<(X86tcret GR64:$dst, imm:$off),
          (TCRETURNri64 GR64:$dst, imm:$off)>;
def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>;

// TEST R,R is smaller than CMP R,0
def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
          (TEST64rr GR64:$src1, GR64:$src1)>;
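
// Editorial example (not in the original source): "testq %rax, %rax" encodes
// in 3 bytes (REX.W + 85 /r), while "cmpq $0, %rax" needs at least 4 bytes
// (REX.W + 83 /7 ib).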

def : Pat<(i64 (zext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;

// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i32 addr:$src),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
                         x86_subreg_32bit)>;

// anyext
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, x86_subreg_32bit)>;

def : Pat<(i64 (anyext (loadi8  addr:$src))), (MOVZX64rm8  addr:$src)>;
def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
def : Pat<(i64 (anyext (loadi32 addr:$src))),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
                         x86_subreg_32bit)>;

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, i64immFFFFFFFF),
          (MOVZX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
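// Editorial example (not in the original source): this turns
// "andq $0xffffffff, %rax" into "movl %eax, %eax", reusing the implicit
// zero extension of 32-bit register writes.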
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;
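
// Editorial note (not in the original source): these patterns recognize
// 64-bit funnel shifts, e.g. (x >> %cl) | (y << (64 - %cl)) becomes a single
// "shrdq %cl, y, x" instead of two shifts and an or.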

// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, imm:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;

//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr  : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                         "mov{d|q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                           (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;

//===----------------------------------------------------------------------===//
// X86-64 SSE4.1 Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs GR64:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR64:$dst,
                     (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                     addr:$dst)]>, OpSize, REX_W;
}

defm PEXTRQ      : SS41I_extract64<0x16, "pextrq">;

let isTwoAddress = 1 in {
  multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                      "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
                     OpSize, REX_W;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                      "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                               imm:$src3)))]>, OpSize, REX_W;
  }
}

defm PINSRQ      : SS41I_insert64<0x22, "pinsrq">;