//===-- X86Instr64bit.td - Describe X86-64 Instructions ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;

let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
}
let mayStore = 1 in {
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
}
}

let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
def PUSH64i8   : Ii8<0x6A, RawFrm, (outs), (ins i8imm:$imm),
                     "push{q}\t$imm", []>;
def PUSH64i16  : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
                      "push{q}\t$imm", []>;
def PUSH64i32  : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
                      "push{q}\t$imm", []>;
}
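// The push-immediate forms above sign-extend an 8- or 32-bit immediate to the
// 64-bit operand size before storing it, which is why i64i32imm is accepted.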

let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
def POPF64   : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
               Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
def PUSHF64    : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
                 Requires<[In64BitMode]>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;
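// lea folds an entire addressing-mode computation into one flag-free
// instruction; e.g. an (add (shl x, 2), y) computation can be selected as a
// single leaq (%rdi,%rsi,4), %rax.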

let Constraints = "$src = $dst" in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;

def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
// Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}
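// MOV64ri32 (opcode C7 /0) sign-extends a 32-bit immediate to 64 bits, so it
// is the shorter encoding whenever the constant fits in a signed i32; e.g.
// movq $-1, %rax is 7 bytes versus 10 for the movabsq form.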

// The assembler accepts movq of a 64-bit immediate as an alternate spelling of
// movabsq.
let isAsmParserOnly = 1 in {
def MOV64ri_alt : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                        "mov{q}\t{$src, $dst|$dst, $src}", []>;
}

let isCodeGenOnly = 1 in {
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                     "mov{q}\t{$src, $dst|$dst, $src}", []>;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;

/// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
let isCodeGenOnly = 1 in {
let neverHasSideEffects = 1 in
def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}", []>;

let mayLoad = 1,
    canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;

let mayStore = 1 in
def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;
}

// FIXME: These definitions are utterly broken
// Just leave them commented out for now because they're useless outside
// of the large code model, and most compilers won't generate the instructions
// anyway.
def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
                    "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
                      "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
                    "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
                      "mov{q}\t{%rax, $dst|$dst, %rax}", []>;

// Sign/Zero extenders

// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
// operand that can never access an h register. If support for h registers
// were generalized, this would require a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                      "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                      "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
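// For example, movzbl %al, %eax clears bits 63:32 of %rax as a side effect,
// so it produces the same 64-bit result as movzbq %al, %rax while avoiding
// the REX.W prefix.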
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;

// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
// zero-extension, however this isn't possible when the 32-bit value is
// defined by a truncate or is copied from something where the high bits aren't
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "", [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;

// Any instruction that defines a 32-bit result implicitly zero-extends into
// the high half of the 64-bit register, with a few exceptions: a truncate
// can be lowered to EXTRACT_SUBREG, a CopyFromReg may be copying from a
// truncate, and x86's cmov doesn't write its destination if the condition
// is false. Any other 32-bit operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
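// The (i64 0) operand asserts that the bits above sub_32bit are already
// zero, so this zext costs no instructions at all: the 32-bit def itself
// performed the zero-extension.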

let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
               "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
               "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}

//===----------------------------------------------------------------------===//
// Arithmetic Instructions...
//

let Defs = [EFLAGS] in {

def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
                     "add{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
def ADD64rr    : RI<0x01, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86add_flag GR64:$src1, GR64:$src2))]>;

// These are alternate spellings for use by the disassembler; we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
def ADD64rr_alt  : RI<0x03, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>;
}

// Register-Integer Addition
def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i8imm:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, i64immSExt8:$src2))]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86add_flag GR64:$src1, i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress

// Register-Memory Addition
def ADD64rm     : RI<0x03, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$src1, i64mem:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, (load addr:$src2)))]>;

} // Constraints = "$src1 = $dst"

// Memory-Register Addition
def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
                     (implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
                       (implicit EFLAGS)]>;

let Uses = [EFLAGS] in {

def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
                     "adc{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}", []>;
}

def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst)]>;
} // Uses = [EFLAGS]

let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sub{q}\t{$src2, $dst|$dst, $src2}", []>;
}

// Register-Memory Subtraction
def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, (load addr:$src2)))]>;

// Register-Integer Subtraction
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sub{q}\t{$src, %rax|%rax, $src}", []>;

// Memory-Register Subtraction
def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;

// Memory-Integer Subtraction
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst),
                     (implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst),
                       (implicit EFLAGS)]>;

let Uses = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
}

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sbb{q}\t{$src, %rax|%rax, $src}", []>;

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]

// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*[mem64]
}

let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
                                   (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;

// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
                                   (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
} // Constraints = "$src1 = $dst"

// Surprisingly enough, these are not two-address instructions!

// Register-Integer Signed Integer Multiplication
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;

// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag (load addr:$src1),
                                          i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag (load addr:$src1),
                                            i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]

// Unsigned division / remainder
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
// RDX:RAX/r64 = RAX,RDX
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
                "div{q}\t$src", []>;
// Signed division / remainder
// RDX:RAX/r64 = RAX,RDX
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
                "idiv{q}\t$src", []>;
let mayLoad = 1 in {
// RDX:RAX/[mem64] = RAX,RDX
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
                "div{q}\t$src", []>;
// RDX:RAX/[mem64] = RAX,RDX
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
                "idiv{q}\t$src", []>;
}
}

// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let Constraints = "$src = $dst" in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src)),
                 (implicit EFLAGS)]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
                 (implicit EFLAGS)]>;

// In 64-bit mode, single byte INC and DEC cannot be encoded.
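// (The one-byte 0x40-0x4F inc/dec opcodes were repurposed as REX prefixes,
// so the two-byte FF /0 and FF /1 forms must be used instead.)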
let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
                  "inc{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
                  "inc{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
                  "dec{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
                  "dec{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
} // Constraints = "$src = $dst", isConvertibleToThreeAddress

// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), 1), addr:$dst),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), 1), addr:$dst),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), -1), addr:$dst),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), -1), addr:$dst),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
} // Defs = [EFLAGS], CodeSize

let Defs = [EFLAGS] in {
// Shift instructions
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper.
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t$dst", []>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (sra GR64:$src1, CL))]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "sar{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, cl}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "sar{q}\t{$src, $dst|$dst, $src}",
                    [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Rotate instructions

let Constraints = "$src = $dst" in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;

def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>;
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>;
}
} // Constraints = "$src = $dst"

def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>;
}

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, cl}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "rol{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t$dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t$dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Double shift instructions (generalizations of rotate)
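// X86shld shifts $dst left by the count, filling the vacated low bits from
// the high bits of $src2 (shrd is the mirror image); e.g.
//   shldq %cl, %rsi, %rdi
// shifts %rdi left by %cl bits and fills from the top of %rsi.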
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
                 TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
                 TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
}
} // Constraints = "$src1 = $dst"

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
}

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
// Logical Instructions...
//

let Constraints = "$src = $dst", AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;

let Defs = [EFLAGS] in {
def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
                     "and{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "and{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, (load addr:$src2)))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86and_flag GR64:$src1, i64immSExt8:$src2))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "and{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
                      (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "and{q}\t{$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, (load addr:$src2)))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                      (implicit EFLAGS)]>;

def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
                    "or{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, (load addr:$src2)))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;

def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
                     "xor{q}\t{$src, %rax|%rax, $src}", []>;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
// Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
def TEST64i32 : RIi32<0xA9, RawFrm, (outs), (ins i64i32imm:$src),
                      "test{q}\t{$src, %rax|%rax, $src}", []>;
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, GR64:$src2), 0))]>;
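// test computes the AND of its operands purely for the flags and discards
// the result, so e.g. testq %rax, %rax is the idiomatic way to compare %rax
// against zero without a separate cmp.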
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, (loadi64 addr:$src2)),
                                        0))]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and GR64:$src1, i64immSExt32:$src2),
                                             0))]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and (loadi64 addr:$src1),
                                                  i64immSExt32:$src2), 0))]>;

def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
                     "cmp{q}\t{$src, %rax|%rax, $src}", []>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;

// These are alternate spellings for use by the disassembler; we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
}

def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                          i64immSExt8:$src2))]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                            i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]

// TODO: BTC, BTR, and BTS are defined below for the assembler and
// disassembler only; they still need selection patterns.
let Defs = [EFLAGS] in {
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
               "bt{q}\t{$src2, $src1|$src1, $src2}",
               [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;

// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Disable these instructions for now.
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
               "bt{q}\t{$src2, $src1|$src1, $src2}",
//               [(X86bt (loadi64 addr:$src1), GR64:$src2),
//                (implicit EFLAGS)]
               []>, TB;

def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86bt (loadi64 addr:$src1),
                                        i64immSExt8:$src2))]>, TB;

def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr  : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                         "mov{d|q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                           (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
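// These selections let an i64 <-> f64 bitconvert stay out of memory where
// possible: the value moves directly between a GR64 and an XMM register via
// movd/movq (66 REX.W 0F 6E / 7E) instead of a store-reload through the stack.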