1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // Move Instructions...
// Basic 64-bit register-to-register and immediate-to-register moves.
// MOV64rr is marked side-effect free so it can be freely deleted/moved.
21 let neverHasSideEffects = 1 in
22 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
23 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// Immediate moves are rematerializable and as cheap as a move, so the
// register allocator may recompute them instead of spilling.
// MOV64ri carries a full 64-bit immediate (movabsq); MOV64ri32 uses the
// shorter encoding for immediates that sign-extend from 32 bits.
25 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
26 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
27 "movabs{q}\t{$src, $dst|$dst, $src}",
28 [(set GR64:$dst, imm:$src)]>;
29 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
30 "mov{q}\t{$src, $dst|$dst, $src}",
31 [(set GR64:$dst, i64immSExt32:$src)]>;
34 // The assembler accepts movq of a 64-bit immediate as an alternate spelling of
// movabsq; this def exists only for the asm parser and has no pattern.
36 let isAsmParserOnly = 1 in {
37 def MOV64ri_alt : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
38 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// Reversed-encoding form (0x8B, reg <- r/m) of MOV64rr for the disassembler;
// isCodeGenOnly keeps the assembler from matching it.
41 let isCodeGenOnly = 1 in {
42 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
43 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// 64-bit load, store, and store-immediate forms.
// canFoldAsLoad lets MOV64rm be folded into the memory operand of a user.
46 let canFoldAsLoad = 1, isReMaterializable = 1 in
47 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
48 "mov{q}\t{$src, $dst|$dst, $src}",
49 [(set GR64:$dst, (load addr:$src))]>;
51 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
52 "mov{q}\t{$src, $dst|$dst, $src}",
53 [(store GR64:$src, addr:$dst)]>;
// Store of a sign-extended 32-bit immediate to a 64-bit memory location.
54 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
55 "mov{q}\t{$src, $dst|$dst, $src}",
56 [(store i64immSExt32:$src, addr:$dst)]>;
58 /// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
// These operate on the tail-call register/memory operand classes so the
// tail-call lowering can use them; codegen-only, never asm-matched.
59 let isCodeGenOnly = 1 in {
60 let neverHasSideEffects = 1 in
61 def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
62 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// NOTE(review): the opening of this `let` (original line 64) is not visible
// in this excerpt; line 65 is its continuation.
65 canFoldAsLoad = 1, isReMaterializable = 1 in
66 def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
67 "mov{q}\t{$src, $dst|$dst, $src}",
// NOTE(review): MOV64rm_TC's pattern line and MOV64mr_TC's closing line are
// elided from this excerpt.
71 def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
72 "mov{q}\t{$src, $dst|$dst, $src}",
76 // FIXME: These definitions are utterly broken
77 // Just leave them commented out for now because they're useless outside
78 // of the large code model, and most compilers won't generate the instructions
// Moffset forms of mov: RAX <-> absolute address, no ModRM byte (RawFrm).
// No patterns; assembler/disassembler only.
81 def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
82 "mov{q}\t{$src, %rax|%rax, $src}", []>;
83 def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
84 "mov{q}\t{$src, %rax|%rax, $src}", []>;
85 def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
86 "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
87 def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
88 "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
92 //===----------------------------------------------------------------------===//
93 // Arithmetic Instructions...
// All add forms define EFLAGS.
96 let Defs = [EFLAGS] in {
// Short RAX-relative form: add imm32 (sign-extended) to %rax.
98 def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
99 "add{q}\t{$src, %rax|%rax, $src}", []>;
101 let Constraints = "$src1 = $dst" in {
102 let isConvertibleToThreeAddress = 1 in {
103 let isCommutable = 1 in
104 // Register-Register Addition
105 def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst),
106 (ins GR64:$src1, GR64:$src2),
107 "add{q}\t{$src2, $dst|$dst, $src2}",
108 [(set GR64:$dst, EFLAGS,
109 (X86add_flag GR64:$src1, GR64:$src2))]>;
111 // These are alternate spellings for use by the disassembler, we mark them as
112 // code gen only to ensure they aren't matched by the assembler.
113 let isCodeGenOnly = 1 in {
// Reversed encoding (0x03, reg <- r/m) of ADD64rr for the disassembler.
// Fixed the mnemonic suffix: this is a 64-bit add on GR64 operands, so the
// AT&T suffix must be {q}, not {l} — consistent with every other 64-bit
// alternate/_REV def in this file (e.g. SUB64rr_REV, ADC64rr_REV).
114 def ADD64rr_alt : RI<0x03, MRMSrcReg, (outs GR64:$dst),
115 (ins GR64:$src1, GR64:$src2),
116 "add{q}\t{$src2, $dst|$dst, $src2}", []>;
119 // Register-Integer Addition
// 0x83 /0 takes a sign-extended 8-bit immediate; 0x81 /0 a sign-extended
// 32-bit immediate. Both are convertible to three-address (LEA).
120 def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst),
121 (ins GR64:$src1, i64i8imm:$src2),
122 "add{q}\t{$src2, $dst|$dst, $src2}",
123 [(set GR64:$dst, EFLAGS,
124 (X86add_flag GR64:$src1, i64immSExt8:$src2))]>;
125 def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
126 (ins GR64:$src1, i64i32imm:$src2),
127 "add{q}\t{$src2, $dst|$dst, $src2}",
128 [(set GR64:$dst, EFLAGS,
129 (X86add_flag GR64:$src1, i64immSExt32:$src2))]>;
130 } // isConvertibleToThreeAddress
132 // Register-Memory Addition
133 def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst),
134 (ins GR64:$src1, i64mem:$src2),
135 "add{q}\t{$src2, $dst|$dst, $src2}",
136 [(set GR64:$dst, EFLAGS,
137 (X86add_flag GR64:$src1, (load addr:$src2)))]>;
139 } // Constraints = "$src1 = $dst"
141 // Memory-Register Addition
// NOTE(review): the closing pattern lines of the three memory-destination
// defs below (presumably "(implicit EFLAGS)]>;") are elided from this excerpt.
142 def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
143 "add{q}\t{$src2, $dst|$dst, $src2}",
144 [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
146 def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
147 "add{q}\t{$src2, $dst|$dst, $src2}",
148 [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
150 def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
151 "add{q}\t{$src2, $dst|$dst, $src2}",
152 [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
// Add-with-carry: consumes the carry flag (Uses = [EFLAGS]) in addition to
// defining flags via the enclosing Defs = [EFLAGS] scope.
155 let Uses = [EFLAGS] in {
157 def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
158 "adc{q}\t{$src, %rax|%rax, $src}", []>;
160 let Constraints = "$src1 = $dst" in {
161 let isCommutable = 1 in
162 def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst),
163 (ins GR64:$src1, GR64:$src2),
164 "adc{q}\t{$src2, $dst|$dst, $src2}",
165 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
167 let isCodeGenOnly = 1 in {
// Reversed encoding (0x13, reg <- r/m) of ADC64rr for the disassembler.
// Fixed the destination register class: a 64-bit adc with GR64 inputs must
// produce a GR64, not GR32 — consistent with SUB64rr_REV, OR64rr_REV, and
// XOR64rr_REV in this file.
168 def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR64:$dst),
169 (ins GR64:$src1, GR64:$src2),
170 "adc{q}\t{$src2, $dst|$dst, $src2}", []>;
// ADC memory-source, immediate, and memory-destination forms; all use the
// adde (add with carry-in) node so the carry chains through.
173 def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst),
174 (ins GR64:$src1, i64mem:$src2),
175 "adc{q}\t{$src2, $dst|$dst, $src2}",
176 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
178 def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
179 (ins GR64:$src1, i64i8imm:$src2),
180 "adc{q}\t{$src2, $dst|$dst, $src2}",
181 [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
182 def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
183 (ins GR64:$src1, i64i32imm:$src2),
184 "adc{q}\t{$src2, $dst|$dst, $src2}",
185 [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
186 } // Constraints = "$src1 = $dst"
188 def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
189 "adc{q}\t{$src2, $dst|$dst, $src2}",
190 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
191 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
192 "adc{q}\t{$src2, $dst|$dst, $src2}",
193 [(store (adde (load addr:$dst), i64immSExt8:$src2),
195 def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
196 "adc{q}\t{$src2, $dst|$dst, $src2}",
197 [(store (adde (load addr:$dst), i64immSExt32:$src2),
// 64-bit subtract: register, reversed-encoding, memory, and immediate forms.
// All set EFLAGS via X86sub_flag (enclosing Defs = [EFLAGS] scope).
201 let Constraints = "$src1 = $dst" in {
202 // Register-Register Subtraction
203 def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst),
204 (ins GR64:$src1, GR64:$src2),
205 "sub{q}\t{$src2, $dst|$dst, $src2}",
206 [(set GR64:$dst, EFLAGS,
207 (X86sub_flag GR64:$src1, GR64:$src2))]>;
// Reversed encoding (0x2B, reg <- r/m) for the disassembler only.
209 let isCodeGenOnly = 1 in {
210 def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
211 (ins GR64:$src1, GR64:$src2),
212 "sub{q}\t{$src2, $dst|$dst, $src2}", []>;
215 // Register-Memory Subtraction
216 def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
217 (ins GR64:$src1, i64mem:$src2),
218 "sub{q}\t{$src2, $dst|$dst, $src2}",
219 [(set GR64:$dst, EFLAGS,
220 (X86sub_flag GR64:$src1, (load addr:$src2)))]>;
222 // Register-Integer Subtraction
223 def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
224 (ins GR64:$src1, i64i8imm:$src2),
225 "sub{q}\t{$src2, $dst|$dst, $src2}",
226 [(set GR64:$dst, EFLAGS,
227 (X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
228 def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
229 (ins GR64:$src1, i64i32imm:$src2),
230 "sub{q}\t{$src2, $dst|$dst, $src2}",
231 [(set GR64:$dst, EFLAGS,
232 (X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
233 } // Constraints = "$src1 = $dst"
235 def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
236 "sub{q}\t{$src, %rax|%rax, $src}", []>;
238 // Memory-Register Subtraction
// NOTE(review): closing pattern lines of the memory-destination defs below
// are elided from this excerpt.
239 def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
240 "sub{q}\t{$src2, $dst|$dst, $src2}",
241 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
244 // Memory-Integer Subtraction
245 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
246 "sub{q}\t{$src2, $dst|$dst, $src2}",
247 [(store (sub (load addr:$dst), i64immSExt8:$src2),
250 def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
251 "sub{q}\t{$src2, $dst|$dst, $src2}",
252 [(store (sub (load addr:$dst), i64immSExt32:$src2),
// Subtract-with-borrow: consumes the carry flag (Uses = [EFLAGS]); patterns
// use the sube (subtract with borrow-in) node.
256 let Uses = [EFLAGS] in {
257 let Constraints = "$src1 = $dst" in {
258 def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst),
259 (ins GR64:$src1, GR64:$src2),
260 "sbb{q}\t{$src2, $dst|$dst, $src2}",
261 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
// Reversed encoding (0x1B) for the disassembler only.
263 let isCodeGenOnly = 1 in {
264 def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
265 (ins GR64:$src1, GR64:$src2),
266 "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
269 def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
270 (ins GR64:$src1, i64mem:$src2),
271 "sbb{q}\t{$src2, $dst|$dst, $src2}",
272 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
274 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
275 (ins GR64:$src1, i64i8imm:$src2),
276 "sbb{q}\t{$src2, $dst|$dst, $src2}",
277 [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
278 def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
279 (ins GR64:$src1, i64i32imm:$src2),
280 "sbb{q}\t{$src2, $dst|$dst, $src2}",
281 [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
282 } // Constraints = "$src1 = $dst"
284 def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
285 "sbb{q}\t{$src, %rax|%rax, $src}", []>;
287 def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
288 "sbb{q}\t{$src2, $dst|$dst, $src2}",
289 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
290 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
291 "sbb{q}\t{$src2, $dst|$dst, $src2}",
292 [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
293 def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
294 "sbb{q}\t{$src2, $dst|$dst, $src2}",
295 [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
299 // Unsigned multiplication
// One-operand widening multiplies: implicit RAX input, RDX:RAX output.
// No patterns (selected manually); neverHasSideEffects since the only
// effects are the modeled implicit register defs.
300 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
301 def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
302 "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
304 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
305 "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
307 // Signed multiplication
308 def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
309 "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
311 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
312 "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
// Two-operand imul (0x0F 0xAF, hence the TB prefix): dst = dst * src.
315 let Defs = [EFLAGS] in {
316 let Constraints = "$src1 = $dst" in {
317 let isCommutable = 1 in
318 // Register-Register Signed Integer Multiplication
319 def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
320 (ins GR64:$src1, GR64:$src2),
321 "imul{q}\t{$src2, $dst|$dst, $src2}",
322 [(set GR64:$dst, EFLAGS,
323 (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;
325 // Register-Memory Signed Integer Multiplication
326 def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
327 (ins GR64:$src1, i64mem:$src2),
328 "imul{q}\t{$src2, $dst|$dst, $src2}",
329 [(set GR64:$dst, EFLAGS,
330 (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
331 } // Constraints = "$src1 = $dst"
333 // Surprisingly enough, these are not two-address instructions!
// Three-operand imul: dst = src1 * imm (no $src1 = $dst constraint).
335 // Register-Integer Signed Integer Multiplication
336 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
337 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
338 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
339 [(set GR64:$dst, EFLAGS,
340 (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
341 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
342 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
343 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
344 [(set GR64:$dst, EFLAGS,
345 (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;
347 // Memory-Integer Signed Integer Multiplication
348 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
349 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
350 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
351 [(set GR64:$dst, EFLAGS,
352 (X86smul_flag (load addr:$src1),
353 i64immSExt8:$src2))]>;
354 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
355 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
356 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
357 [(set GR64:$dst, EFLAGS,
358 (X86smul_flag (load addr:$src1),
359 i64immSExt32:$src2))]>;
362 // Unsigned division / remainder
// One-operand divides: implicit RDX:RAX dividend, quotient in RAX,
// remainder in RDX. No patterns; selected manually.
363 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
364 // RDX:RAX/r64 = RAX,RDX
// NOTE(review): the asm-string lines of DIV64r and DIV64m (original lines
// 366 and 374) are elided from this excerpt.
365 def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
367 // Signed division / remainder
368 // RDX:RAX/r64 = RAX,RDX
369 def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
370 "idiv{q}\t$src", []>;
372 // RDX:RAX/[mem64] = RAX,RDX
373 def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
375 // RDX:RAX/[mem64] = RAX,RDX
376 def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
377 "idiv{q}\t$src", []>;
381 // Unary instructions
// NEG/INC/DEC. CodeSize = 2 biases instruction selection toward these
// short encodings. Several memory-form patterns below are visibly truncated
// in this excerpt (their "(implicit EFLAGS)]>;" closers are elided).
382 let Defs = [EFLAGS], CodeSize = 2 in {
383 let Constraints = "$src = $dst" in
384 def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
385 [(set GR64:$dst, (ineg GR64:$src)),
387 def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
388 [(store (ineg (loadi64 addr:$dst)), addr:$dst),
// INC/DEC are convertible to three-address form (LEA) by the two-address
// pass.
391 let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
392 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
393 [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
394 def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
395 [(store (add (loadi64 addr:$dst), 1), addr:$dst),
398 let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
399 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
400 [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
401 def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
402 [(store (add (loadi64 addr:$dst), -1), addr:$dst),
405 // In 64-bit mode, single byte INC and DEC cannot be encoded.
// 16/32-bit INC/DEC variants restricted to 64-bit mode, where the 0xFF /0
// and /1 ModRM forms must be used instead of the one-byte opcodes.
406 let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
407 // Can transform into LEA.
408 def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
410 [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
411 OpSize, Requires<[In64BitMode]>;
412 def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
414 [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
415 Requires<[In64BitMode]>;
416 def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
418 [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
419 OpSize, Requires<[In64BitMode]>;
420 def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
422 [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
423 Requires<[In64BitMode]>;
424 } // Constraints = "$src = $dst", isConvertibleToThreeAddress
426 // These are duplicates of their 32-bit counterparts. Only needed so X86 knows
427 // how to unfold them.
428 def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
429 [(store (add (loadi16 addr:$dst), 1), addr:$dst),
431 OpSize, Requires<[In64BitMode]>;
432 def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
433 [(store (add (loadi32 addr:$dst), 1), addr:$dst),
435 Requires<[In64BitMode]>;
436 def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
437 [(store (add (loadi16 addr:$dst), -1), addr:$dst),
439 OpSize, Requires<[In64BitMode]>;
440 def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
441 [(store (add (loadi32 addr:$dst), -1), addr:$dst),
443 Requires<[In64BitMode]>;
444 } // Defs = [EFLAGS], CodeSize
448 //===----------------------------------------------------------------------===//
449 // Logical Instructions...
// Bitwise NOT does not touch EFLAGS (defined outside the Defs = [EFLAGS]
// scope). AddedComplexity = 15 makes the pattern preferred over
// alternatives (e.g. xor with -1).
452 let Constraints = "$src = $dst" , AddedComplexity = 15 in
453 def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
454 [(set GR64:$dst, (not GR64:$src))]>;
455 def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
456 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
// 64-bit AND forms; all set EFLAGS via X86and_flag.
458 let Defs = [EFLAGS] in {
459 def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
460 "and{q}\t{$src, %rax|%rax, $src}", []>;
462 let Constraints = "$src1 = $dst" in {
463 let isCommutable = 1 in
464 def AND64rr : RI<0x21, MRMDestReg,
465 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
466 "and{q}\t{$src2, $dst|$dst, $src2}",
467 [(set GR64:$dst, EFLAGS,
468 (X86and_flag GR64:$src1, GR64:$src2))]>;
// Reversed encoding (0x23) for the disassembler only.
469 let isCodeGenOnly = 1 in {
470 def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
471 (ins GR64:$src1, GR64:$src2),
472 "and{q}\t{$src2, $dst|$dst, $src2}", []>;
474 def AND64rm : RI<0x23, MRMSrcMem,
475 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
476 "and{q}\t{$src2, $dst|$dst, $src2}",
477 [(set GR64:$dst, EFLAGS,
478 (X86and_flag GR64:$src1, (load addr:$src2)))]>;
479 def AND64ri8 : RIi8<0x83, MRM4r,
480 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
481 "and{q}\t{$src2, $dst|$dst, $src2}",
482 [(set GR64:$dst, EFLAGS,
483 (X86and_flag GR64:$src1, i64immSExt8:$src2))]>;
484 def AND64ri32 : RIi32<0x81, MRM4r,
485 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
486 "and{q}\t{$src2, $dst|$dst, $src2}",
487 [(set GR64:$dst, EFLAGS,
488 (X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
489 } // Constraints = "$src1 = $dst"
// Memory-destination forms. NOTE(review): their closing pattern lines are
// elided from this excerpt.
491 def AND64mr : RI<0x21, MRMDestMem,
492 (outs), (ins i64mem:$dst, GR64:$src),
493 "and{q}\t{$src, $dst|$dst, $src}",
494 [(store (and (load addr:$dst), GR64:$src), addr:$dst),
496 def AND64mi8 : RIi8<0x83, MRM4m,
497 (outs), (ins i64mem:$dst, i64i8imm :$src),
498 "and{q}\t{$src, $dst|$dst, $src}",
499 [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
501 def AND64mi32 : RIi32<0x81, MRM4m,
502 (outs), (ins i64mem:$dst, i64i32imm:$src),
503 "and{q}\t{$src, $dst|$dst, $src}",
504 [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
// 64-bit OR forms; all set EFLAGS via X86or_flag.
507 let Constraints = "$src1 = $dst" in {
508 let isCommutable = 1 in
509 def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst),
510 (ins GR64:$src1, GR64:$src2),
511 "or{q}\t{$src2, $dst|$dst, $src2}",
512 [(set GR64:$dst, EFLAGS,
513 (X86or_flag GR64:$src1, GR64:$src2))]>;
// Reversed encoding (0x0B) for the disassembler only.
514 let isCodeGenOnly = 1 in {
515 def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
516 (ins GR64:$src1, GR64:$src2),
517 "or{q}\t{$src2, $dst|$dst, $src2}", []>;
519 def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
520 (ins GR64:$src1, i64mem:$src2),
521 "or{q}\t{$src2, $dst|$dst, $src2}",
522 [(set GR64:$dst, EFLAGS,
523 (X86or_flag GR64:$src1, (load addr:$src2)))]>;
524 def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst),
525 (ins GR64:$src1, i64i8imm:$src2),
526 "or{q}\t{$src2, $dst|$dst, $src2}",
527 [(set GR64:$dst, EFLAGS,
528 (X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
529 def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
530 (ins GR64:$src1, i64i32imm:$src2),
531 "or{q}\t{$src2, $dst|$dst, $src2}",
532 [(set GR64:$dst, EFLAGS,
533 (X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
534 } // Constraints = "$src1 = $dst"
// Memory-destination forms. NOTE(review): their closing pattern lines are
// elided from this excerpt.
536 def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
537 "or{q}\t{$src, $dst|$dst, $src}",
538 [(store (or (load addr:$dst), GR64:$src), addr:$dst),
540 def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
541 "or{q}\t{$src, $dst|$dst, $src}",
542 [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
544 def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
545 "or{q}\t{$src, $dst|$dst, $src}",
546 [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
549 def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
550 "or{q}\t{$src, %rax|%rax, $src}", []>;
// 64-bit XOR forms; all set EFLAGS via X86xor_flag.
552 let Constraints = "$src1 = $dst" in {
553 let isCommutable = 1 in
554 def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst),
555 (ins GR64:$src1, GR64:$src2),
556 "xor{q}\t{$src2, $dst|$dst, $src2}",
557 [(set GR64:$dst, EFLAGS,
558 (X86xor_flag GR64:$src1, GR64:$src2))]>;
// Reversed encoding (0x33) for the disassembler only.
559 let isCodeGenOnly = 1 in {
560 def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
561 (ins GR64:$src1, GR64:$src2),
562 "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
564 def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst),
565 (ins GR64:$src1, i64mem:$src2),
566 "xor{q}\t{$src2, $dst|$dst, $src2}",
567 [(set GR64:$dst, EFLAGS,
568 (X86xor_flag GR64:$src1, (load addr:$src2)))]>;
569 def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst),
570 (ins GR64:$src1, i64i8imm:$src2),
571 "xor{q}\t{$src2, $dst|$dst, $src2}",
572 [(set GR64:$dst, EFLAGS,
573 (X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
574 def XOR64ri32 : RIi32<0x81, MRM6r,
575 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
576 "xor{q}\t{$src2, $dst|$dst, $src2}",
577 [(set GR64:$dst, EFLAGS,
578 (X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
579 } // Constraints = "$src1 = $dst"
// Memory-destination forms. NOTE(review): their closing pattern lines are
// elided from this excerpt.
581 def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
582 "xor{q}\t{$src, $dst|$dst, $src}",
583 [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
585 def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
586 "xor{q}\t{$src, $dst|$dst, $src}",
587 [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
589 def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
590 "xor{q}\t{$src, $dst|$dst, $src}",
591 [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
594 def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
595 "xor{q}\t{$src, %rax|%rax, $src}", []>;
599 //===----------------------------------------------------------------------===//
600 // Comparison Instructions...
603 // Integer comparison
// TEST (and-without-result) and CMP (sub-without-result): only EFLAGS is
// produced, so all defs have empty (outs).
604 let Defs = [EFLAGS] in {
605 def TEST64i32 : RIi32<0xa9, RawFrm, (outs), (ins i64i32imm:$src),
606 "test{q}\t{$src, %rax|%rax, $src}", []>;
607 let isCommutable = 1 in
608 def TEST64rr : RI<0x85, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
609 "test{q}\t{$src2, $src1|$src1, $src2}",
610 [(set EFLAGS, (X86cmp (and GR64:$src1, GR64:$src2), 0))]>;
// NOTE(review): the closing pattern lines of TEST64rm and TEST64ri32
// (original lines 614, 619) are elided from this excerpt.
611 def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
612 "test{q}\t{$src2, $src1|$src1, $src2}",
613 [(set EFLAGS, (X86cmp (and GR64:$src1, (loadi64 addr:$src2)),
615 def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
616 (ins GR64:$src1, i64i32imm:$src2),
617 "test{q}\t{$src2, $src1|$src1, $src2}",
618 [(set EFLAGS, (X86cmp (and GR64:$src1, i64immSExt32:$src2),
620 def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
621 (ins i64mem:$src1, i64i32imm:$src2),
622 "test{q}\t{$src2, $src1|$src1, $src2}",
623 [(set EFLAGS, (X86cmp (and (loadi64 addr:$src1),
624 i64immSExt32:$src2), 0))]>;
627 def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
628 "cmp{q}\t{$src, %rax|%rax, $src}", []>;
629 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
630 "cmp{q}\t{$src2, $src1|$src1, $src2}",
631 [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;
633 // These are alternate spellings for use by the disassembler, we mark them as
634 // code gen only to ensure they aren't matched by the assembler.
635 let isCodeGenOnly = 1 in {
636 def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
637 "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
640 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
641 "cmp{q}\t{$src2, $src1|$src1, $src2}",
642 [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
643 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
644 "cmp{q}\t{$src2, $src1|$src1, $src2}",
645 [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
646 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
647 "cmp{q}\t{$src2, $src1|$src1, $src2}",
648 [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
649 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
650 "cmp{q}\t{$src2, $src1|$src1, $src2}",
651 [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
652 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
653 "cmp{q}\t{$src2, $src1|$src1, $src2}",
654 [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
655 i64immSExt8:$src2))]>;
656 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
657 (ins i64mem:$src1, i64i32imm:$src2),
658 "cmp{q}\t{$src2, $src1|$src1, $src2}",
659 [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
660 i64immSExt32:$src2))]>;
664 // TODO: add selection patterns for BTC, BTR, and BTS (they are defined
// below, but currently with empty patterns).
// Bit-test family (0x0F-prefixed, hence TB). Only BT has selection
// patterns; BTC/BTR/BTS are assembler/disassembler-only for now.
665 let Defs = [EFLAGS] in {
666 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
667 "bt{q}\t{$src2, $src1|$src1, $src2}",
668 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
670 // Unlike with the register+register form, the memory+register form of the
671 // bt instruction does not ignore the high bits of the index. From ISel's
672 // perspective, this is pretty bizarre. Disable these instructions for now.
673 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
674 "bt{q}\t{$src2, $src1|$src1, $src2}",
675 // [(X86bt (loadi64 addr:$src1), GR64:$src2),
676 // (implicit EFLAGS)]
680 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
681 "bt{q}\t{$src2, $src1|$src1, $src2}",
682 [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
683 // Note that these instructions don't need FastBTMem because that
684 // only applies when the other operand is in a register. When it's
685 // an immediate, bt is still fast.
686 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
687 "bt{q}\t{$src2, $src1|$src1, $src2}",
688 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
689 i64immSExt8:$src2))]>, TB;
// Bit test-and-complement.
691 def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
692 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
693 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
694 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
695 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
696 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
697 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
698 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
// Bit test-and-reset.
700 def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
701 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
702 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
703 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
704 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
705 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
706 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
707 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
// Bit test-and-set.
709 def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
710 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
711 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
712 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
713 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
714 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
715 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
716 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
721 //===----------------------------------------------------------------------===//
722 // X86-64 SSE Instructions
723 //===----------------------------------------------------------------------===//
725 // Move instructions...
// GR64 <-> XMM moves (REX.W movd/movq). NOTE(review): the opening pattern
// lines of MOV64toPQIrr/MOVPQIto64rr (original lines 729, 734-735) are
// elided from this excerpt.
727 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
728 "mov{d|q}\t{$src, $dst|$dst, $src}",
730 (v2i64 (scalar_to_vector GR64:$src)))]>;
731 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
732 "mov{d|q}\t{$src, $dst|$dst, $src}",
733 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
// Bitcast moves between GR64 and FR64 (scalar double in an XMM register).
736 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
737 "mov{d|q}\t{$src, $dst|$dst, $src}",
738 [(set FR64:$dst, (bitconvert GR64:$src))]>;
739 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
740 "movq\t{$src, $dst|$dst, $src}",
741 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
743 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
744 "mov{d|q}\t{$src, $dst|$dst, $src}",
745 [(set GR64:$dst, (bitconvert FR64:$src))]>;
746 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
747 "movq\t{$src, $dst|$dst, $src}",
748 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;