//====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand Definitions.
//
20 // 64-bits but only 32 bits are significant.
21 def i64i32imm : Operand<i64> {
22 let ParserMatchClass = ImmSExti64i32AsmOperand;
25 // 64-bits but only 32 bits are significant, and those bits are treated as being
27 def i64i32imm_pcrel : Operand<i64> {
28 let PrintMethod = "print_pcrel_imm";
29 let ParserMatchClass = X86AbsMemAsmOperand;
33 // 64-bits but only 8 bits are significant.
34 def i64i8imm : Operand<i64> {
35 let ParserMatchClass = ImmSExti64i8AsmOperand;
// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
def i64mem_TC : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64_TC, i8imm, GR64_TC, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
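// Illustrative example (not from the original source): a folded-load tail
// call such as "jmpq *(%rbx)" is emitted after the epilogue has popped the
// callee-saved registers, so if the address lived in RBX it would already
// have been overwritten with the caller's saved value by the time the jump
// reads it. Restricting the address to GR64_TC avoids that hazard.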
def lea64mem : Operand<i64> {
  let PrintMethod = "printlea64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm);
  let ParserMatchClass = X86NoSegMemAsmOperand;
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let AsmOperandLowerMethod = "lower_lea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
  let ParserMatchClass = X86NoSegMemAsmOperand;
}

//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, frameindex,
                                X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
                               [tglobaltlsaddr], []>;
//===----------------------------------------------------------------------===//
// Pattern fragments.
//

def i64immSExt8  : PatLeaf<(i64 immSext8)>;

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
}]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
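// Worked examples for the two predicates above (illustrative, not from the
// original source): 0xFFFFFFFF80000000 sign-extends from its low 32 bits
// (0x80000000), so it satisfies i64immSExt32 but not i64immZExt32, while
// 0x0000000080000000 zero-extends from 0x80000000, so it satisfies
// i64immZExt32 but not i64immSExt32. Small non-negative values such as 42
// satisfy both, and so can use the shorter imm32 encodings either way.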
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
//===----------------------------------------------------------------------===//
// Instruction list...
//

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
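// For example (illustrative): "ADJCALLSTACKDOWN64 32" is typically lowered
// to "subq $32, %rsp" and the matching ADJCALLSTACKUP64 to "addq $32, %rsp";
// both SUB and ADD write EFLAGS, hence EFLAGS in the Defs list below.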
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}

// Interrupt Instructions
def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iret{q}", []>;
//===----------------------------------------------------------------------===//
//  Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {

    // NOTE: this pattern doesn't match "X86call imm", because we do not know
    // that the offset between an arbitrary immediate and the call will fit in
    // the 32-bit pcrel field that we have.
    def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                          (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                          "call{q}\t$dst", []>,
                        Requires<[In64BitMode, NotWin64]>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                        Requires<[NotWin64]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                        Requires<[NotWin64]>;

    def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                         "lcall{q}\t{*}$dst", []>;
  }
// FIXME: We need to teach codegen about single list of call-clobbered
// registers.
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
      Uses = [RSP] in {
    def WINCALL64pcrel32 : I<0xE8, RawFrm,
                             (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                             "call\t$dst", []>,
                           Requires<[IsWin64]>;
    def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                             "call\t{*}$dst",
                             [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
    def WINCALL64m       : I<0xFF, MRM2m, (outs),
                             (ins i64mem:$dst, variable_ops), "call\t{*}$dst",
                             [(X86call (loadi64 addr:$dst))]>,
                           Requires<[IsWin64]>;
  }

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {
  def TCRETURNdi64 : I<0, Pseudo, (outs),
                       (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset", []>;
  def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64_TC:$dst, i32imm:$offset,
                                           variable_ops),
                       "#TC_RETURN $dst $offset", []>;
  let mayLoad = 1 in
  def TCRETURNmi64 : I<0, Pseudo, (outs),
                       (ins i64mem_TC:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset", []>;

  def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
                             (ins i64i32imm_pcrel:$dst, variable_ops),
                             "jmp\t$dst  # TAILCALL", []>;
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64_TC:$dst, variable_ops),
                     "jmp{q}\t{*}$dst  # TAILCALL", []>;

  let mayLoad = 1 in
  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
                     "jmp{q}\t{*}$dst  # TAILCALL", []>;
}
// Branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                       "jmp{q}\t$dst", []>;
  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                    [(brind GR64:$dst)]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                    [(brind (loadi64 addr:$dst))]>;
  def FARJMP64   : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
                      "ljmp{q}\t{*}$dst", []>;
}
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>;
}
//===----------------------------------------------------------------------===//
//  Miscellaneous Instructions...
//

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
let mayLoad = 1 in
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;

let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
}
let mayStore = 1 in {
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
}
}

let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
def PUSH64i8   : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
                     "push{q}\t$imm", []>;
def PUSH64i16  : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
                      "push{q}\t$imm", []>;
def PUSH64i32  : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
                      "push{q}\t$imm", []>;
}
let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
def POPF64   : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
               Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
def PUSHF64    : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
                 Requires<[In64BitMode]>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;

let Constraints = "$src = $dst" in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;

def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
} // Defs = [EFLAGS]
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;

let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in
def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;

def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;

def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;

// Fast system-call instructions
def SYSEXIT64 : RI<0x35, RawFrm,
                   (outs), (ins), "sysexit", []>, TB;
//===----------------------------------------------------------------------===//
//  Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}
// The assembler accepts movq of a 64-bit immediate as an alternate spelling of
// movabsq.
let isAsmParserOnly = 1 in {
def MOV64ri_alt : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                        "mov{q}\t{$src, $dst|$dst, $src}", []>;
}

let isCodeGenOnly = 1 in {
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                     "mov{q}\t{$src, $dst|$dst, $src}", []>;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;
/// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
let neverHasSideEffects = 1 in
def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}", []>;

let mayLoad = 1,
    canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;

let mayStore = 1 in
def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;

def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
                    "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
                      "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
                    "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
                      "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
// Moves to and from segment registers
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

// Moves to and from debug registers
def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Moves to and from control registers
def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
// Sign/Zero extenders

// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
// operand that can never access an h register. If support for h registers
// were generalized, this would require a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                      "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                      "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
// zero-extension, however this isn't possible when the 32-bit value is
// defined by a truncate or is copied from something where the high bits aren't
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "", [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. And x86's cmov doesn't do anything if the
// condition is false. But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
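// Illustrative example (not from the original source): given IR such as
//   %a = add i32 %x, %y
//   %z = zext i32 %a to i64
// the 32-bit ADD already zeroed bits 63:32 of the destination register, so
// the pattern above folds the zext into a zero-cost SUBREG_TO_REG rather
// than emitting a separate movl.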
let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
                "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
               "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}
//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let Defs = [EFLAGS] in {

def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
                     "add{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
def ADD64rr    : RI<0x01, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86add_flag GR64:$src1, GR64:$src2))]>;

// These are alternate spellings for use by the disassembler, we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
  def ADD64rr_alt  : RI<0x03, MRMSrcReg, (outs GR64:$dst),
                        (ins GR64:$src1, GR64:$src2),
                        "add{q}\t{$src2, $dst|$dst, $src2}", []>;
}

// Register-Integer Addition
def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i8imm:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, i64immSExt8:$src2))]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86add_flag GR64:$src1, i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress

// Register-Memory Addition
def ADD64rm     : RI<0x03, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$src1, i64mem:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, (load addr:$src2)))]>;

} // Constraints = "$src1 = $dst"
// Memory-Register Addition
def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
                     (implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
                       (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {

def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
                     "adc{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst)]>;
} // Uses = [EFLAGS]
let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sub{q}\t{$src2, $dst|$dst, $src2}", []>;
}
// Register-Memory Subtraction
def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, (load addr:$src2)))]>;

// Register-Integer Subtraction
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"
def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sub{q}\t{$src, %rax|%rax, $src}", []>;

// Memory-Register Subtraction
def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;

// Memory-Integer Subtraction
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst),
                     (implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst),
                       (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
}

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sbb{q}\t{$src, %rax|%rax, $src}", []>;

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]
// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]
}
let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;

// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
} // Constraints = "$src1 = $dst"
// Surprisingly enough, these are not two-address instructions!

// Register-Integer Signed Integer Multiplication
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;

// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                     // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag (load addr:$src1),
                                          i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                  // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag (load addr:$src1),
                                            i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
// RDX:RAX/r64 = RAX,RDX
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
                "div{q}\t$src", []>;
// Signed division / remainder
// RDX:RAX/r64 = RAX,RDX
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
                "idiv{q}\t$src", []>;
let mayLoad = 1 in {
// RDX:RAX/[mem64] = RAX,RDX
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
                "div{q}\t$src", []>;
// RDX:RAX/[mem64] = RAX,RDX
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
                "idiv{q}\t$src", []>;
}
}
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let Constraints = "$src = $dst" in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src)),
                 (implicit EFLAGS)]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
                 (implicit EFLAGS)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
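// Illustrative background (not from the original source): the one-byte
// opcodes 0x40-0x4F that encode inc/dec on 32-bit x86 are repurposed as REX
// prefixes in 64-bit mode, so the encoder must use the two-byte FF /0 and
// FF /1 forms defined below instead.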
let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
                  "inc{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
                  "inc{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
                  "dec{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
                  "dec{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
} // Constraints = "$src = $dst", isConvertibleToThreeAddress
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), 1), addr:$dst),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), 1), addr:$dst),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), -1), addr:$dst),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), -1), addr:$dst),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper.
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t$dst", []>;
} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src1, CL))]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "sar{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "sar{q}\t{$src, $dst|$dst, $src}",
                 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Rotate instructions

let Constraints = "$src = $dst" in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;

def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                  "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                  "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
} // Constraints = "$src = $dst"

def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
                  "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
                  "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "rol{q}\t{$src, $dst|$dst, $src}",
                [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t$dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t$dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
                 TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
                 TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
} // isCommutable = 1
} // Constraints = "$src1 = $dst"

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
}

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let Constraints = "$src = $dst", AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let Defs = [EFLAGS] in {
def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
                     "and{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "and{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, (load addr:$src2)))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86and_flag GR64:$src1, i64immSExt8:$src2))]>;
def AND64ri32  : RIi32<0x81, MRM4r,
                       (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                       "and{q}\t{$src2, $dst|$dst, $src2}",
                       [(set GR64:$dst, EFLAGS,
                             (X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def AND64mi32  : RIi32<0x81, MRM4m,
                       (outs), (ins i64mem:$dst, i64i32imm:$src),
                       "and{q}\t{$src, $dst|$dst, $src}",
                       [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                        (implicit EFLAGS)]>;
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, (load addr:$src2)))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                      (implicit EFLAGS)]>;

def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
                    "or{q}\t{$src, %rax|%rax, $src}", []>;
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg,  (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, (load addr:$src2)))]>;
def XOR64ri8 : RIi8<0x83, MRM6r,  (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;

def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
                     "xor{q}\t{$src, %rax|%rax, $src}", []>;

} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
def TEST64i32 : RIi32<0xa9, RawFrm, (outs), (ins i64i32imm:$src),
                      "test{q}\t{$src, %rax|%rax, $src}", []>;
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, GR64:$src2), 0))]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, (loadi64 addr:$src2)),
                    0))]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and GR64:$src1,
                                                  i64immSExt32:$src2),
                         0))]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and (loadi64 addr:$src1),
                                     i64immSExt32:$src2), 0))]>;
def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
                     "cmp{q}\t{$src, %rax|%rax, $src}", []>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;

// These are alternate spellings for use by the disassembler, we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
  def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
}
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                          i64immSExt8:$src2))]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                            i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]
// Bit tests.
// TODO: BTC, BTR, and BTS
let Defs = [EFLAGS] in {
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;

// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Disable these instructions for now.
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
//               [(X86bt (loadi64 addr:$src1), GR64:$src2),
//                (implicit EFLAGS)]
                []
                >, TB;

def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                  "bt{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB,
                  REX_W;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                  "bt{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt (loadi64 addr:$src1),
                                       i64immSExt8:$src2))]>, TB;

def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // Defs = [EFLAGS]

// Conditional moves
let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,      // if !overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1

def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm : RI<0x41, MRMSrcMem,      // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NO, EFLAGS))]>, TB;
} // Constraints = "$src1 = $dst"

// Use sbb to materialize carry flag into a GPR.
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change this to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;

def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;
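
// Illustration (comment only): SETB_C64r is emitted as an sbb of a register
// with itself, e.g. "sbbq %rax, %rax", which computes rax - rax - CF. The
// result is 0 when the carry flag is clear and all-ones when it is set, so
// CF is materialized into a GPR without a branch.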

//===----------------------------------------------------------------------===//
// Conversion Instructions...
//===----------------------------------------------------------------------===//

// f64 -> signed i64
def CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                       "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>;
def CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                       "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>;
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst),
                           (ins f64mem:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
                                             (load addr:$src)))]>;
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst),
                            (ins f64mem:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64
                               (load addr:$src)))]>;

// Signed i64 -> f64
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              GR64:$src2))]>;
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              (loadi64 addr:$src2)))]>;
} // Constraints = "$src1 = $dst"

// Signed i64 -> f32
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
                            (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               GR64:$src2))]>;
def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
                            (outs VR128:$dst),
                            (ins VR128:$src1, i64mem:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               (loadi64 addr:$src2)))]>;
} // Constraints = "$src1 = $dst"

// f32 -> signed i64
def CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                       "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>;
def CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                       "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>;
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse_cvtss2si64 VR128:$src))]>;
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse_cvtss2si64
                                             (load addr:$src)))]>;
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 VR128:$src))]>;
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst),
                            (ins f32mem:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 (load addr:$src)))]>;

// Descriptor-table support instructions

// LLDT is not interpreted specially in 64-bit mode because there is no sign
// extension.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;
def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS],
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                [(set GR64:$dst, 0)]>;
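
// Illustration (comment only): MOV64r0 is ultimately printed as
//   xorl %eax, %eax
// since in 64-bit mode a 32-bit register write implicitly zeroes bits 63:32;
// the 2-byte 32-bit xor thus produces a full 64-bit zero (and, as the Defs
// list above records, clobbers EFLAGS).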

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// ELF TLS Support
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
                   ".byte\t0x66; "
                   "leaq\t$sym(%rip), %rdi; "
                   ".word\t0x6666; "
                   "rex64; "
                   "call\t__tls_get_addr@PLT",
                   [(X86tlsaddr tls64addr:$sym)]>,
                   Requires<[In64BitMode]>;

// Darwin TLS Support
// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax. All other registers are preserved.
let Defs = [RAX],
    Uses = [RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In64BitMode]>;

let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%gs:$src, $dst",
                   [(set GR64:$dst, (gsload addr:$src))]>, SegGS;

let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%fs:$src, $dst",
                   [(set GR64:$dst, (fsload addr:$src))]>, SegFS;

//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}

let Constraints = "$val = $dst" in {
let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;

def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$val,i64mem:$ptr),
                  "xchg{q}\t{$val, $ptr|$ptr, $val}",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;

def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
                  "xchg{q}\t{$val, $src|$src, $val}", []>;
}

def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                  "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in
def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;

def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
                    "cmpxchg16b\t$dst", []>, TB;

def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
                  "xchg{q}\t{$src, %rax|%rax, $src}", []>;

// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
def LOCK_ADD64mr : RI<0x03, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
                           (ins i64mem:$dst, i64i32imm :$src2),
                           "lock\n\t"
                           "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
                           (ins i64mem:$dst, i64i32imm:$src2),
                           "lock\n\t"
                           "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}
// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMXOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMNAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

// Segmentation support instructions

// i16mem operand in LAR64rm and GR32 operand in LAR64rr is not a typo.
def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;

def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;

def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;

def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
                 "push{q}\t%fs", []>, TB;
def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
                 "push{q}\t%gs", []>, TB;

def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
                "pop{q}\t%fs", []>, TB;
def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
                "pop{q}\t%gs", []>, TB;

def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lss{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Specialized register support

// no m form encodable; use SMSW16m
def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
                 "smsw{q}\t$dst", []>, TB;

// String manipulation instructions

def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable references, when
// not in the small code model, should use 'movabs'. FIXME: This is really a
// hack; the 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
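
// Illustration (comment only): under FarData the address may not fit in 32
// bits, so MOV64ri produces the full 64-bit immediate form, e.g.
//   movabsq $symbol, %rax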

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'. FIXME: This is a hack; the 'imm' predicate of
// MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri64i32 tglobaltlsaddr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack; the 'imm' predicate of
// MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
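
// Illustration (comment only): the same symbol materialization across code
// models, roughly:
//   small/static: movl $symbol, %eax      (imm32, zero-extended to 64 bits)
//   kernel:       movq $symbol, %rax      (imm32, sign-extended to 64 bits)
//   far data:     movabsq $symbol, %rax   (full imm64)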

// In the small code model with static relocation, it is safe to store global
// addresses directly as immediates. FIXME: This is really a hack; the 'imm'
// predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaltlsaddr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaltlsaddr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

// Calls
// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;

// Tail calls.
def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
          (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
          (CMOVAE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
          (CMOVB64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
          (CMOVNE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
          (CMOVE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
          (CMOVA64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
          (CMOVBE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
          (CMOVGE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
          (CMOVL64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
          (CMOVG64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
          (CMOVLE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
          (CMOVNP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
          (CMOVP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
          (CMOVNS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
          (CMOVS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
          (CMOVNO64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
          (CMOVO64rm GR64:$src2, addr:$src1)>;
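
// Illustration (comment only): with the first pattern above,
//   (X86cmov (load m), r, COND_B)   ==>   (CMOVAE64rm r, m)
// "m if below, else r" equals "m if not above-or-equal, else r", so swapping
// the operands and inverting the condition lets the load be folded even
// though cmov can only fold its second operand.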

// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;
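
// Illustration (comment only): an anyext i16 load is selected as
//   movzwl (%rdi), %eax
// which writes the full 64-bit register (the 32-bit write zeroes the upper
// half), so no partial-register dependency is left behind.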

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
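
// Illustration (comment only): "addq $128, %rax" requires the imm32 form
// (7 bytes), while the equivalent "subq $-128, %rax" fits in imm8 (4 bytes).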

// Use a 32-bit and with implicit zero-extension instead of a 64-bit and if it
// has an immediate with at least 32 bits of leading zeros, to avoid needing to
// materialize that immediate in a register first.
def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
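
// Illustration (comment only): an immediate like 0xFFFF0000 is not
// sign-extendable from 32 bits, so a 64-bit and would first need a movabsq
// into a scratch register; the pattern above instead emits
//   andl $0xFFFF0000, %eax
// whose 32-bit result is implicitly zero-extended to 64 bits.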
2077 // r & (2^32-1) ==> movz
2078 def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
2079 (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
2080 // r & (2^16-1) ==> movz
2081 def : Pat<(and GR64:$src, 0xffff),
2082 (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
2083 // r & (2^8-1) ==> movz
2084 def : Pat<(and GR64:$src, 0xff),
2085 (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
2086 // r & (2^8-1) ==> movz
2087 def : Pat<(and GR32:$src1, 0xff),
2088 (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
2089 Requires<[In64BitMode]>;
2090 // r & (2^8-1) ==> movz
2091 def : Pat<(and GR16:$src1, 0xff),
2092 (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
2093 Requires<[In64BitMode]>;

// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
      Requires<[In64BitMode]>;

// trunc patterns
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
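
// Illustration (comment only): "addq %rax, %rax" computes the same value as
// "shlq $1, %rax"; on typical x86 cores the add is at least as cheap as the
// shift and gives the scheduler more freedom.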

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting to OR
def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or_is_add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
} // AddedComplexity
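
// Illustration (comment only): or_is_add matches only when the operands are
// known to share no set bits, e.g. (or (shl x, 8), 0xff); selecting an add
// there is value-preserving and lets the result feed addressing-mode (LEA)
// folding that a plain or cannot.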

// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiplication
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// inc/dec
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

// or
def : Pat<(or GR64:$src1, GR64:$src2),
          (OR64rr GR64:$src1, GR64:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// xor
def : Pat<(xor GR64:$src1, GR64:$src2),
          (XOR64rr GR64:$src1, GR64:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// and
def : Pat<(and GR64:$src1, GR64:$src2),
          (AND64rr GR64:$src1, GR64:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;