//====- X86Instr64bit.td - Describe X86-64 Instructions ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64> {
  let ParserMatchClass = ImmSExti64i32AsmOperand;
}

// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
  let PrintMethod = "print_pcrel_imm";
  let ParserMatchClass = X86AbsMemAsmOperand;
}

// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64> {
  let ParserMatchClass = ImmSExti64i8AsmOperand;
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printi32mem";
  let AsmOperandLowerMethod = "lower_lea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}

// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
def i64mem_TC : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64_TC, i8imm, GR64_TC, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, frameindex,
                                X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
                               [tglobaltlsaddr], []>;
//===----------------------------------------------------------------------===//
// Pattern fragments.
//

def i64immSExt8  : PatLeaf<(i64 immSext8)>;

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;
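// GetLo32XForm is typically paired with a zero-extended-immediate predicate
// (such as i64immZExt32 below) in patterns that rewrite a 64-bit operation
// whose immediate has no high bits set into its narrower 32-bit form.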
def i64immSExt32  : PatLeaf<(i64 imm), [{ return i64immSExt32(N); }]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // unsigned field.
  return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
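// For example, 0x00000000FFFFFFFF satisfies i64immZExt32 (its high 32 bits
// are zero), while -1 == 0xFFFFFFFFFFFFFFFF does not.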
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
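// These fragments fold a narrow (1/8/16/32-bit) load and its extension to
// 64 bits into a single node, so ISel can select one load-and-extend
// instruction such as movsbq or movzwl.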
//===----------------------------------------------------------------------===//
// Instruction list...
//

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}

// Interrupt Instructions
def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", []>,
             Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
//  Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {

    // NOTE: this pattern doesn't match "X86call imm", because we do not know
    // that the offset between an arbitrary immediate and the call will fit in
    // the 32-bit pcrel field that we have.
    def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                          (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                          "call{q}\t$dst", []>,
                        Requires<[In64BitMode, NotWin64]>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                        Requires<[NotWin64]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                        Requires<[NotWin64]>;

    def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                         "lcall{q}\t{*}$dst", []>;
  }
// FIXME: We need to teach codegen about single list of call-clobbered
// registers.
let isCall = 1, isCodeGenOnly = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
      Uses = [RSP] in {
    def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                             (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                             "call\t$dst", []>,
                           Requires<[IsWin64]>;
    def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                             "call\t{*}$dst",
                             [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
    def WINCALL64m       : I<0xFF, MRM2m, (outs),
                             (ins i64mem:$dst, variable_ops), "call\t{*}$dst",
                             [(X86call (loadi64 addr:$dst))]>,
                           Requires<[IsWin64]>;
  }
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
    isCodeGenOnly = 1 in
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {
  def TCRETURNdi64 : I<0, Pseudo, (outs),
                       (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset", []>;
  def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64_TC:$dst, i32imm:$offset,
                                           variable_ops),
                       "#TC_RETURN $dst $offset", []>;
  let mayLoad = 1 in
  def TCRETURNmi64 : I<0, Pseudo, (outs),
                       (ins i64mem_TC:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset", []>;

  def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
                             (ins i64i32imm_pcrel:$dst, variable_ops),
                             "jmp\t$dst # TAILCALL", []>;
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64_TC:$dst, variable_ops),
                     "jmp{q}\t{*}$dst # TAILCALL", []>;

  let mayLoad = 1 in
  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
                     "jmp{q}\t{*}$dst # TAILCALL", []>;
}
// Branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                       "jmp{q}\t$dst", []>;
  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                     [(brind GR64:$dst)]>, Requires<[In64BitMode]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
  def FARJMP64   : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
                      "ljmp{q}\t{*}$dst", []>;
}
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>;
}
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;

let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
}
let mayStore = 1 in {
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
}
}

let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
def PUSH64i8   : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
                     "push{q}\t$imm", []>;
def PUSH64i16  : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
                      "push{q}\t$imm", []>;
def PUSH64i32  : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
                      "push{q}\t$imm", []>;
}
let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
def POPF64   : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
               Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
def PUSHF64    : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
                 Requires<[In64BitMode]>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;

let Constraints = "$src = $dst" in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;
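// BSWAP64r reverses the byte order of a 64-bit register, implementing the
// generic bswap node (e.g. for endianness conversions).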
// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;

def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
} // Defs = [EFLAGS]
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;
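// These REP forms are the instructions the X86rep_movs/X86rep_stos nodes
// select to; codegen emits them, e.g., when lowering suitable memcpy/memset
// calls with an 8-byte-multiple size.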
let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in
def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;

def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;

def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;

// Fast system-call instructions
def SYSEXIT64 : RI<0x35, RawFrm,
                   (outs), (ins), "sysexit", []>, TB, Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
//  Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}
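// MOV64ri32 is preferred over MOV64ri whenever the immediate fits in a
// sign-extended 32 bits: its encoding is several bytes shorter than the
// 10-byte movabsq.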
// The assembler accepts movq of a 64-bit immediate as an alternate spelling of
// movabsq.
let isAsmParserOnly = 1 in {
def MOV64ri_alt : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                        "mov{q}\t{$src, $dst|$dst, $src}", []>;
}

let isCodeGenOnly = 1 in {
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                     "mov{q}\t{$src, $dst|$dst, $src}", []>;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;
/// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
let isCodeGenOnly = 1 in {
let neverHasSideEffects = 1 in
def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}", []>;

let mayLoad = 1,
    canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;

let mayStore = 1 in
def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;
}
// FIXME: These definitions are utterly broken
// Just leave them commented out for now because they're useless outside
// of the large code model, and most compilers won't generate the instructions
// in question.
/*
def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
                    "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
                      "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
                    "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
                      "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
*/
// Moves to and from segment registers
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

// Moves to and from debug registers
def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Moves to and from control registers
def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
// Sign/Zero extenders

// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
// operand that can never access an h register. If support for h registers
// were generalized, this would require a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
// zero-extension, however this isn't possible when the 32-bit value is
// defined by a truncate or is copied from something where the high bits aren't
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "", [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
// Any instruction that defines a 32-bit result may leave the high half of the
// register in an unknown state as far as ISel can prove: Truncate can be
// lowered to EXTRACT_SUBREG, CopyFromReg may be copying from a truncate, and
// x86's cmov doesn't do anything if the condition is false. But any other
// 32-bit operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
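// For example, (i64 (zext (and GR32:$a, GR32:$b))) can be selected as a plain
// 32-bit andl followed by SUBREG_TO_REG; no explicit zero-extend instruction
// is needed because andl already clears the upper 32 bits.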
let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
                "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
                "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}
//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let Defs = [EFLAGS] in {

def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
                     "add{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
def ADD64rr    : RI<0x01, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86add_flag GR64:$src1, GR64:$src2))]>;
// These are alternate spellings for use by the disassembler, we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
  def ADD64rr_alt : RI<0x03, MRMSrcReg, (outs GR64:$dst),
                       (ins GR64:$src1, GR64:$src2),
                       "add{q}\t{$src2, $dst|$dst, $src2}", []>;
}
// Register-Integer Addition
def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i8imm:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, i64immSExt8:$src2))]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86add_flag GR64:$src1, i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress

// Register-Memory Addition
def ADD64rm     : RI<0x03, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$src1, i64mem:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, (load addr:$src2)))]>;

} // Constraints = "$src1 = $dst"

// Memory-Register Addition
def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
                     (implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
                       (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {

def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
                     "adc{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}", []>;
}

def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst)]>;
} // Uses = [EFLAGS]
let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sub{q}\t{$src2, $dst|$dst, $src2}", []>;
}

// Register-Memory Subtraction
def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, (load addr:$src2)))]>;

// Register-Integer Subtraction
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sub{q}\t{$src, %rax|%rax, $src}", []>;

// Memory-Register Subtraction
def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;

// Memory-Integer Subtraction
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst),
                     (implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst),
                       (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
}

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sbb{q}\t{$src, %rax|%rax, $src}", []>;

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*[mem64]
}
let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;

// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
} // Constraints = "$src1 = $dst"
// Surprisingly enough, these are not two-address instructions!
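// (The immediate forms below take separate $dst and $src1 operands, so no
// tied-operand constraint is required.)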
// Register-Integer Signed Integer Multiplication
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;

// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag (load addr:$src1),
                                          i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                   // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag (load addr:$src1),
                                            i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
// RDX:RAX/r64 = RAX,RDX
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
                "div{q}\t$src", []>;
// Signed division / remainder
// RDX:RAX/r64 = RAX,RDX
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
                "idiv{q}\t$src", []>;

let mayLoad = 1 in {
// RDX:RAX/[mem64] = RAX,RDX
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
                "div{q}\t$src", []>;
// RDX:RAX/[mem64] = RAX,RDX
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
                "idiv{q}\t$src", []>;
}
}
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let Constraints = "$src = $dst" in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src)),
                 (implicit EFLAGS)]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
                 (implicit EFLAGS)]>;

// In 64-bit mode, single-byte INC and DEC cannot be encoded: the 0x40-0x4F
// opcode bytes are repurposed as REX prefixes.
let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
                  "inc{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
                  "inc{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
                  "dec{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
                  "dec{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
} // Constraints = "$src = $dst", isConvertibleToThreeAddress

// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), 1), addr:$dst),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), 1), addr:$dst),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), -1), addr:$dst),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), -1), addr:$dst),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper.
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t$dst", []>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src1, CL))]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "sar{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "sar{q}\t{$src, $dst|$dst, $src}",
                    [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Rotate instructions

let Constraints = "$src = $dst" in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;

def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                  "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                  "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
} // Constraints = "$src = $dst"

def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
                  "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
                  "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "rol{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t$dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t$dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
                 TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
                 TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                                (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                                (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // Constraints = "$src1 = $dst"

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                            addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                            addr:$dst)]>, TB;
}

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let Constraints = "$src = $dst", AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let Defs = [EFLAGS] in {
def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
                     "and{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "and{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, (load addr:$src2)))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86and_flag GR64:$src1, i64immSExt8:$src2))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "and{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def AND64mi32  : RIi32<0x81, MRM4m,
                       (outs), (ins i64mem:$dst, i64i32imm:$src),
                       "and{q}\t{$src, $dst|$dst, $src}",
                       [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                        (implicit EFLAGS)]>;
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def OR64rm   : RI<0x0B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, (load addr:$src2)))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                      (implicit EFLAGS)]>;

def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
                    "or{q}\t{$src, %rax|%rax, $src}", []>;
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, (load addr:$src2)))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;

def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
                     "xor{q}\t{$src, %rax|%rax, $src}", []>;

} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
def TEST64i32 : RIi32<0xa9, RawFrm, (outs), (ins i64i32imm:$src),
                      "test{q}\t{$src, %rax|%rax, $src}", []>;
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, GR64:$src2), 0))]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, (loadi64 addr:$src2)),
                                        0))]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and GR64:$src1, i64immSExt32:$src2),
                                             0))]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and (loadi64 addr:$src1),
                                                  i64immSExt32:$src2), 0))]>;
def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
                     "cmp{q}\t{$src, %rax|%rax, $src}", []>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;

// These are alternate spellings for use by the disassembler, we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
  def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
}

def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                          i64immSExt8:$src2))]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                            i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]
// Bit tests.
// TODO: BTC, BTR, and BTS
let Defs = [EFLAGS] in {
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;

// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Disable these instructions for now.
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
//                [(X86bt (loadi64 addr:$src1), GR64:$src2),
//                 (implicit EFLAGS)]
                []
                >, TB;

def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86bt (loadi64 addr:$src1),
                                        i64immSExt8:$src2))]>, TB;

def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // Defs = [EFLAGS]
// Conditional moves
let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,      // if !overflow, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1

def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm : RI<0x41, MRMSrcMem,      // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NO, EFLAGS))]>, TB;
} // Constraints = "$src1 = $dst"

// Use sbb to materialize carry flag into a GPR.
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change this to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;

def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;
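
// SETB_C64r assembles to "sbb{q} $dst, $dst": subtracting a register from
// itself with borrow yields 0 when CF is clear and all-ones (-1) when CF is
// set, which materializes the carry flag as a full-width mask.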

//===----------------------------------------------------------------------===//
// Descriptor-table support instructions

// LLDT is not interpreted specially in 64-bit mode because there is no sign
// extension.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;
def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS],
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                [(set GR64:$dst, 0)]>;
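
// The MOV32r0 form lowers to "xor{l} %reg, %reg", which is 2 bytes for the
// legacy registers and implicitly zeroes the upper 32 bits of the 64-bit
// register, versus 7 bytes for the REX.W-prefixed mov-immediate-zero encoding.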

// Materialize an i64 constant whose top 32 bits are zero. This could
// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
// zero-extension; however, that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// ELF TLS Support
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   ".byte\t0x66; "
                   "leaq\t$sym(%rip), %rdi; "
                   ".word\t0x6666; "
                   "rex64; "
                   "call\t__tls_get_addr@PLT",
                   [(X86tlsaddr tls64addr:$sym)]>,
                   Requires<[In64BitMode]>;
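
// The prefix padding in the sequence above is deliberate: linkers recognize
// this exact general-dynamic TLS code sequence and may relax it to a cheaper
// TLS access model, so the emitted bytes must not be reordered or shrunk.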

// Darwin TLS Support
// For x86_64, the address of the thunk is passed in %rdi; on return,
// the address of the variable is in %rax. All other registers are preserved.
let Defs = [RAX],
    Uses = [RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In64BitMode]>;

let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%gs:$src, $dst",
                   [(set GR64:$dst, (gsload addr:$src))]>, SegGS;

let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%fs:$src, $dst",
                   [(set GR64:$dst, (fsload addr:$src))]>, SegFS;

//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//

// TODO: Get this to fold the constant into the instruction.
let hasSideEffects = 1, Defs = [ESP] in
def Int_MemBarrierNoSSE64 : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
                               "lock\n\t"
                               "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
                               [(X86MemBarrierNoSSE GR64:$zero)]>,
                               Requires<[In64BitMode]>, LOCK;
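
// A lock-prefixed read-modify-write of a stack location orders all prior
// loads and stores, giving mfence-like fence semantics on targets without
// SSE2.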

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
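
// cmpxchgq compares RAX with the memory operand; on a match it stores $swap,
// otherwise it loads the current memory value into RAX. RAX is therefore both
// read and written, which is why it appears in both Uses and Defs above.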

let Constraints = "$val = $dst" in {
let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;

def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$val,i64mem:$ptr),
                  "xchg{q}\t{$val, $ptr|$ptr, $val}",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;

def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
                  "xchg{q}\t{$val, $src|$src, $val}", []>;
}

def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                  "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in
def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;

def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
                    "cmpxchg16b\t$dst", []>, TB;

def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
                  "xchg{q}\t{$src, %rax|%rax, $src}", []>;

// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
                           (ins i64mem:$dst, i64i32imm :$src2),
                           "lock\n\t"
                           "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
                           (ins i64mem:$dst, i64i32imm:$src2),
                           "lock\n\t"
                           "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMXOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMNAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

// Segmentation support instructions

// The i16mem operand in LAR64rm and the GR32 operand in LAR64rr are not typos.
def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;

def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;

def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;

def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
                 "push{q}\t%fs", []>, TB;
def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
                 "push{q}\t%gs", []>, TB;

def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
                "pop{q}\t%fs", []>, TB;
def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
                "pop{q}\t%gs", []>, TB;

def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lss{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Specialized register support

// no m form encodable; use SMSW16m
def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
                 "smsw{q}\t$dst", []>, TB;

// String manipulation instructions

def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable addresses, when
// not in small code model, should use 'movabs'. FIXME: This is really a hack;
// the 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'. FIXME: This is a hack; the 'imm' predicate of
// MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack; the 'imm' predicate of
// MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// With small code model in -static mode, it is safe to store global addresses
// directly as immediates. FIXME: This is really a hack; the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
      Requires<[NearData, IsStatic]>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;

def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
          (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
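
// For example, "test{q} %rax, %rax" is 3 bytes, while "cmp{q} $0, %rax"
// needs 4 (REX.W + 0x83 /7 + imm8); both set the flags identically for a
// comparison against zero.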

// Conditional moves with folded loads with operands swapped and conditions
// reversed.
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
          (CMOVAE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
          (CMOVB64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
          (CMOVNE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
          (CMOVE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
          (CMOVA64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
          (CMOVBE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
          (CMOVGE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
          (CMOVL64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
          (CMOVG64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
          (CMOVLE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
          (CMOVNP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
          (CMOVP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
          (CMOVNS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
          (CMOVS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
          (CMOVNO64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
          (CMOVO64rm GR64:$src2, addr:$src1)>;
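
// The operand swap is sound because CMOV can only fold a load of its second
// operand: select(cond, load, reg) is rewritten as select(!cond, reg, load),
// with each condition replaced by its inverse (B <-> AE, E <-> NE, etc.).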

// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext: define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
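
// The imm8 forms take a sign-extended 8-bit immediate in [-128, 127], so
// "add $128" would need the 7-byte imm32 encoding while "sub $-128" fits the
// 4-byte imm8 encoding.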

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// Use a 32-bit and with implicit zero-extension instead of a 64-bit and if it
// has an immediate with at least 32 bits of leading zeros, to avoid needing to
// materialize that immediate in a register first.
def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
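
// For example, (and GR64, 0x00000000fedcba98) cannot use AND64ri32, since
// that 32-bit immediate would be sign-extended (bit 31 is set); an AND32ri on
// the low subregister computes the same low 32 bits, and the implicit
// zero-extension of 32-bit writes supplies the all-zero upper half.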

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
      Requires<[In64BitMode]>;

// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
      Requires<[In64BitMode]>;

// trunc patterns
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h-register, as there's
// currently no way to describe this requirement to the register allocator.
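
// Background: an instruction carrying a REX prefix cannot encode AH, BH, CH,
// or DH (those encodings address SPL, BPL, SIL, and DIL instead), so the
// _NOREX instruction forms and the ABCD register classes below keep
// h-register accesses REX-free.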

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
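
// This is safe because 64-bit shifts use only the low 6 bits of the count in
// CL; the hardware performs the (and CL, 63) masking implicitly, so the
// explicit mask can be dropped.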

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting to OR
def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or_is_add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
} // AddedComplexity
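
// or_is_add only matches when the operands are known to have no set bits in
// common, e.g. when or'ing a small tag into the known-zero low bits of an
// aligned pointer; in that case OR and ADD compute the same value, and ADD
// can additionally be folded into an LEA.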

// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, imm:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiplication
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// inc/dec
def : Pat<(add GR16:$src, 1),  (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1),  (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

// or
def : Pat<(or GR64:$src1, GR64:$src2),
          (OR64rr GR64:$src1, GR64:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// xor
def : Pat<(xor GR64:$src1, GR64:$src2),
          (XOR64rr GR64:$src1, GR64:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// and
def : Pat<(and GR64:$src1, GR64:$src2),
          (AND64rr GR64:$src1, GR64:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;