//====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64> {
  let ParserMatchClass = ImmSExti64i32AsmOperand;
}

// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
  let PrintMethod = "print_pcrel_imm";
  let ParserMatchClass = X86AbsMemAsmOperand;
}

// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64> {
  let ParserMatchClass = ImmSExti64i8AsmOperand;
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printi32mem";
  let AsmOperandLowerMethod = "lower_lea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}

// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
def i64mem_TC : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64_TC, i8imm, GR64_TC, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}

//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
                        [add, sub, mul, X86mul_imm, shl, or, frameindex,
                         X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

//===----------------------------------------------------------------------===//
// Pattern fragments.
//

def i64immSExt8  : PatLeaf<(i64 immSext8)>;

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
}]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
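
// Worked example (illustrative, not from the original source): the immediate
// 0xFFFFFFFF80000000 (i.e. -2^31) satisfies i64immSExt32 but not i64immZExt32,
// while 0x0000000080000000 (2^31) satisfies i64immZExt32 but not i64immSExt32;
// small non-negative values such as 42 satisfy both.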

def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;

//===----------------------------------------------------------------------===//
// Instruction list...
//

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}
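
// Illustrative note (not in the original file): a call needing 32 bytes of
// outgoing argument space is bracketed as ADJCALLSTACKDOWN64 32 ...
// ADJCALLSTACKUP64 32, 0, which frame lowering typically expands to
// "subq $32, %rsp" and "addq $32, %rsp".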

// Interrupt Instructions
def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iret{q}", []>;

//===----------------------------------------------------------------------===//
//  Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {

    // NOTE: this pattern doesn't match "X86call imm", because we do not know
    // that the offset between an arbitrary immediate and the call will fit in
    // the 32-bit pcrel field that we have.
    def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                          (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                          "call{q}\t$dst", []>,
                        Requires<[In64BitMode, NotWin64]>;
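    // Illustrative note (not in the original file): when the callee is an
    // arbitrary absolute address, codegen materializes it into a register and
    // selects CALL64r/CALL64m below instead, since a 32-bit pc-relative
    // displacement may not reach.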
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                        Requires<[NotWin64]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                        Requires<[NotWin64]>;

    def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                         "lcall{q}\t{*}$dst", []>;
  }

// FIXME: We need to teach codegen about single list of call-clobbered
// registers.
let isCall = 1, isCodeGenOnly = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
      Uses = [RSP] in {
    def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                             (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                             "call\t$dst", []>,
                           Requires<[IsWin64]>;
    def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                             "call\t{*}$dst",
                             [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
    def WINCALL64m       : I<0xFF, MRM2m, (outs),
                             (ins i64mem:$dst, variable_ops), "call\t{*}$dst",
                             [(X86call (loadi64 addr:$dst))]>,
                           Requires<[IsWin64]>;
  }

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
    isCodeGenOnly = 1 in
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {
  def TCRETURNdi64 : I<0, Pseudo, (outs),
                       (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset", []>;
  def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64_TC:$dst, i32imm:$offset,
                                            variable_ops),
                       "#TC_RETURN $dst $offset", []>;
  let mayLoad = 1 in
  def TCRETURNmi64 : I<0, Pseudo, (outs),
                       (ins i64mem_TC:$dst, i32imm:$offset, variable_ops),
                       "#TC_RETURN $dst $offset", []>;

  def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
                             (ins i64i32imm_pcrel:$dst, variable_ops),
                             "jmp\t$dst  # TAILCALL", []>;
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64_TC:$dst, variable_ops),
                     "jmp{q}\t{*}$dst  # TAILCALL", []>;

  let mayLoad = 1 in
  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
                     "jmp{q}\t{*}$dst  # TAILCALL", []>;
}

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                       "jmp{q}\t$dst", []>;
  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                     [(brind GR64:$dst)]>, Requires<[In64BitMode]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
  def FARJMP64   : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
                      "ljmp{q}\t{*}$dst", []>;
}

//===----------------------------------------------------------------------===//
//  EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>;

}

//===----------------------------------------------------------------------===//
//  Miscellaneous Instructions...
//

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;

def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;

let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
}
let mayStore = 1 in {
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
}
}

let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
def PUSH64i8   : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
                     "push{q}\t$imm", []>;
def PUSH64i16  : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
                      "push{q}\t$imm", []>;
def PUSH64i32  : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
                      "push{q}\t$imm", []>;
}

let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
def POPF64   : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
               Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
def PUSHF64    : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
                 Requires<[In64BitMode]>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;

let Constraints = "$src = $dst" in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;

def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
} // Defs = [EFLAGS]

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;

let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in
def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;

def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;

def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;

// Fast system-call instructions
def SYSEXIT64 : RI<0x35, RawFrm,
                   (outs), (ins), "sysexit", []>, TB, Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
//  Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}

// The assembler accepts movq of a 64-bit immediate as an alternate spelling of
// movabsq.
let isAsmParserOnly = 1 in {
def MOV64ri_alt  : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                         "mov{q}\t{$src, $dst|$dst, $src}", []>;
}

let isCodeGenOnly = 1 in {
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                     "mov{q}\t{$src, $dst|$dst, $src}", []>;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;

/// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
let isCodeGenOnly = 1 in {
let neverHasSideEffects = 1 in
def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}", []>;

let mayLoad = 1,
    canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;

let mayStore = 1 in
def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
                    "mov{q}\t{$src, $dst|$dst, $src}",
                    []>;
}

def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
                    "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
                      "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
                    "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
                      "mov{q}\t{%rax, $dst|$dst, %rax}", []>;

// Moves to and from segment registers
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

// Moves to and from debug registers
def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Moves to and from control registers
def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Sign/Zero extenders

// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
// operand that can never access an h register. If support for h registers
// were generalized, this would require a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;

// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
// zero-extension, however this isn't possible when the 32-bit value is
// defined by a truncate or is copied from something where the high bits aren't
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "", [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;

// Any instruction that defines a 32-bit result leaves the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. And x86's cmov doesn't do anything if the
// condition is false. But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
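
// Illustrative example (not in the original file): in
// (i64 (zext (add GR32:$a, GR32:$b))), the 32-bit add already zeroed the high
// half, so def32 matches and the zext becomes
// (SUBREG_TO_REG (i64 0), (ADD32rr $a, $b), sub_32bit) at no extra cost.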

let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
               "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
              "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
}

//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let Defs = [EFLAGS] in {

def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
                     "add{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
def ADD64rr    : RI<0x01, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86add_flag GR64:$src1, GR64:$src2))]>;

// These are alternate spellings for use by the disassembler, we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
  def ADD64rr_alt  : RI<0x03, MRMSrcReg, (outs GR64:$dst),
                        (ins GR64:$src1, GR64:$src2),
                        "add{q}\t{$src2, $dst|$dst, $src2}", []>;
}

// Register-Integer Addition
def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i8imm:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, i64immSExt8:$src2))]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86add_flag GR64:$src1, i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress

// Register-Memory Addition
def ADD64rm     : RI<0x03, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$src1, i64mem:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86add_flag GR64:$src1, (load addr:$src2)))]>;

} // Constraints = "$src1 = $dst"

// Memory-Register Addition
def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
                 (implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
               [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
                (implicit EFLAGS)]>;

let Uses = [EFLAGS] in {

def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
                     "adc{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}", []>;
}

def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2),
                      addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2),
                        addr:$dst)]>;
} // Uses = [EFLAGS]

let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sub{q}\t{$src2, $dst|$dst, $src2}", []>;
}

// Register-Memory Subtraction
def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86sub_flag GR64:$src1, (load addr:$src2)))]>;

// Register-Integer Subtraction
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sub{q}\t{$src, %rax|%rax, $src}", []>;

// Memory-Register Subtraction
def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;

// Memory-Integer Subtraction
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
                      addr:$dst),
                     (implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
                        addr:$dst),
                       (implicit EFLAGS)]>;

let Uses = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

let isCodeGenOnly = 1 in {
def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
}

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
                     "sbb{q}\t{$src, %rax|%rax, $src}", []>;

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
               [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
              [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]

// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*[mem64]
}

let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;

// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
} // Constraints = "$src1 = $dst"

// Surprisingly enough, these are not two address instructions!
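// For example (illustrative, not in the original file), "imulq $40, %rdi, %rax"
// computes %rax = %rdi * 40 in a single instruction, leaving %rdi unchanged.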

// Register-Integer Signed Integer Multiplication
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;

// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86smul_flag (load addr:$src1),
                                          i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                   // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, EFLAGS,
                              (X86smul_flag (load addr:$src1),
                                            i64immSExt32:$src2))]>;

// Unsigned division / remainder
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
// RDX:RAX/r64 = RAX,RDX
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
                "div{q}\t$src", []>;
// Signed division / remainder
// RDX:RAX/r64 = RAX,RDX
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
                "idiv{q}\t$src", []>;
let mayLoad = 1 in {
// RDX:RAX/[mem64] = RAX,RDX
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
                "div{q}\t$src", []>;
// RDX:RAX/[mem64] = RAX,RDX
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
                "idiv{q}\t$src", []>;
}
}

// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let Constraints = "$src = $dst" in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src)),
                 (implicit EFLAGS)]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst),
                 (implicit EFLAGS)]>;

let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
                 (implicit EFLAGS)]>;

// In 64-bit mode, single byte INC and DEC cannot be encoded.
let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
                  "inc{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
                  "inc{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
                  "dec{w}\t$dst",
                  [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
                  "dec{l}\t$dst",
                  [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
                Requires<[In64BitMode]>;
} // Constraints = "$src = $dst", isConvertibleToThreeAddress
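
// (Background note, not in the original file: the one-byte 0x40-0x4F inc/dec
// encodings are repurposed as REX prefixes in 64-bit mode, which is why the
// two-byte 0xFF /0 and 0xFF /1 forms above are needed.)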

// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), 1), addr:$dst),
                    (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), 1), addr:$dst),
                    (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), -1), addr:$dst),
                    (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), -1), addr:$dst),
                    (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
} // Defs = [EFLAGS], CodeSize

let Defs = [EFLAGS] in {
// Shift instructions
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper.
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t$dst", []>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src1, CL))]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "sar{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "sar{q}\t{$src, $dst|$dst, $src}",
                    [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Rotate instructions

let Constraints = "$src = $dst" in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;

def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                  "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                  "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
} // Constraints = "$src = $dst"

def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;

let Uses = [CL] in {
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
                  "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
                  "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "rol{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t$dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst"

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t$dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Double shift instructions (generalizations of rotate)
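// Illustrative semantics (not in the original file): "shld $c, %rsi, %rdi"
// computes rdi = (rdi << c) | (rsi >> (64 - c)), i.e. bits shifted out of the
// top are refilled from the second register rather than with zeros.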
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
                 TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
                 TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
}
} // Constraints = "$src1 = $dst"

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
}

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let Constraints = "$src = $dst" , AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;

let Defs = [EFLAGS] in {
def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
                     "and{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "and{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86and_flag GR64:$src1, (load addr:$src2)))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86and_flag GR64:$src1, i64immSExt8:$src2))]>;
def AND64ri32  : RIi32<0x81, MRM4r,
                       (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                       "and{q}\t{$src2, $dst|$dst, $src2}",
                       [(set GR64:$dst, EFLAGS,
                             (X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def AND64mi32  : RIi32<0x81, MRM4m,
                       (outs), (ins i64mem:$dst, i64i32imm:$src),
                       "and{q}\t{$src, $dst|$dst, $src}",
                       [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                        (implicit EFLAGS)]>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86or_flag GR64:$src1, (load addr:$src2)))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, EFLAGS,
                           (X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                      (implicit EFLAGS)]>;

def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
                    "or{q}\t{$src, %rax|%rax, $src}", []>;

let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg,  (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, GR64:$src2))]>;
let isCodeGenOnly = 1 in {
def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
}
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, EFLAGS,
                        (X86xor_flag GR64:$src1, (load addr:$src2)))]>;
def XOR64ri8 : RIi8<0x83, MRM6r,  (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, EFLAGS,
                          (X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, EFLAGS,
                            (X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
} // Constraints = "$src1 = $dst"

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;

def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
                     "xor{q}\t{$src, %rax|%rax, $src}", []>;

} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
def TEST64i32 : RIi32<0xa9, RawFrm, (outs), (ins i64i32imm:$src),
                      "test{q}\t{$src, %rax|%rax, $src}", []>;
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, GR64:$src2), 0))]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86cmp (and GR64:$src1, (loadi64 addr:$src2)),
                    0))]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and GR64:$src1, i64immSExt32:$src2),
                         0))]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86cmp (and (loadi64 addr:$src1),
                                     i64immSExt32:$src2), 0))]>;

def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
                     "cmp{q}\t{$src, %rax|%rax, $src}", []>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;

// These are alternate spellings for use by the disassembler, we mark them as
// code gen only to ensure they aren't matched by the assembler.
let isCodeGenOnly = 1 in {
  def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
}

def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                          i64immSExt8:$src2))]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
                                            i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]

// TODO: BTC, BTR, and BTS
let Defs = [EFLAGS] in {
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;

// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Disable these instructions for now.
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
               "bt{q}\t{$src2, $src1|$src1, $src2}",
//               [(X86bt (loadi64 addr:$src1), GR64:$src2),
//                (implicit EFLAGS)]
               []
               >, TB;

def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                  "bt{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB,
                REX_W;

// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                  "bt{q}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt (loadi64 addr:$src1),
                                       i64immSExt8:$src2))]>, TB;
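
// Illustrative example of the high-bit difference (not in the original file):
// "btq %rcx, %rax" uses only rcx modulo 64, while "btq %rcx, (%rdi)" uses the
// full index and may read the byte at (%rdi + %rcx/8), outside the i64 itself.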

def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // Defs = [EFLAGS]
// Conditional moves
let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,      // if !overflow, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1
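
// Note that commuting $src1 and $src2 of a CMOVcc is only valid together
// with inverting the condition: e.g. cmovb with swapped operands computes
// the same result as cmovae, which is what makes isCommutable legal here.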
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                    "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm : RI<0x41, MRMSrcMem,      // if !overflow, GR64 = [mem64]
                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                    "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NO, EFLAGS))]>, TB;
} // Constraints = "$src1 = $dst"
// Use sbb to materialize carry flag into a GPR.
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;

def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;
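
// This corresponds to sbb %rax, %rax: RAX - RAX - CF leaves 0 when the
// carry flag is clear and all-ones when it is set, materializing the carry
// into a full register with a single instruction.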
//===----------------------------------------------------------------------===//
// Descriptor-table support instructions

// LLDT is not interpreted specially in 64-bit mode because there is no sign
// extension.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;
def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS],
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                [(set GR64:$dst, 0)]>;
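
// Opcode 0x31 is XOR r/m,r: xor'ing the destination with itself yields zero
// with a shorter encoding than a mov of an immediate, at the cost of
// clobbering EFLAGS (hence the Defs above).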
// Materialize an i64 constant whose top 32 bits are zero. This could
// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
// zero-extension, however that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   ".byte\t0x66; "
                   "leaq\t$sym(%rip), %rdi; "
                   ".word\t0x6666; "
                   "rex64; "
                   "call\t__tls_get_addr@PLT",
                   [(X86tlsaddr tls64addr:$sym)]>,
                   Requires<[In64BitMode]>;
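
// The prefix and rex64 padding bytes exist so the leaq/call pair has the
// exact size and layout the linker looks for when relaxing general-dynamic
// TLS accesses down to the initial-exec or local-exec models.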
// Darwin TLS Support
// For x86_64, the address of the thunk is passed in %rdi; on return,
// the address of the variable is in %rax. All other registers are preserved.
let Defs = [RAX],
    Uses = [RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In64BitMode]>;
let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%gs:$src, $dst",
                   [(set GR64:$dst, (gsload addr:$src))]>, SegGS;

let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%fs:$src, $dst",
                   [(set GR64:$dst, (fsload addr:$src))]>, SegFS;
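
// gsload/fsload (defined elsewhere in the target) match ordinary loads whose
// address lives in the %gs or %fs segment, which is how thread-local data is
// reached on x86-64, e.g. movq %fs:0, %rax fetches the thread pointer.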
//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
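
// cmpxchg compares RAX against the memory operand, storing $swap on a match
// and loading the old value into RAX otherwise, which is why RAX appears in
// both Uses and Defs; the trailing 8 in the X86cas pattern is the access
// width in bytes.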
let Constraints = "$val = $dst" in {
let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;

def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$val,i64mem:$ptr),
                  "xchg{q}\t{$val, $ptr|$ptr, $val}",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;

def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
                  "xchg{q}\t{$val, $src|$src, $val}", []>;
}
def XADD64rr  : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                   "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in
def XADD64rm  : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                   "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;

def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
                    "cmpxchg16b\t$dst", []>, TB;
def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
                  "xchg{q}\t{$src, %rax|%rax, $src}", []>;
// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
                           (ins i64mem:$dst, i64i32imm :$src2),
                           "lock\n\t"
                           "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
                           (ins i64mem:$dst, i64i32imm:$src2),
                           "lock\n\t"
                           "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}
// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMAND64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "#ATOMOR64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMXOR64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMNAND64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMMIN64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMMAX64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMUMIN64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMUMAX64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}
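
// Because these carry usesCustomInserter, they are expanded after isel into
// an explicit load / operate / lock cmpxchg retry loop rather than being
// emitted directly.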
// Segmentation support instructions

// The i16mem operand in LAR64rm and the GR32 operand in LAR64rr are not typos.
def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;

def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;

def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
                 "push{q}\t%fs", []>, TB;
def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
                 "push{q}\t%gs", []>, TB;

def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
                "pop{q}\t%fs", []>, TB;
def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
                "pop{q}\t%gs", []>, TB;

def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lss{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Specialized register support

// no m form encodable; use SMSW16m
def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
                 "smsw{q}\t$dst", []>, TB;

// String manipulation instructions

def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable addresses, when
// not in small code model mode, should use 'movabs'. FIXME: This is really a
// hack, the 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
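
// This corresponds to movabs $symbol, %rax, the only mov form that carries
// a full 64-bit immediate.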
// In static codegen with the small code model, we can get the address of a
// label into a register with 'movl'. FIXME: This is a hack, the 'imm'
// predicate of MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;
// In the kernel code model, we can get the address of a label into a
// register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
// If we have the small code model and -static mode, it is safe to store
// global addresses directly as immediates. FIXME: This is really a hack, the
// 'imm' predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
      Requires<[NearData, IsStatic]>;
// Direct PC-relative function call for the small code model. The 32-bit
// displacement is sign-extended to 64 bits.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
          (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
      Requires<[In64BitMode]>;
// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;
// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
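
// test %rax, %rax carries no immediate byte, so it encodes one byte shorter
// than cmp $0, %rax while producing the same ZF/SF results.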
// Conditional moves with folded loads with operands swapped and conditions
// reversed.
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
          (CMOVAE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
          (CMOVB64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
          (CMOVNE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
          (CMOVE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
          (CMOVA64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
          (CMOVBE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
          (CMOVGE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
          (CMOVL64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
          (CMOVG64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
          (CMOVLE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
          (CMOVNP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
          (CMOVP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
          (CMOVNS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
          (CMOVS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
          (CMOVNO64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
          (CMOVO64rm GR64:$src2, addr:$src1)>;
// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;
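
// The (i64 0) operand of SUBREG_TO_REG asserts that the upper 32 bits are
// already zero, which holds here because a 32-bit load zero-extends into the
// full 64-bit register.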
// anyext: define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
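
// For example, addq $128, %rbx needs a 4-byte imm32 (7 bytes total) while
// subq $-128, %rbx fits in an imm8 (4 bytes total).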
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
// Use a 32-bit and with implicit zero-extension instead of a 64-bit and if it
// has an immediate with at least 32 bits of leading zeros, to avoid needing to
// materialize that immediate in a register first.
def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
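
// MOVZX64rr32 is effectively just a 32-bit register-to-register move
// (e.g. movl %eax, %eax), relying on the implicit zeroing of bits 63:32.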
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
      Requires<[In64BitMode]>;
// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
      Requires<[In64BitMode]>;
// trunc patterns
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
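
// Truncation is therefore free: it is just a subregister extract, resolved
// during register allocation with no instruction emitted.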
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
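
// The h registers (AH, BH, CH, DH) cannot be encoded in any instruction that
// carries a REX prefix, so these patterns pin the value to the ABCD register
// classes and use the _NOREX instruction variants.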
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
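
// These fold away the mask because the hardware already truncates a 64-bit
// shift count in %cl to 6 bits, making the explicit (and CL, 63) a no-op.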
// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting to OR
def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or_is_add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
} // AddedComplexity
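
// Selecting add instead of or when no bits overlap is profitable because add
// has more flexible forms available downstream, e.g. it can be turned into
// lea to get a three-address form that leaves EFLAGS alone.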
// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
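
// The 64-bit ALU instruction definitions model EFLAGS as an explicit result,
// so their built-in patterns only match flag-producing nodes; the plain i64
// DAG operations are mapped onto the same instructions by the patterns below.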
// add
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// sub
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// mul
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// inc/dec
def : Pat<(add GR16:$src, 1),  (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1),  (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

// or
def : Pat<(or GR64:$src1, GR64:$src2),
          (OR64rr GR64:$src1, GR64:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// xor
def : Pat<(xor GR64:$src1, GR64:$src2),
          (XOR64rr GR64:$src1, GR64:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// and
def : Pat<(and GR64:$src1, GR64:$src2),
          (AND64rr GR64:$src1, GR64:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
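
// These provide direct 64-bit transfers between GPRs, scalar FP values, and
// the low element of an XMM register, e.g. movq %rax, %xmm0 and
// movq %xmm0, %rax, so i64<->f64 bitcasts need no trip through memory.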