//====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
  let PrintMethod = "print_pcrel_imm";
}
// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64> {
  let ParserMatchClass = ImmSExt8AsmOperand;
}
def lea64mem : Operand<i64> {
  let PrintMethod = "printlea64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let AsmOperandLowerMethod = "lower_lea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
  let ParserMatchClass = X86MemAsmOperand;
}
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                        [add, sub, mul, X86mul_imm, shl, or, frameindex,
                         X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
                               [tglobaltlsaddr], []>;
//===----------------------------------------------------------------------===//
// Pattern fragments.
//
def i64immSExt8  : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign-extended field.
  return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
}]>;

def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign-extended field.
  return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
}]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero-extended (unsigned) field.
  return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
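
// For example (illustrative values): -128 (0xFFFFFFFFFFFFFF80) satisfies
// both i64immSExt8 and i64immSExt32, while 0x80000000 satisfies only
// i64immZExt32, since sign-extending its low 32 bits would set the upper
// 32 bits.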
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
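
// These fragments wrap the generic extending-load nodes with an i64 result
// type so the 64-bit extending-load instructions below (e.g. MOVSX64rm8 via
// sextloadi64i8) can match them directly.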
//===----------------------------------------------------------------------===//
// Instruction list...
//

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                         "#ADJCALLSTACKUP",
                         [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                        Requires<[In64BitMode]>;
}
// Interrupt Instructions
def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iret{q}", []>;
//===----------------------------------------------------------------------===//
//  Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {
    // NOTE: this pattern doesn't match "X86call imm", because we do not know
    // that the offset between an arbitrary immediate and the call will fit in
    // the 32-bit pcrel field that we have.
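    // (A call to an absolute address therefore goes through a register
    // instead, e.g. a MOV64ri of the target followed by CALL64r.)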
    def CALL64pcrel32 : Ii32<0xE8, RawFrm,
                          (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                          "call{q}\t$dst", []>,
                        Requires<[In64BitMode, NotWin64]>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                        Requires<[NotWin64]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                        Requires<[NotWin64]>;

    def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                         "lcall{q}\t{*}$dst", []>;
  }
// FIXME: We need to teach codegen about single list of call-clobbered
// registers.
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
      Uses = [RSP] in {
    def WINCALL64pcrel32 : I<0xE8, RawFrm,
                             (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                             "call\t$dst", []>,
                           Requires<[IsWin64]>;
    def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                             "call\t{*}$dst",
                             [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
    def WINCALL64m       : I<0xFF, MRM2m, (outs),
                             (ins i64mem:$dst, variable_ops), "call\t{*}$dst",
                             [(X86call (loadi64 addr:$dst))]>,
                           Requires<[IsWin64]>;
  }
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset,
                                           variable_ops),
                       "#TC_RETURN $dst $offset",
                       []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset,
                                           variable_ops),
                       "#TC_RETURN $dst $offset",
                       []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
                     "jmp{q}\t{*}$dst  # TAILCALL",
                     []>;
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                       "jmp{q}\t$dst", []>;
  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                     [(brind GR64:$dst)]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>;
  def FARJMP64   : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
                      "ljmp{q}\t{*}$dst", []>;
}
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                     "ret\t#eh_return, addr: $addr",
                     [(X86ehret GR64:$addr)]>;
}
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
}
let mayStore = 1 in {
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
}
}
let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
def PUSH64i8   : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
                     "push{q}\t$imm", []>;
def PUSH64i16  : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
                      "push{q}\t$imm", []>;
def PUSH64i32  : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
                      "push{q}\t$imm", []>;
}

let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
def POPFQ    : I<0x9D, RawFrm, (outs), (ins), "popf{q}", []>, REX_W;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
def PUSHFQ64 : I<0x9C, RawFrm, (outs), (ins), "pushf{q}", []>;
def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;

let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;
// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;

def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]

// Repeat string operations
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;

def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scas{q}", []>;

def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmps{q}", []>;

// Fast system-call instructions
def SYSEXIT64 : RI<0x35, RawFrm,
                   (outs), (ins), "sysexit", []>, TB;
//===----------------------------------------------------------------------===//
//  Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}

def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                     "mov{q}\t{$src, $dst|$dst, $src}", []>;
let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;
def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
                    "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
                      "mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
                    "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
                      "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
// Moves to and from segment registers
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;
def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

// Moves to and from debug registers
def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Moves to and from control registers
def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG_64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_64:$dst), (ins GR64:$src),
                "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
// Sign/Zero extenders

// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
// operand that can never access an h register. If support for h registers
// were generalized, this would require a special register class.
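// (With a REX prefix present, the byte registers SPL, BPL, SIL, and DIL
// become encodable, but AH, CH, DH, and BH are no longer addressable.)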
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                      "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                      "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
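// (For example, "movzbl %al, %eax" leaves all of RAX zero-extended, since a
// write to EAX implicitly clears bits 63:32.)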
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
// zero-extension, however this isn't possible when the 32-bit value is
// defined by a truncate or is copied from something where the high bits aren't
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "", [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
// Any instruction that defines a 32-bit result zeros the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. And x86's cmov doesn't do anything if the
// condition is false. But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetInstrInfo::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
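
// For example, (i64 (zext (add GR32:$a, GR32:$b))) can select to a 32-bit
// ADD followed by SUBREG_TO_REG, with no explicit zeroing instruction,
// because the 32-bit add already cleared bits 63:32.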
let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
                "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
                "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}
//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let Defs = [EFLAGS] in {

def ADD64i32 : RI<0x05, RawFrm, (outs), (ins i32imm:$src),
                  "add{q}\t{$src, %rax|%rax, $src}", []>;
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
def ADD64rr    : RI<0x01, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
                     (implicit EFLAGS)]>;

// Register-Integer Addition
def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i8imm:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
                      (implicit EFLAGS)]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isConvertibleToThreeAddress
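
// (isConvertibleToThreeAddress lets the two-address pass rewrite, e.g.,
// "addq $8, %rsi" into "leaq 8(%rsi), %rdi" when the source register must
// be preserved.)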
// Register-Memory Addition
def ADD64rm     : RI<0x03, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$src1, i64mem:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
                      (implicit EFLAGS)]>;

// Register-Register Addition - Equivalent to the normal rr form (ADD64rr), but
//   differently encoded.
def ADD64mrmrr  : RI<0x03, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}", []>;

} // isTwoAddress
// Memory-Register Addition
def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
                     (implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
                       (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {

def ADC64i32 : RI<0x15, RawFrm, (outs), (ins i32imm:$src),
                  "adc{q}\t{$src, %rax|%rax, $src}", []>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rr_REV : RI<0x13, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}", []>;

def ADC64rm  : RI<0x13, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2),
                      addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2),
                        addr:$dst)]>;
} // Uses = [EFLAGS]
let isTwoAddress = 1 in {
// Register-Register Subtraction
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;

def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sub{q}\t{$src2, $dst|$dst, $src2}", []>;

// Register-Memory Subtraction
def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;

// Register-Integer Subtraction
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isTwoAddress

def SUB64i32 : RI<0x2D, RawFrm, (outs), (ins i32imm:$src),
                  "sub{q}\t{$src, %rax|%rax, $src}", []>;

// Memory-Register Subtraction
def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;

// Memory-Integer Subtraction
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
                      addr:$dst),
                     (implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
                        addr:$dst),
                       (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress

def SBB64i32 : RI<0x1D, RawFrm, (outs), (ins i32imm:$src),
                  "sbb{q}\t{$src, %rax|%rax, $src}", []>;

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
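
// (adde/sube model carry-chained arithmetic: a 128-bit add, for instance,
// lowers to "addq %rcx, %rax; adcq %rdx, %rbx", with the ADC consuming the
// carry flag produced by the ADD.)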
// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*[mem64]
}
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
                                   (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>, TB;

// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
                                   (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two-address instructions!

// Register-Integer Signed Integer Multiplication
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
                       (implicit EFLAGS)]>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
                         (implicit EFLAGS)]>;

// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1),
                                            i64immSExt8:$src2)),
                       (implicit EFLAGS)]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                   // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1),
                                              i64immSExt32:$src2)),
                         (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
// RDX:RAX/r64 = RAX,RDX
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
                "div{q}\t$src", []>;
// Signed division / remainder
// RDX:RAX/r64 = RAX,RDX
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
                "idiv{q}\t$src", []>;

// RDX:RAX/[mem64] = RAX,RDX
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
                "div{q}\t$src", []>;
// RDX:RAX/[mem64] = RAX,RDX
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
                "idiv{q}\t$src", []>;
}
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src)),
                 (implicit EFLAGS)]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst),
                 (implicit EFLAGS)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, 1)),
                 (implicit EFLAGS)]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst),
                 (implicit EFLAGS)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, -1)),
                 (implicit EFLAGS)]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
                 (implicit EFLAGS)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
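// (The one-byte 0x40-0x4F inc/dec opcodes were repurposed as REX prefixes,
// so 64-bit code must use the two-byte 0xFF /0 and /1 encodings below.)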
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
                  "inc{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, 1)),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
                  "inc{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, 1)),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
                  "dec{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, -1)),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
                  "dec{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, -1)),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
let isTwoAddress = 0, CodeSize = 2 in {
  def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), 1), addr:$dst),
                     (implicit EFLAGS)]>,
                  OpSize, Requires<[In64BitMode]>;
  def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), 1), addr:$dst),
                     (implicit EFLAGS)]>,
                  Requires<[In64BitMode]>;
  def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), -1), addr:$dst),
                     (implicit EFLAGS)]>,
                  OpSize, Requires<[In64BitMode]>;
  def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), -1), addr:$dst),
                     (implicit EFLAGS)]>,
                  Requires<[In64BitMode]>;
}
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
                  "shl{q}\t{%cl, $dst|$dst, %cl}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper.
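// (A left shift by one, "shlq $1, %rax", is equivalent to "addq %rax, %rax",
// and the add form typically executes on more ports.)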
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t$dst", []>;
} // isTwoAddress

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %cl}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
                  "shr{q}\t{%cl, $dst|$dst, %cl}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %cl}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
                  "sar{q}\t{%cl, $dst|$dst, %cl}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>;
def SAR64ri  : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "sar{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %cl}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi  : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "sar{q}\t{$src, $dst|$dst, $src}",
                    [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Rotate instructions

let isTwoAddress = 1 in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64m1 : RI<0xD1, MRM2m, (outs i64mem:$dst), (ins i64mem:$src),
                 "rcl{q}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
                  "rcl{q}\t{%cl, $dst|$dst, %cl}", []>;
def RCL64mCL : RI<0xD3, MRM2m, (outs i64mem:$dst), (ins i64mem:$src),
                  "rcl{q}\t{%cl, $dst|$dst, %cl}", []>;
}
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs i64mem:$dst),
                   (ins i64mem:$src, i8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;

def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
def RCR64m1 : RI<0xD1, MRM3m, (outs i64mem:$dst), (ins i64mem:$src),
                 "rcr{q}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
                  "rcr{q}\t{%cl, $dst|$dst, %cl}", []>;
def RCR64mCL : RI<0xD3, MRM3m, (outs i64mem:$dst), (ins i64mem:$src),
                  "rcr{q}\t{%cl, $dst|$dst, %cl}", []>;
}
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs i64mem:$dst),
                   (ins i64mem:$src, i8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
} // isTwoAddress
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
                  "rol{q}\t{%cl, $dst|$dst, %cl}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %cl}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "rol{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t$dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
                  "ror{q}\t{%cl, $dst|$dst, %cl}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %cl}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t$dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %cl}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
                 TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %cl}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
                 TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %cl}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %cl}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
}

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let isTwoAddress = 1, AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let Defs = [EFLAGS] in {
def AND64i32 : RI<0x25, RawFrm, (outs), (ins i32imm:$src),
                  "and{q}\t{$src, %rax|%rax, $src}", []>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "and{q}\t{$src2, $dst|$dst, $src2}", []>;
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def AND64ri32  : RIi32<0x81, MRM4r,
                       (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                       "and{q}\t{$src2, $dst|$dst, $src2}",
                       [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2)),
                        (implicit EFLAGS)]>;
} // isTwoAddress

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def AND64mi32  : RIi32<0x81, MRM4m,
                       (outs), (ins i64mem:$dst, i64i32imm:$src),
                       "and{q}\t{$src, $dst|$dst, $src}",
                       [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                        (implicit EFLAGS)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}", []>;
def OR64rm   : RI<0x0B, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (or_not_add GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
                     (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or_not_add GR64:$src1, i64immSExt32:$src2)),
                      (implicit EFLAGS)]>;
} // isTwoAddress

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                      (implicit EFLAGS)]>;

def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i32imm:$src),
                    "or{q}\t{$src, %rax|%rax, $src}", []>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg, (outs GR64:$dst),
                  (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
                     (ins GR64:$src1, GR64:$src2),
                     "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isTwoAddress

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;

def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i32imm:$src),
                     "xor{q}\t{$src, %rax|%rax, $src}", []>;

} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
def TEST64i32 : RI<0xa9, RawFrm, (outs), (ins i32imm:$src),
                   "test{q}\t{$src, %rax|%rax, $src}", []>;
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                   (implicit EFLAGS)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                   (implicit EFLAGS)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
def CMP64i32 : RI<0x3D, RawFrm, (outs), (ins i32imm:$src),
                  "cmp{q}\t{$src, %rax|%rax, $src}", []>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                  (implicit EFLAGS)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Bit tests.
// TODO: BTC, BTR, and BTS
let Defs = [EFLAGS] in {
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
                [(X86bt GR64:$src1, GR64:$src2),
                 (implicit EFLAGS)]>, TB;
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Disable these instructions for now.
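// (With a register operand, "bt %rax, %rbx" uses only the low 6 bits of the
// index; with a memory operand, "bt %rax, (%rdi)" can address a bit at an
// arbitrary displacement beyond the pointed-to quadword.)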
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
//                [(X86bt (loadi64 addr:$src1), GR64:$src2),
//                 (implicit EFLAGS)]
                []
                >, TB;

def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                  "bt{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86bt GR64:$src1, i64immSExt8:$src2),
                   (implicit EFLAGS)]>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                  "bt{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2),
                   (implicit EFLAGS)]>, TB;
def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // Defs = [EFLAGS]
// Conditional moves
let Uses = [EFLAGS], isTwoAddress = 1 in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,      // if !overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm : RI<0x41, MRMSrcMem,      // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NO, EFLAGS))]>, TB;
// Use sbb to materialize carry flag into a GPR.
let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins),
                   "sbb{q}\t$dst, $dst",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;

def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;
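// Note: "sbb %reg, %reg" computes reg - reg - CF = -CF, so SETB_C64r leaves
// 0 when the carry flag is clear and all-ones (0xFFFFFFFFFFFFFFFF) when it is
// set, materializing the flag into a GPR without a branch.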
//===----------------------------------------------------------------------===//
// Conversion Instructions...
//===----------------------------------------------------------------------===//

// f64 -> signed i64
def CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                       "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>;
def CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                       "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>;
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst),
                           (ins f64mem:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
                                             (load addr:$src)))]>;
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst),
                            (ins f64mem:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64
                               (load addr:$src)))]>;
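// The plain CVT* forms above operate on FR64/GR64 operands and are selected
// for ordinary fp_to_sint codegen; the Int_* forms take a VR128 operand and
// implement the corresponding SSE2 cvtsd2si/cvttsd2si intrinsics directly.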
// Signed i64 -> f64
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              GR64:$src2))]>;
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              (loadi64 addr:$src2)))]>;
} // isTwoAddress

// Signed i64 -> f32
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
                            (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               GR64:$src2))]>;
def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
                            (outs VR128:$dst),
                            (ins VR128:$src1, i64mem:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               (loadi64 addr:$src2)))]>;
} // isTwoAddress

// f32 -> signed i64
def CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                       "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>;
def CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                       "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>;
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse_cvtss2si64 VR128:$src))]>;
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse_cvtss2si64
                                             (load addr:$src)))]>;
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 VR128:$src))]>;
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst),
                            (ins f32mem:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 (load addr:$src)))]>;

// Descriptor-table support instructions

// LLDT is not interpreted specially in 64-bit mode because there is no sign
// extension necessary.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;
def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
                 "sldt{q}\t$dst", []>, TB;

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let AddedComplexity = 1 in
def : Pat<(i64 0),
          (SUBREG_TO_REG (i64 0), (MOV32r0), x86_subreg_32bit)>;

// Materialize an i64 constant whose top 32 bits are zero.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// All calls clobber the non-callee-saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
                   ".byte\t0x66; "
                   "leaq\t$sym(%rip), %rdi; "
                   ".word\t0x6666; "
                   "rex64; "
                   "call\t__tls_get_addr@PLT",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;
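// This is the standard general-dynamic TLS access sequence: the prefix
// padding keeps the lea/call group at a fixed length so the static linker can
// recognize it (and potentially relax it to a cheaper TLS model), and
// __tls_get_addr returns the variable's address in RAX per the ELF TLS ABI.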
let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%gs:$src, $dst",
                   [(set GR64:$dst, (gsload addr:$src))]>, SegGS;

let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%fs:$src, $dst",
                   [(set GR64:$dst, (fsload addr:$src))]>, SegFS;
//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
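// cmpxchg compares RAX with the memory operand: if they are equal it stores
// $swap and sets ZF; otherwise it loads the current memory value into RAX and
// clears ZF. Hence RAX appears in both Defs and Uses above.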
let Constraints = "$val = $dst" in {
let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;

def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$val,i64mem:$ptr),
                  "xchg{q}\t{$val, $ptr|$ptr, $val}",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
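// Note that xchg with a memory operand is implicitly locked: the processor
// applies bus/cache-lock semantics even without a lock prefix, which is why
// XCHG64rm can implement atomic_swap_64 directly.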
def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
                  "xchg{q}\t{$val, $src|$src, $val}", []>;
} // Constraints = "$val = $dst"

def XADD64rr  : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                   "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
def XADD64rm  : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                   "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;

def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
                    "cmpxchg16b\t$dst", []>, TB;

def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
                  "xchg{q}\t{$src, %rax|%rax, $src}", []>;
// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS] in {
// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
                           (ins i64mem:$dst, i64i32imm :$src2),
                           "lock\n\t"
                           "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
                           (ins i64mem:$dst, i64i32imm:$src2),
                           "lock\n\t"
                           "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
} // Defs = [EFLAGS]
// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
                  usesCustomInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMXOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMNAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMUMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMUMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}
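// x86 has no single instruction for atomic and/or/xor/nand/min/max that also
// returns the old value, so these pseudos are expanded late by the target's
// custom inserter (hence usesCustomInserter) into a load / operate /
// lock-cmpxchg retry loop.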
// Segmentation support instructions

// The i16mem operand in LAR64rm and the GR32 operand in LAR64rr are not typos.
def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;

def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;

def SWPGS : I<0x01, RawFrm, (outs), (ins), "swapgs", []>, TB;

def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
                 "push{q}\t%fs", []>, TB;
def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
                 "push{q}\t%gs", []>, TB;

def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
                "pop{q}\t%fs", []>, TB;
def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
                "pop{q}\t%gs", []>, TB;

def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lss{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB;
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
                 "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;

// Specialized register support

// No m form is encodable; use SMSW16m instead.
def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
                 "smsw{q}\t$dst", []>, TB;

// String manipulation instructions

def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable, when not in
// small code model mode, should use 'movabs'. FIXME: This is really a hack,
// the 'movabs' predicate should handle this sort of thing.
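// For example, outside the small code model a symbol's address may not fit in
// a sign-extended 32-bit immediate, so it must be loaded with the 10-byte
// "movabsq $sym, %reg" form, which carries a full 64-bit relocation.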
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small code model and -static mode, it is safe to store global
// addresses directly as immediates. FIXME: This is really a hack, the 'imm'
// predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

// Direct PC-relative function call for small code model. 32-bit displacement
// sign-extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
// Tail-call return patterns.
def : Pat<(X86tcret GR64:$dst, imm:$off),
          (TCRETURNri64 GR64:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>;

// TEST R,R is smaller than CMP R,0
def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
          (TEST64rr GR64:$src1, GR64:$src1)>;
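// Encoding note: "testq %rax, %rax" is 3 bytes (REX.W + 85 /r), while even
// the shortest "cmpq $0, %rax" form is 4 bytes (REX.W + 83 /7 ib).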
// Conditional moves with folded loads with operands swapped and conditions
// inverted.
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
          (CMOVAE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
          (CMOVB64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
          (CMOVNE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
          (CMOVE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
          (CMOVA64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
          (CMOVBE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
          (CMOVGE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
          (CMOVL64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
          (CMOVG64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
          (CMOVLE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
          (CMOVNP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
          (CMOVP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
          (CMOVNS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
          (CMOVS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
          (CMOVNO64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
          (CMOVO64rm GR64:$src2, addr:$src1)>;
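// cmov can fold a load only on its second (moved) operand, so a load on the
// first operand is handled by swapping the operands and inverting the
// condition: select(a <u b, [mem], r) becomes "cmovae (mem), r" rather than
// forcing the load into a register first.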
// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
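// For example, an anyext i8 load becomes "movzbl (%rdi), %eax" rather than
// "movb (%rdi), %al"; the byte move would write only AL and leave the rest of
// RAX stale, creating a false dependence on the register's prior value.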
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         x86_subreg_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
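// Signed 8-bit immediates cover [-128, 127], so "addq $128, %rcx" needs the
// imm32 encoding (81 /0, 7 bytes) while the equivalent "subq $-128, %rcx"
// fits the imm8 encoding (83 /5, 4 bytes).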
// The same trick applies for 32-bit immediate fields in 64-bit
// operations.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
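// 64-bit instructions sign-extend their 32-bit immediates, so +2^31 is not
// encodable as an imm32 (it would sign-extend to 0xffffffff80000000), but
// subtracting -2^31, which is exactly that sign-extended value, is.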
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;

// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
// trunc patterns
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
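// Truncation is free on x86-64: the narrower subregister (e.g. EAX within
// RAX) aliases the low bits of the wide register, so EXTRACT_SUBREG is just a
// register-allocation-time renaming and emits no machine instruction.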
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX
// prefix from being allocated in the same instruction as the h register, as
// there's currently no way to describe this requirement to the register
// allocator.
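// Background: instructions that use a REX prefix (required for %sil, %dil,
// %r8b-%r15b, or any 64-bit operand) cannot encode the high-byte registers
// %ah, %bh, %ch, %dh. The GR*_ABCD register classes and *_NOREX instruction
// forms below confine h-register accesses to encodings where this is legal.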
// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              x86_subreg_8bit_hi)),
            x86_subreg_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              x86_subreg_8bit_hi)),
            x86_subreg_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              x86_subreg_8bit_hi)),
            x86_subreg_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              x86_subreg_8bit_hi)),
            x86_subreg_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            x86_subreg_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL:$amt, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL:$amt, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL:$amt, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
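// These folds are valid because the hardware already masks the shift count:
// for 64-bit operands the processor uses only the low 6 bits of CL, so an
// explicit "and $63" on the count is redundant and can be dropped.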
// Double shift patterns
def : Pat<(shrd GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHRD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;

def : Pat<(store (shrd (loadi64 addr:$dst), (i8 imm:$amt1),
                       GR64:$src2, (i8 imm:$amt2)), addr:$dst),
          (SHRD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;

def : Pat<(shld GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHLD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;

def : Pat<(store (shld (loadi64 addr:$dst), (i8 imm:$amt1),
                       GR64:$src2, (i8 imm:$amt2)), addr:$dst),
          (SHLD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;

// (or x, c) -> (add x, c) if masked bits are known zero.
def : Pat<(parallel (or_is_add GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (or_is_add GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
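// When the immediate's set bits land only in positions known to be zero in x,
// "or" and "add" compute the same value; selecting add opens up later folds
// (for example into an LEA address) that or does not. The or_is_add fragment
// performs the known-bits check that makes this rewrite safe.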
// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// Register-Register Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (ADD64rr GR64:$src1, GR64:$src2)>;

// Register-Integer Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Register-Memory Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// Memory-Register Addition with EFLAGS result
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mr addr:$dst, GR64:$src2)>;
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst),
                                        i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mi32 addr:$dst, i64immSExt32:$src2)>;

// Register-Register Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (SUB64rr GR64:$src1, GR64:$src2)>;

// Register-Memory Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// Register-Integer Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Memory-Register Subtraction with EFLAGS result
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mr addr:$dst, GR64:$src2)>;

// Memory-Integer Subtraction with EFLAGS result
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst),
                                        i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst),
                                        i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mi32 addr:$dst, i64immSExt32:$src2)>;

// Register-Register Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// Register-Memory Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// Register-Integer Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// Memory-Integer Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// INC and DEC with EFLAGS result. Note that these do not set CF.
def : Pat<(parallel (X86inc_flag GR16:$src), (implicit EFLAGS)),
          (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i16 (X86inc_flag (loadi16 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (INC64_16m addr:$dst)>, Requires<[In64BitMode]>;
def : Pat<(parallel (X86dec_flag GR16:$src), (implicit EFLAGS)),
          (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i16 (X86dec_flag (loadi16 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (DEC64_16m addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(parallel (X86inc_flag GR32:$src), (implicit EFLAGS)),
          (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i32 (X86inc_flag (loadi32 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (INC64_32m addr:$dst)>, Requires<[In64BitMode]>;
def : Pat<(parallel (X86dec_flag GR32:$src), (implicit EFLAGS)),
          (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (DEC64_32m addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(parallel (X86inc_flag GR64:$src), (implicit EFLAGS)),
          (INC64r GR64:$src)>;
def : Pat<(parallel (store (i64 (X86inc_flag (loadi64 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (INC64m addr:$dst)>;
def : Pat<(parallel (X86dec_flag GR64:$src), (implicit EFLAGS)),
          (DEC64r GR64:$src)>;
def : Pat<(parallel (store (i64 (X86dec_flag (loadi64 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (DEC64m addr:$dst)>;
// Register-Register Logical Or with EFLAGS result
def : Pat<(parallel (X86or_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (OR64rr GR64:$src1, GR64:$src2)>;

// Register-Integer Logical Or with EFLAGS result
def : Pat<(parallel (X86or_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86or_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Register-Memory Logical Or with EFLAGS result
def : Pat<(parallel (X86or_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (OR64rm GR64:$src1, addr:$src2)>;

// Memory-Register Logical Or with EFLAGS result
def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (OR64mr addr:$dst, GR64:$src2)>;
def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (OR64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (OR64mi32 addr:$dst, i64immSExt32:$src2)>;

// Register-Register Logical XOr with EFLAGS result
def : Pat<(parallel (X86xor_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (XOR64rr GR64:$src1, GR64:$src2)>;

// Register-Integer Logical XOr with EFLAGS result
def : Pat<(parallel (X86xor_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86xor_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Register-Memory Logical XOr with EFLAGS result
def : Pat<(parallel (X86xor_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// Memory-Register Logical XOr with EFLAGS result
def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (XOR64mr addr:$dst, GR64:$src2)>;
def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (XOR64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst),
                                        i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (XOR64mi32 addr:$dst, i64immSExt32:$src2)>;

// Register-Register Logical And with EFLAGS result
def : Pat<(parallel (X86and_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (AND64rr GR64:$src1, GR64:$src2)>;

// Register-Integer Logical And with EFLAGS result
def : Pat<(parallel (X86and_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86and_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Register-Memory Logical And with EFLAGS result
def : Pat<(parallel (X86and_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (AND64rm GR64:$src1, addr:$src2)>;

// Memory-Register Logical And with EFLAGS result
def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (AND64mr addr:$dst, GR64:$src2)>;
def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (AND64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst),
                                        i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (AND64mi32 addr:$dst, i64immSExt32:$src2)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                           (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===----------------------------------------------------------------------===//
// X86-64 SSE4.1 Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR64:$dst,
                     (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
                   OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                           addr:$dst)]>, OpSize, REX_W;
}

defm PEXTRQ      : SS41I_extract64<0x16, "pextrq">;
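// defm expands the multiclass with the given name prefix, so this single line
// instantiates two records, PEXTRQrr and PEXTRQmr, sharing the 0x16 opcode.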
let isTwoAddress = 1 in {
  multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                      "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, GR64:$src2,
                                         imm:$src3)))]>, OpSize, REX_W;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                      "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                                         imm:$src3)))]>, OpSize, REX_W;
  }
}

defm PINSRQ      : SS41I_insert64<0x22, "pinsrq">;
// -disable-16bit support.
def : Pat<(truncstorei16 (i64 imm:$src), addr:$dst),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(truncstorei16 GR64:$src, addr:$dst),
          (MOV16mr addr:$dst, (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
def : Pat<(i64 (sextloadi16 addr:$dst)),
          (MOVSX64rm16 addr:$dst)>;
def : Pat<(i64 (zextloadi16 addr:$dst)),
          (MOVZX64rm16 addr:$dst)>;
def : Pat<(i64 (extloadi16 addr:$dst)),
          (MOVZX64rm16 addr:$dst)>;