//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue(), SDLoc(N));
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                          Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, 0)>, Requires<[NotLP64]>;

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                          Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, 0)>, Requires<[IsLP64]>;
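
// For illustration only (the exact expansion is decided during frame
// lowering): a call with 8 bytes of outgoing stack arguments might become
//   subq $8, %rsp     # ADJCALLSTACKDOWN64
//   callq foo
//   addq $8, %rsp     # ADJCALLSTACKUP64
// which is why both pseudos pessimistically clobber EFLAGS.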

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset),
                               (implicit EFLAGS)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls) like the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1 in {
  def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                   "# CATCHRET",
                   [(catchret bb:$dst, bb:$from)]>;
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
    def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                              "#EH_SJLJ_LONGJMP32",
                              [(X86eh_sjlj_longjmp addr:$buf)]>,
                            Requires<[Not64BitMode]>;
    def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                              "#EH_SJLJ_LONGJMP64",
                              [(X86eh_sjlj_longjmp addr:$buf)]>,
                            Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1 in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                            "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                            "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                            "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                            "#SEH_StackAlloc $size", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                            "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                            "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                            "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                            "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                          "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
  let AddedComplexity = 20;
}
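
// For illustration: MOV32r0 is emitted as "xorl %eax, %eax" (2 bytes), and
// because a 32-bit def implicitly zeroes bits 63:32, the same instruction
// also materializes an i64 zero without needing a REX prefix.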

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1, hasSideEffects = 0 in
def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
                     "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;

// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant, and for labels in the
// x86-64 small code model.
def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;

let AddedComplexity = 1 in
def : Pat<(i64 mov64imm32:$src),
          (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                 [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                 [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
}

def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type. When
// this happens, it is great. However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;
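
// For illustration: "sbb %eax, %eax" yields -1 when CF is set and 0 otherwise,
// so masking the result with 1 recovers the 0/1 value that "setb" would have
// produced while staying trivial to widen; the pattern above matches the
// leftover 8-bit case back to a plain SETB.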

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                    [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[Not64BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[Not64BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                      [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                   Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                        Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                   Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                        Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In64BitMode]>;
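
// For illustration only: on Darwin x86-64 such a call typically looks like
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)          # address of the variable returned in %rax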

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME  : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, imm:$cond,
                                                EFLAGS)))]>;
}

let usesCustomInserter = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
  // SSE1/SSE2.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  defm _FR32   : CMOVrr_PSEUDO<FR32, f32>;
  defm _FR64   : CMOVrr_PSEUDO<FR64, f64>;
  defm _V4F32  : CMOVrr_PSEUDO<VR128, v4f32>;
  defm _V2F64  : CMOVrr_PSEUDO<VR128, v2f64>;
  defm _V2I64  : CMOVrr_PSEUDO<VR128, v2i64>;
  defm _V8F32  : CMOVrr_PSEUDO<VR256, v8f32>;
  defm _V4F64  : CMOVrr_PSEUDO<VR256, v4f64>;
  defm _V4I64  : CMOVrr_PSEUDO<VR256, v4i64>;
  defm _V8I64  : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _V8F64  : CMOVrr_PSEUDO<VR512, v8f64>;
  defm _V16F32 : CMOVrr_PSEUDO<VR512, v16f32>;
  defm _V8I1   : CMOVrr_PSEUDO<VK8,  v8i1>;
  defm _V16I1  : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _V32I1  : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _V64I1  : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, Uses = [EFLAGS]
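
// For illustration only: the custom inserter expands each CMOV pseudo into a
// small branch diamond, roughly
//     jCC  .LBBkeep           # condition taken from EFLAGS
//     (fall-through block computes/uses $f)
//   .LBBkeep:
//     (join block merges $t and $f into $dst via a PHI)
// so no hardware cmov instruction is required.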

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                     "or{l}\t{$zero, $dst|$dst, $zero}",
                     [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
                   Sched<[WriteALULd, WriteRMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [], IIC_ALU_NONMEM>, LOCK;
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize16, LOCK;
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize32, LOCK;
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_MEM>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [], IIC_ALU_MEM>, LOCK;

def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
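
// For illustration: these select to ordinary memory-destination ALU forms
// carrying a lock prefix, e.g. "lock addl $1, (%rdi)", giving an atomic
// read-modify-write without tying up a register for the result.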

// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
                 !strconcat(mnemonic, "{b}\t$dst"),
                 [], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
                 !strconcat(mnemonic, "{w}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
                 !strconcat(mnemonic, "{l}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
                  !strconcat(mnemonic, "{q}\t$dst"),
                  [], IIC_UNARY_MEM>, LOCK;
}
}

defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop,
                         InstrItinClass itin> {
let isCodeGenOnly = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)], itin>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag,
                          InstrItinClass itin8, InstrItinClass itin> {
let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
                                X86cas8, i64mem,
                                IIC_CMPX_LOCK_8B>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem,
                                 IIC_CMPX_LOCK_16B>, REX_W;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
                               X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;
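
// For illustration: "lock cmpxchgl %new, (%rdi)" compares (%rdi) with %eax;
// on a match it stores %new, otherwise it loads the current memory value into
// %eax, and ZF records which case occurred -- hence the implicit EAX/RAX
// def/use on each width above.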

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag,
                             InstrItinClass itin8, InstrItinClass itin> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
                    itin8>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
                    itin>, OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
                    itin>, OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
                     itin>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
                               IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
             TB, LOCK;
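
// For illustration: an atomic fetch-add selects to "lock xaddl %val, (%rdi)",
// which stores val+mem to memory and leaves the old memory value in %val
// (tied to $dst by the constraint above).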

/* The following multiclass tries to make sure that in code like
 *    x.store (immediate op x.load(acquire), release)
 * and
 *    x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
 */
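
// For illustration only (using the same notation as the comment above): with
// these pseudos,
//    x.store (42 add x.load(acquire), release)
// can select to a single "addl $42, (mem)" -- which already has the required
// ordering on x86 -- instead of a separate load, add, and store.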
multiclass RELEASE_BINOP_MI<SDNode op> {
    def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
        "#BINOP "#NAME#"8mi PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
    def NAME#8mr : I<0, Pseudo, (outs), (ins i8mem:$dst, GR8:$src),
        "#BINOP "#NAME#"8mr PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), GR8:$src))]>;
    // NAME#16 is not generated as 16-bit arithmetic instructions are considered
    // costly and avoided as far as possible by this backend anyway
    def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
        "#BINOP "#NAME#"32mi PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), GR32:$src))]>;
    def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
        "#BINOP "#NAME#"64mi32 PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), GR64:$src))]>;
}
defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
defm RELEASE_AND : RELEASE_BINOP_MI<and>;
defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
// Note: we don't deal with sub, because subtractions of constants are
// optimized into additions before this code can run.

// Same as above, but for floating-point.
// FIXME: imm version.
// FIXME: Version that doesn't clobber $src, using AVX's VADDSS.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
let usesCustomInserter = 1 in {
multiclass RELEASE_FP_BINOP_MI<SDNode op> {
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, FR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst,
           (i32 (bitconvert (op
              (f32 (bitconvert (i32 (atomic_load_32 addr:$dst)))),
              FR32:$src))))]>, Requires<[HasSSE1]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, FR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst,
           (i64 (bitconvert (op
              (f64 (bitconvert (i64 (atomic_load_64 addr:$dst)))),
              FR64:$src))))]>, Requires<[HasSSE2]>;
}
defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
}

multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
    def NAME#8m : I<0, Pseudo, (outs), (ins i8mem:$dst),
        "#UNOP "#NAME#"8m PSEUDO!",
        [(atomic_store_8 addr:$dst, dag8)]>;
    def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),
        "#UNOP "#NAME#"16m PSEUDO!",
        [(atomic_store_16 addr:$dst, dag16)]>;
    def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),
        "#UNOP "#NAME#"32m PSEUDO!",
        [(atomic_store_32 addr:$dst, dag32)]>;
    def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),
        "#UNOP "#NAME#"64m PSEUDO!",
        [(atomic_store_64 addr:$dst, dag64)]>;
}

defm RELEASE_INC : RELEASE_UNOP<
    (add (atomic_load_8  addr:$dst), (i8 1)),
    (add (atomic_load_16 addr:$dst), (i16 1)),
    (add (atomic_load_32 addr:$dst), (i32 1)),
    (add (atomic_load_64 addr:$dst), (i64 1))>, Requires<[NotSlowIncDec]>;
defm RELEASE_DEC : RELEASE_UNOP<
    (add (atomic_load_8  addr:$dst), (i8 -1)),
    (add (atomic_load_16 addr:$dst), (i16 -1)),
    (add (atomic_load_32 addr:$dst), (i32 -1)),
    (add (atomic_load_64 addr:$dst), (i64 -1))>, Requires<[NotSlowIncDec]>;
/*
TODO: These don't work because the type inference of TableGen fails.
TODO: find a way to fix it.
defm RELEASE_NEG : RELEASE_UNOP<
    (ineg (atomic_load_8  addr:$dst)),
    (ineg (atomic_load_16 addr:$dst)),
    (ineg (atomic_load_32 addr:$dst)),
    (ineg (atomic_load_64 addr:$dst))>;
defm RELEASE_NOT : RELEASE_UNOP<
    (not (atomic_load_8  addr:$dst)),
    (not (atomic_load_16 addr:$dst)),
    (not (atomic_load_32 addr:$dst)),
    (not (atomic_load_64 addr:$dst))>;
*/

def RELEASE_MOV8mi  : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
                        "#RELEASE_MOV8mi PSEUDO!",
                        [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
                        "#RELEASE_MOV16mi PSEUDO!",
                        [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
                        "#RELEASE_MOV32mi PSEUDO!",
                        [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
                        "#RELEASE_MOV64mi32 PSEUDO!",
                        [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV8mr PSEUDO!",
                        [(atomic_store_8 addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV16mr PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV32mr PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV64mr PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV8rm PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV16rm PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV32rm PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV64rm PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;
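
// For illustration: on x86 a plain MOV already provides acquire/release
// semantics, so these pseudos lower to ordinary moves; keeping them as
// pseudos until very late simply prevents earlier passes from reordering
// the atomic accesses.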

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper mcsym:$src2)),
          (ADD32ri GR32:$src1, mcsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, mcsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable when not in small
// code model mode, should use 'movabs'.  FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri mcsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[Not64BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
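
// For illustration: "testl %eax, %eax" encodes in 2 bytes (85 C0) while
// "cmpl $0, %eax" needs 3 (83 F8 00), and both set SF/ZF/PF identically for
// a comparison against zero.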

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  let Predicates = [HasCMov] in {
    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
              (Inst16 GR16:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
              (Inst32 GR32:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
              (Inst64 GR64:$src2, addr:$src1)>;
  }
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (AND8ri (MOV8rm addr:$src), (i8 1))>;
def : Pat<(zextloadi16i1 addr:$src), (AND16ri8 (MOVZX16rm8 addr:$src), (i16 1))>;
def : Pat<(zextloadi32i1 addr:$src), (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1))>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0),
                         (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. And x86's cmov doesn't do anything if the
// condition is false. But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
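
// For illustration: "movl (%rdi), %eax" already zeroes bits 63:32 of %rax,
// so the zext above needs no additional instruction; SUBREG_TO_REG merely
// re-labels the value as 64 bits wide.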

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
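
// For illustration: in "(or (shl x, 3), 7)" the low three bits of the shifted
// operand are known zero, so the OR is equivalent to an ADD and can later be
// 3-addressified into "leal 7(,%reg,8), %dst".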

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before selecting to OR.
let AddedComplexity = 5, SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;


def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity, SchedRW


//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
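
// For illustration: "addl $128, %eax" needs a 32-bit immediate (05 80 00 00 00,
// 5 bytes), while the equivalent "subl $-128, %eax" fits the sign-extended
// 8-bit immediate form (83 E8 80, 3 bytes).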

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1
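
// For illustration: "andq $0x7f, %rax" needs a REX.W prefix (48 83 E0 7F),
// while the selected "andl $0x7f, %eax" (83 E0 7F) is one byte shorter and
// implicitly zeroes the high 32 bits anyway.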

// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
           (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
             sub_16bit)>,
      Requires<[Not64BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
let AddedComplexity = 1 in // Give priority over i64immZExt32.
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;
} // AddedComplexity = 1

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
             sub_16bit)>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
         (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
         (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
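// Concretely: a REX-prefixed instruction cannot encode AH/BH/CH/DH at all,
// because in 64-bit mode those byte-register encodings mean SPL/BPL/SIL/DIL
// instead. So "movzbl %ah, %ecx" is legal, but there is no encoding for
// "movzbl %ah, %r8d"; hence the _NOREX instruction forms below.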

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
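
// For example, "shll $1, %eax" is emitted as "addl %eax, %eax": the two
// encodings are the same length, and the add can typically execute on more
// ALU ports than a shift.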

// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 5;
}]>;
def immShift64 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 6;
}]>;

// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
defm : MaskedShiftAmountPats<rotl, "ROL">;
defm : MaskedShiftAmountPats<rotr, "ROR">;
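
// With these in place, IR such as "shl i32 %x, (and i32 %y, 31)" selects
// straight to "shll %cl, %eax" with no masking instruction: the hardware
// already truncates a 32-bit shift count to its low 5 bits (low 6 bits for
// 64-bit shifts), which is exactly what immShift32/immShift64 verify.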

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
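
// The SETB_C16r/SETB_C32r pseudos expand to "sbb reg, reg", i.e.
// reg = reg - reg - CF, yielding 0 or all-ones from the carry flag. Since
// every result bit equals CF, the anyext can simply use the wider form
// directly instead of extending the narrower one.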

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1 , imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;

// Patterns for nodes that do not produce flags, for instructions that do.
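// For example, a plain IR "add" with no user of the carry/overflow bits
// still selects to ADD64rr below; the EFLAGS definition is simply left
// dead. The same instructions thus serve both the flag-producing nodes
// and these flag-free ones.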

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment/Decrement reg.
// Do not use INC/DEC if they are slow on the target.
let Predicates = [NotSlowIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
}
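
// On targets with the SlowIncDec feature, INC/DEC are avoided because they
// update only part of EFLAGS (CF is left untouched), which can cost a
// flag-merge penalty on some processors; the generic add/sub-immediate
// patterns above handle +/-1 there instead.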

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
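
// BSF leaves its destination undefined when the source is zero, which is
// precisely the freedom cttz_zero_undef grants, so these map directly with
// no zero check; plain cttz needs extra care for the zero-input case.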

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
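// A 16-bit byte swap just exchanges the two bytes of the value, which is
// the same as rotating it by 8, e.g. "rolw $8, %ax".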
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;