//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue(), SDLoc(N));
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//       call  $next_inst
//       popl  %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                          Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, 0)>, Requires<[NotLP64]>;
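
// Illustrative sketch (added for exposition, not in the original source):
// for a 12-byte argument area, frame lowering typically rewrites the markers
// roughly as
//       subl  $12, %esp       ; ADJCALLSTACKDOWN32 12, 0
//       ...                   ; argument stores and the call itself
//       addl  $12, %esp       ; ADJCALLSTACKUP32 12, 0
// The exact expansion depends on the calling convention and on whether the
// adjustment can be folded into pushes or a neighboring adjustment.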

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                          Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, 0)>, Requires<[IsLP64]>;

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;
}

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more than
// 4k bytes in one go.  Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls) such as the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
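
// Illustrative sketch (added for exposition, not in the original source; the
// TLS slot offset and scratch register below are assumptions that vary by OS):
//       leaq  -size(%rsp), %r11
//       cmpq  %fs:0x70, %r11  ; compare against the stacklet limit
//       jae   .Lfits          ; enough room: just bump the stack pointer
//       ...                   ; otherwise call into the morestack runtime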

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH nonsense.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

// This instruction is responsible for re-establishing stack pointers after an
// exception has been caught and we are rejoining normal control flow in the
// parent function or funclet. It generally sets ESP and EBP, and optionally
// ESI. It is only needed for 32-bit WinEH, as the runtime restores CSRs for us
// elsewhere.
let hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, isCodeGenOnly = 1 in
def EH_RESTORE : I<0, Pseudo, (outs), (ins), "# EH_RESTORE", []>;

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1 in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                     "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
  let AddedComplexity = 20;
}
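
// Note (added for exposition, not in the original source): MOV32r0 is
// expanded late to the zeroing idiom
//       xorl  %eax, %eax      ; for whichever register was allocated
// The 32-bit form is used for every width because it needs no REX prefix,
// is recognized by hardware as dependency-breaking, and implicitly clears
// bits 63:32, which is what the i64 pattern above relies on.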

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1, hasSideEffects = 0 in
def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
                     "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;

// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant and for labels in the
// x86-64 small code model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [imm, X86Wrapper]>;

let AddedComplexity = 1 in
def : Pat<(i64 mov64imm32:$src),
          (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C8r  : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                  [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
}

def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETB_C8r)>;

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;
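
// Worked example (added for exposition, not in the original source):
// X86setcc_c materializes the carry as 0 or -1, i.e. "sbb %reg, %reg", so
// subtracting it is the same as adding the carry bit:
//       sbbl  %ecx, %ecx      ; %ecx = CF ? -1 : 0
//       subl  %ecx, %eax      ; %eax - (-CF) == %eax + CF
// which is why (sub OP, SETCC_CARRY) folds to "adcl $0, %eax" above.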

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                    [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[Not64BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[Not64BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                       [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                      Requires<[In64BitMode]>;
}
} // SchedRW
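
// Illustrative sketch (added for exposition, not in the original source):
// a memset such as memset(p, 0, 4096) with suitable alignment may select
// REP_STOSD_64, emitted as
//       xorl  %eax, %eax      ; the value to store
//       movl  $1024, %ecx     ; number of 32-bit stores
//       movq  p, %rdi
//       rep;stosl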

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    usesCustomInserter = 1, Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_addr32",
                  [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_base_addr32",
                  [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                  Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    usesCustomInserter = 1, Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                  "# TLS_addr64",
                  [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                  "# TLS_base_addr64",
                  [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                  Requires<[In64BitMode]>;
}
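
// Illustrative sketch (added for exposition, not in the original source):
// on ELF x86-64, TLS_addr64 is emitted as the canonical general-dynamic
// sequence that the linker knows how to relax:
//       data16 leaq x@TLSGD(%rip), %rdi
//       data16 data16 rex64 callq __tls_get_addr@PLT
// which is why the pseudo must clobber the full call-clobbered register set.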

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                "# TLSCall_32",
                [(X86TLSCall addr:$sym)]>,
                Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                "# TLSCall_64",
                [(X86TLSCall addr:$sym)]>,
                Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME  : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, imm:$cond,
                                                EFLAGS)))]>;
}

let usesCustomInserter = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
  // SSE.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  defm _FR32   : CMOVrr_PSEUDO<FR32, f32>;
  defm _FR64   : CMOVrr_PSEUDO<FR64, f64>;
  defm _V4F32  : CMOVrr_PSEUDO<VR128, v4f32>;
  defm _V2F64  : CMOVrr_PSEUDO<VR128, v2f64>;
  defm _V2I64  : CMOVrr_PSEUDO<VR128, v2i64>;
  defm _V8F32  : CMOVrr_PSEUDO<VR256, v8f32>;
  defm _V4F64  : CMOVrr_PSEUDO<VR256, v4f64>;
  defm _V4I64  : CMOVrr_PSEUDO<VR256, v4i64>;
  defm _V8I64  : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _V8F64  : CMOVrr_PSEUDO<VR512, v8f64>;
  defm _V16F32 : CMOVrr_PSEUDO<VR512, v16f32>;
  defm _V8I1   : CMOVrr_PSEUDO<VK8, v8i1>;
  defm _V16I1  : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _V32I1  : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _V64I1  : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, Uses = [EFLAGS]
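
// Illustrative sketch (added for exposition, not in the original source):
// the custom inserter expands each CMOV_* pseudo into a diamond of blocks
// joined by a PHI, roughly
//       jcc   .LtrueBB        ; branch on $cond
//       ...                   ; fall-through block yields $f
//   .LtrueBB:                 ; this block yields $t
//   .LsinkBB:
//       ...                   ; PHI merges the two values into $dst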

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
                      Sched<[WriteALULd, WriteRMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [], IIC_ALU_NONMEM>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_MEM>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [], IIC_ALU_MEM>, LOCK;

def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
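
// Note (added for exposition, not in the original source): each defm above
// instantiates the whole family of memory forms; e.g. LOCK_ADD32mi8 prints as
//       lock addl $imm, (mem)
// and is used by the backend for atomic read-modify-write operations whose
// loaded result is unused.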

// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
                 !strconcat(mnemonic, "{b}\t$dst"),
                 [], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
                 !strconcat(mnemonic, "{w}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
                 !strconcat(mnemonic, "{l}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
                  !strconcat(mnemonic, "{q}\t$dst"),
                  [], IIC_UNARY_MEM>, LOCK;
}
}

defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop,
                         InstrItinClass itin> {
let isCodeGenOnly = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)], itin>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag,
                          InstrItinClass itin8, InstrItinClass itin> {
let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
                                X86cas8, i64mem,
                                IIC_CMPX_LOCK_8B>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem,
                                 IIC_CMPX_LOCK_16B>, REX_W;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
                               X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;
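
// Illustrative sketch (added for exposition, not in the original source):
// a 32-bit compare-and-swap cmpxchg(ptr, expected, new) selects LCMPXCHG32:
//       movl  expected, %eax
//       lock cmpxchgl %new, (ptr) ; on success ZF=1 and (ptr)=new,
//                                 ; on failure %eax receives the old value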

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag,
                             InstrItinClass itin8, InstrItinClass itin> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
                    itin8>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR16:$dst,
                          (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
                    itin>, OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR32:$dst,
                          (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
                    itin>, OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set GR64:$dst,
                           (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
                     itin>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
                               IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
             TB, LOCK;
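
// Illustrative sketch (added for exposition, not in the original source):
// an atomic fetch-add such as __atomic_fetch_add(p, 5, __ATOMIC_SEQ_CST)
// on an i64 selects LXADD64:
//       movl  $5, %eax
//       lock xaddq %rax, (%rdi)   ; memory += %rax, old value -> %rax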

/* The following multiclass tries to make sure that in code like
 *  x.store (immediate op x.load(acquire), release)
 * or
 *  x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
 */
multiclass RELEASE_BINOP_MI<SDNode op> {
    def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
        "#BINOP "#NAME#"8mi PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
    def NAME#8mr : I<0, Pseudo, (outs), (ins i8mem:$dst, GR8:$src),
        "#BINOP "#NAME#"8mr PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), GR8:$src))]>;
    // NAME#16 is not generated as 16-bit arithmetic instructions are considered
    // costly and avoided as far as possible by this backend anyway
    def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
        "#BINOP "#NAME#"32mi PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), GR32:$src))]>;
    def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
        "#BINOP "#NAME#"64mi32 PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), GR64:$src))]>;
}
let Defs = [EFLAGS] in {
  defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
  defm RELEASE_AND : RELEASE_BINOP_MI<and>;
  defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
  defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
  // Note: we don't deal with sub, because subtractions of constants are
  // optimized into additions before this code can run.
}
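
// Worked example (added for exposition, not in the original source): with
// these pseudos, "x.store(x.load(memory_order_acquire) + 5,
// memory_order_release)" on an atomic<int> becomes a single
//       addl  $5, (mem)
// which is sufficient because plain x86 loads and stores already provide
// acquire/release ordering under the TSO memory model.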

// Same as above, but for floating-point.
// FIXME: imm version.
// FIXME: Version that doesn't clobber $src, using AVX's VADDSS.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
let usesCustomInserter = 1 in {
multiclass RELEASE_FP_BINOP_MI<SDNode op> {
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, FR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst,
           (i32 (bitconvert (op
              (f32 (bitconvert (i32 (atomic_load_32 addr:$dst)))),
              FR32:$src))))]>, Requires<[HasSSE1]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, FR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst,
           (i64 (bitconvert (op
              (f64 (bitconvert (i64 (atomic_load_64 addr:$dst)))),
              FR64:$src))))]>, Requires<[HasSSE2]>;
}
defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
}

multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
    def NAME#8m  : I<0, Pseudo, (outs), (ins i8mem:$dst),
        "#UNOP "#NAME#"8m PSEUDO!",
        [(atomic_store_8 addr:$dst, dag8)]>;
    def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),
        "#UNOP "#NAME#"16m PSEUDO!",
        [(atomic_store_16 addr:$dst, dag16)]>;
    def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),
        "#UNOP "#NAME#"32m PSEUDO!",
        [(atomic_store_32 addr:$dst, dag32)]>;
    def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),
        "#UNOP "#NAME#"64m PSEUDO!",
        [(atomic_store_64 addr:$dst, dag64)]>;
}

let Defs = [EFLAGS] in {
  defm RELEASE_INC : RELEASE_UNOP<
      (add (atomic_load_8  addr:$dst), (i8 1)),
      (add (atomic_load_16 addr:$dst), (i16 1)),
      (add (atomic_load_32 addr:$dst), (i32 1)),
      (add (atomic_load_64 addr:$dst), (i64 1))>, Requires<[NotSlowIncDec]>;
  defm RELEASE_DEC : RELEASE_UNOP<
      (add (atomic_load_8  addr:$dst), (i8 -1)),
      (add (atomic_load_16 addr:$dst), (i16 -1)),
      (add (atomic_load_32 addr:$dst), (i32 -1)),
      (add (atomic_load_64 addr:$dst), (i64 -1))>, Requires<[NotSlowIncDec]>;
}

/*
TODO: These don't work because the type inference of TableGen fails.
TODO: find a way to fix it.
let Defs = [EFLAGS] in {
  defm RELEASE_NEG : RELEASE_UNOP<
      (ineg (atomic_load_8  addr:$dst)),
      (ineg (atomic_load_16 addr:$dst)),
      (ineg (atomic_load_32 addr:$dst)),
      (ineg (atomic_load_64 addr:$dst))>;
}
// NOT doesn't set flags.
defm RELEASE_NOT : RELEASE_UNOP<
    (not (atomic_load_8  addr:$dst)),
    (not (atomic_load_16 addr:$dst)),
    (not (atomic_load_32 addr:$dst)),
    (not (atomic_load_64 addr:$dst))>;
*/

def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
                       "#RELEASE_MOV8mi PSEUDO!",
                       [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
                        "#RELEASE_MOV16mi PSEUDO!",
                        [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
                        "#RELEASE_MOV32mi PSEUDO!",
                        [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
                          "#RELEASE_MOV64mi32 PSEUDO!",
                          [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV8mr PSEUDO!",
                        [(atomic_store_8 addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV16mr PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV32mr PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV64mr PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV8rm PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV16rm PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV32rm PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV64rm PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;
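
// Note (added for exposition, not in the original source): as the block
// comment above explains, these pseudos are rewritten into ordinary moves
// very late, e.g. ACQUIRE_MOV32rm simply becomes
//       movl  (mem), %reg
// Keeping them as pseudos until then prevents earlier passes from reordering
// them across other memory operations.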

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper mcsym:$src2)),
          (ADD32ri GR32:$src1, mcsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, mcsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable, when not in the
// small code model, should use 'movabs'.  FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri mcsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[Not64BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  let Predicates = [HasCMov] in {
    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
              (Inst16 GR16:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
              (Inst32 GR32:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
              (Inst64 GR64:$src2, addr:$src1)>;
  }
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (AND8ri (MOV8rm addr:$src), (i8 1))>;
def : Pat<(zextloadi16i1 addr:$src), (AND16ri8 (MOVZX16rm8 addr:$src), (i16 1))>;
def : Pat<(zextloadi32i1 addr:$src), (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1))>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0),
           (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register, with a few exceptions: Truncate can be lowered to
// EXTRACT_SUBREG. CopyFromReg may be copying from a truncate. And x86's cmov
// doesn't do anything if the condition is false. But any other 32-bit
// operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
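
// Worked example (added for exposition, not in the original source): this is
// what makes a (zext i32 -> i64) of an ordinary 32-bit op free:
//       addl  %esi, %edi      ; already zeroed bits 63:32 of %rdi
//       ...                   ; SUBREG_TO_REG emits no code; %rdi is the i64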

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before the selecting to OR.
let AddedComplexity = 5, SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity, SchedRW

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
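
// Worked example (added for exposition, not in the original source): in
// two's complement x + 128 == x - (-128), and -128 fits the sign-extended
// imm8 field while +128 does not, so
//       addl  $128, %eax      ; needs a 4-byte immediate
// becomes the shorter
//       subl  $-128, %eax     ; 1-byte immediate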

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of a 8 bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1

// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGISel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;
} // AddedComplexity = 1
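
// Worked example (added for exposition, not in the original source): the
// patterns above trade an and-with-immediate for a zero-extending move,
//       andl  $255, %esi      ; keeps a dependence on all of %esi
// becomes
//       movzbl %sil, %esi     ; no immediate, breaks the upper-bit dependence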

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
            sub_16bit)>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def : Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def : Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
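// Editorial note: the 8->16 extensions above deliberately go through the
// 32-bit MOVSX/MOVZX forms; the low 16 bits of the 32-bit result are the
// desired value, and this avoids a 16-bit instruction with its operand-size
// prefix and partial-register write.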
// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
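// Editorial note: outside 64-bit mode only EAX/EBX/ECX/EDX have addressable
// byte subregisters, hence the COPY_TO_REGCLASS to GR32_ABCD/GR16_ABCD in
// the i8 trunc patterns above; in 64-bit mode every GPR has a low byte
// (via a REX prefix where needed), so a plain EXTRACT_SUBREG is enough.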
// h-register tricks
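// Illustrative example (assuming $src in %ax): "srl GR16:$src, 8" can be
// selected as a read of the high-byte register instead of a real shift:
//   movzbl %ah, %eax    # vs. movzwl %ax, %eax ; shrl $8, %eax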
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
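// Editorial note on the _NOREX forms: an instruction with a REX prefix
// cannot address AH/BH/CH/DH (those encodings are reused for
// SPL/BPL/SIL/DIL), e.g.
//   movzbl %ah, %ecx    # encodable
//   movzbl %ah, %r8d    # not encodable: %r8d forces a REX prefix
// so these pseudos constrain allocation to registers that need no REX.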
// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
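// Worked example of the caveat above: if x is undef, its two uses could
// materialize as, say, 2 and 3, and the add would produce 5 -- an odd value
// that no genuine (shl x, 1) could yield. For any defined x the transform is
// exact: x << 1 == x + x.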
// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 5;
}]>;
def immShift64 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 6;
}]>;
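// For instance, 31 (0b11111) has five trailing ones, so immShift32 accepts
// it: the hardware only consumes the low 5 bits of the count for 8/16/32-bit
// shifts (low 6 bits for 64-bit ones), making such masks redundant.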
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
defm : MaskedShiftAmountPats<rotl, "ROL">;
defm : MaskedShiftAmountPats<rotr, "ROR">;
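// Net effect (illustrative): IR like "shl %x, (and %cl, 31)" selects to a
// bare "shll %cl, %eax"; the explicit and is dropped because SHL already
// masks the count in CL to 5 bits.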
// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
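// Editorial note: X86setcc_c with X86_COND_B materializes as "sbb reg, reg"
// (all ones if CF is set, zero otherwise), so the anyext is free -- the
// wider sbb already writes every bit of the larger register.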
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
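// Editorial example: the rmi forms fold the load into the three-operand
// imul, so "(mul (loadi32 addr), 5)" becomes a single instruction, e.g.
//   imull $5, (%rdi), %eax
// instead of a separate load followed by a multiply.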
// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// Increment/Decrement reg.
// Do not use INC/DEC on targets where they are slow.
let Predicates = [NotSlowIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
}
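// Editorial note: INC/DEC leave CF untouched, which on some
// microarchitectures means a partial EFLAGS update and a dependency on the
// previous flags value; such targets prefer "addl $1, %eax" over
// "incl %eax", hence the NotSlowIncDec predicate.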
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1, imm:$src2),  (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
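// Editorial note: BSF leaves its destination undefined when the source is
// zero, which matches the contract of cttz_zero_undef exactly, so no zero
// check is needed here; a plain cttz would have to handle zero separately.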
// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
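  // Worked example (illustrative): bswap16(0x1234) = 0x3412, which is
  // exactly a 16-bit rotate by 8:
  //   rolw $8, %ax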