//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl  %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In32BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In32BitMode]>;
}
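
// As a minimal sketch (assuming a cdecl call with 8 bytes of outgoing
// arguments), the expanded call sequence looks roughly like:
//     subl $8, %esp        # ADJCALLSTACKDOWN32 8
//     ...store arguments, call...
//     addl $8, %esp        # ADJCALLSTACKUP32 8, 0
// which is why EFLAGS is pessimistically listed in Defs above.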

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}

// x86-64 va_start lowering magic.
let usesCustomInserter = 1 in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                               i64imm:$regsavefi, i64imm:$offset,
                               variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;
}

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4K bytes in one go.  Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in the correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to an ordinary call) such as the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[In32BitMode]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                     "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in {
def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
                 [(set GR8:$dst, 0)]>;
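
// The zeroing idiom is both smaller and faster than an explicit move of an
// immediate; for example (sizes from the standard encodings):
//     xorl %eax, %eax      # 2 bytes, recognized as a dependency-breaking idiom
//     movl $0, %eax        # 5 bytes
// The xor does clobber EFLAGS, which is why EFLAGS appears in Defs above.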

// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and avoids a partial-register update sometimes, but doing so
// at isel time interferes with rematerialization in the current register
// allocator. For now, this is rewritten when the instruction is lowered
// to an MCInst.
def MOV16r0   : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
                  "",
                  [(set GR16:$dst, 0)]>, OpSize;

// FIXME: Set encoding to pseudo.
def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>;
}

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], isCodeGenOnly=1,
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0   : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, 0)]>;

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
                OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // isCodeGenOnly
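
// These lower to "sbb reg, reg", which computes reg - reg - CF, i.e. 0 when
// the carry flag is clear and -1 (all ones) when it is set:
//     sbbl %eax, %eax      # %eax = CF ? 0xffffffff : 0
// so a single instruction materializes the carry bit into every bit of the
// destination.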

def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;
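
// The effect of the first group, for example, is that "x + (CF ? 1 : 0)"
// becomes a single instruction instead of a setb/movzbl/add sequence:
//     adcl $0, %eax        # %eax += 0 + CF
// The sub forms work the same way via sbb, and the SETCC_CARRY forms use adc
// because subtracting (CF ? -1 : 0) is the same as adding the carry.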

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                  [(X86rep_movs i8)]>, REP;
def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                  [(X86rep_movs i16)]>, REP, OpSize;
def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                  [(X86rep_movs i32)]>, REP;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                  [(X86rep_stos i8)]>, REP;
let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                  [(X86rep_stos i16)]>, REP, OpSize;
let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                  [(X86rep_stos i32)]>, REP;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[In32BitMode]>;

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode]>;

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                  Requires<[In32BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP, RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                  Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure.
let usesCustomInserter = 1, Uses = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                  "#CMOV_GR32* PSEUDO!",
                  [(set GR32:$dst,
                    (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                  "#CMOV_GR16* PSEUDO!",
                  [(set GR16:$dst,
                    (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                   (outs RFP32:$dst),
                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                   "#CMOV_RFP32 PSEUDO!",
                   [(set RFP32:$dst,
                     (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                              EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                   (outs RFP64:$dst),
                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                   "#CMOV_RFP64 PSEUDO!",
                   [(set RFP64:$dst,
                     (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                              EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                   (outs RFP80:$dst),
                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                   "#CMOV_RFP80 PSEUDO!",
                   [(set RFP80:$dst,
                     (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                              EFLAGS))]>;
} // Predicates = [NoCMov]
} // UsesCustomInserter = 1, Uses = [EFLAGS]
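
// As a rough sketch, the custom inserter expands each of these into a branch
// diamond of the form:
//     thisMBB:
//       jCC sinkMBB          # condition taken from $cond / EFLAGS
//     copy0MBB:              # fallthrough
//       (no code)
//     sinkMBB:
//       $dst = PHI [$src2, thisMBB], [$src1, copy0MBB]
// (exactly which source pairs with which edge is up to the inserter).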

//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {

def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMXOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMNAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;

def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                "#ATOMAND16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                "#ATOMOR16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                "#ATOMXOR16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMNAND16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                "#ATOMMIN16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                "#ATOMMAX16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMUMIN16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMUMAX16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;

def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMAND32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMOR32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMXOR32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMNAND32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                "#ATOMMIN32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMMAX32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMUMIN32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMUMAX32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;

def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                "#ATOMAND64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                "#ATOMOR64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                "#ATOMXOR64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMNAND64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                "#ATOMMIN64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                "#ATOMMAX64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMUMIN64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMUMAX64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

let Constraints = "$val1 = $dst1, $val2 = $dst2",
    Defs = [EFLAGS, EAX, EBX, ECX, EDX],
    Uses = [EAX, EBX, ECX, EDX],
    mayLoad = 1, mayStore = 1,
    usesCustomInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                   (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                   "#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMSWAP6432 PSEUDO!", []>;
}

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "lock\n\t"
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      []>, Requires<[In32BitMode]>, LOCK;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def #NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                  MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                  !strconcat("lock\n\t", mnemonic, "{b}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  []>, LOCK;
def #NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   !strconcat("lock\n\t", mnemonic, "{w}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   []>, OpSize, LOCK;
def #NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   !strconcat("lock\n\t", mnemonic, "{l}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   []>, LOCK;
def #NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                     RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                    MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    !strconcat("lock\n\t", mnemonic, "{q}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    []>, LOCK;

def #NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                     ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                    ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                    !strconcat("lock\n\t", mnemonic, "{b}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    []>, LOCK;

def #NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, OpSize, LOCK;

def #NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, LOCK;

def #NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat("lock\n\t", mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         []>, LOCK;

def #NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, OpSize, LOCK;
def #NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, LOCK;
def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                        ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                       ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                       !strconcat("lock\n\t", mnemonic, "{q}\t",
                                  "{$src2, $dst|$dst, $src2}"),
                       []>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;

// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "inc{b}\t$dst", []>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "inc{w}\t$dst", []>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "inc{l}\t$dst", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;

def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "dec{b}\t$dst", []>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "dec{w}\t$dst", []>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "dec{l}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}

// Atomic compare and swap.
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    isCodeGenOnly = 1 in
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "lock\n\t"
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    isCodeGenOnly = 1 in
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "lock\n\t"
                     "cmpxchg16b\t$ptr",
                     [(X86cas16 addr:$ptr)]>, TB, LOCK,
                     Requires<[HasCmpxchg16b]>;

let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
                  "lock\n\t"
                  "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
                  [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;

let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
                   "lock\n\t"
                   "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;

let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
                   "lock\n\t"
                   "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;

let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchg{q}\t{$swap, $ptr|$ptr, $swap}",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
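
// The hardware semantics these rely on: "lock cmpxchg %src, mem" compares the
// accumulator (AL/AX/EAX/RAX) with mem; if they are equal, %src is stored to
// mem and ZF is set, otherwise mem is loaded into the accumulator and ZF is
// cleared. That is why the accumulator appears in both Uses and Defs above.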

// Atomic exchange and add
let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
                "lock\n\t"
                "xadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
                TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
                "lock\n\t"
                "xadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
                TB, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
                "lock\n\t"
                "xadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
                TB, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd{q}\t{$val, $ptr|$ptr, $val}",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
}

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_8  addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;
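
// Note that on x86 (a strongly ordered, TSO-like memory model) an ordinary
// MOV already has acquire semantics when loading and release semantics when
// storing, so these pseudos can be lowered to plain moves; keeping them as
// distinct opcodes mainly stops later passes from reordering or folding them
// like ordinary memory operations.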

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
def CMOV_FR32 : I<0, Pseudo,
                  (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                  "#CMOV_FR32 PSEUDO!",
                  [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                            EFLAGS))]>;
def CMOV_FR64 : I<0, Pseudo,
                  (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                  "#CMOV_FR64 PSEUDO!",
                  [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                            EFLAGS))]>;
def CMOV_V4F32 : I<0, Pseudo,
                   (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                   "#CMOV_V4F32 PSEUDO!",
                   [(set VR128:$dst,
                     (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                     EFLAGS)))]>;
def CMOV_V2F64 : I<0, Pseudo,
                   (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                   "#CMOV_V2F64 PSEUDO!",
                   [(set VR128:$dst,
                     (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                     EFLAGS)))]>;
def CMOV_V2I64 : I<0, Pseudo,
                   (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                   "#CMOV_V2I64 PSEUDO!",
                   [(set VR128:$dst,
                     (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                     EFLAGS)))]>;
def CMOV_V8F32 : I<0, Pseudo,
                   (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                   "#CMOV_V8F32 PSEUDO!",
                   [(set VR256:$dst,
                     (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                     EFLAGS)))]>;
def CMOV_V4F64 : I<0, Pseudo,
                   (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                   "#CMOV_V4F64 PSEUDO!",
                   [(set VR256:$dst,
                     (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                     EFLAGS)))]>;
def CMOV_V4I64 : I<0, Pseudo,
                   (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                   "#CMOV_V4I64 PSEUDO!",
                   [(set VR256:$dst,
                     (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                     EFLAGS)))]>;
}

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable when not in small
// code model mode, should use 'movabs'.  FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;

// tailcall stuff
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
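
// The size difference comes from the encodings, e.g. for the 32-bit case:
//     testl %eax, %eax     # 85 C0      (2 bytes)
//     cmpl  $0, %eax       # 83 F8 00   (3 bytes)
// and both set ZF/SF identically for a comparison against zero.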

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
            (Inst16 GR16:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
            (Inst32 GR32:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
            (Inst64 GR64:$src2, addr:$src1)>;
}
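
// CMOVcc can only fold a load on the source operand (the value moved in when
// the condition holds), so when the select's loaded operand is on the other
// side we commute the operands and use the inverted condition. Roughly, a
// select of "B ? reg : mem" can be emitted as:
//     cmovael mem, %reg    # move from memory only when AE (i.e. !B) holds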

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

// Most instructions that define a 32-bit result implicitly zero the high half
// of the 64-bit register. The exceptions we must screen out: Truncate can be
// lowered to EXTRACT_SUBREG. CopyFromReg may be copying from a truncate. And
// x86's cmov doesn't do anything if the condition is false. Any other 32-bit
// operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
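
// For example, after "movl %esi, %eax" (or any 32-bit ALU operation) bits
// 63:32 of %rax are already zero, so the i64 zero-extension above needs no
// instruction at all; SUBREG_TO_REG just re-labels the 32-bit value as a
// 64-bit one.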

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
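
// A sketch of why this is profitable: if %eax is known to have its low four
// bits clear, then "orl $7, %eax" and "addl $7, %eax" produce the same value,
// and only the add form can be turned into a three-address LEA:
//     leal 7(%rax), %ecx   # no copy of %eax needed first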

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before the selecting to OR

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
} // AddedComplexity

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
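
// Concretely, for the 32-bit register form:
//     addl $128, %eax      # 05 80 00 00 00   (5 bytes, needs imm32)
//     subl $-128, %eax     # 83 E8 80         (3 bytes, sign-extended imm8)
// Both compute the same result since x + 128 == x - (-128).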

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
            sub_16bit)>,
      Requires<[In32BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
            sub_16bit)>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
         (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
         (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
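
// The register-class gymnastics above exist because AH/BH/CH/DH are only
// addressable in instructions *without* a REX prefix, while R8B-R15B and
// SPL/BPL/SIL/DIL require one; e.g. "movb %ah, (%r8)" is not encodable.
// Constraining operands to GR*_ABCD and using the _NOREX instruction forms
// keeps the allocator from ever pairing an h register with a REX register.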

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
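// Selecting ADD here is, if anything, cheaper than a shift-by-one: the
// encodings are the same length, and on many x86 implementations simple adds
// can issue on more execution ports than shifts.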

// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
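// Hardware shifts use only the low 5 bits of CL for 8/16/32-bit operands
// (and the low 6 bits for 64-bit operands), so any mask with at least that
// many trailing ones leaves the effective count unchanged. For example, C
// code like "x << (n & 31)" can select to a bare "shll %cl, %eax" with no
// explicit andl.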

// (shl x (and y, 31)) ==> (shl x, y)
def : Pat<(shl GR8:$src1, (and CL, immShift32)),
          (SHL8rCL GR8:$src1)>;
def : Pat<(shl GR16:$src1, (and CL, immShift32)),
          (SHL16rCL GR16:$src1)>;
def : Pat<(shl GR32:$src1, (and CL, immShift32)),
          (SHL32rCL GR32:$src1)>;
def : Pat<(store (shl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL8mCL addr:$dst)>;
def : Pat<(store (shl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL16mCL addr:$dst)>;
def : Pat<(store (shl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL32mCL addr:$dst)>;

def : Pat<(srl GR8:$src1, (and CL, immShift32)),
          (SHR8rCL GR8:$src1)>;
def : Pat<(srl GR16:$src1, (and CL, immShift32)),
          (SHR16rCL GR16:$src1)>;
def : Pat<(srl GR32:$src1, (and CL, immShift32)),
          (SHR32rCL GR32:$src1)>;
def : Pat<(store (srl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR8mCL addr:$dst)>;
def : Pat<(store (srl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR16mCL addr:$dst)>;
def : Pat<(store (srl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR32mCL addr:$dst)>;

def : Pat<(sra GR8:$src1, (and CL, immShift32)),
          (SAR8rCL GR8:$src1)>;
def : Pat<(sra GR16:$src1, (and CL, immShift32)),
          (SAR16rCL GR16:$src1)>;
def : Pat<(sra GR32:$src1, (and CL, immShift32)),
          (SAR32rCL GR32:$src1)>;
def : Pat<(store (sra (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR8mCL addr:$dst)>;
def : Pat<(store (sra (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR16mCL addr:$dst)>;
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR32mCL addr:$dst)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, immShift64)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, immShift64)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, immShift64)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
          (SAR64mCL addr:$dst)>;

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
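// SETB_C* expands to "sbb %reg, %reg", which copies the carry flag into
// every bit of its destination. All result bits are therefore identical, so
// widening is free: the anyext is folded away by simply selecting the wider
// SETB_C form.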

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
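
// The target-independent add/sub/mul/and/or/xor nodes matched below carry no
// flag result, but every x86 arithmetic instruction writes EFLAGS anyway, so
// it is always safe to select them to the flag-defining forms.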

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
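// The *ri8 forms use the sign-extended-imm8 encoding (opcode 0x83), saving
// one to three immediate bytes whenever the constant fits in a signed byte.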

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
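// No 8-bit patterns appear above because x86's two- and three-operand imul
// forms start at 16 bits; i8 multiplies are selected through the AL-implicit
// one-operand forms instead.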

// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiplication
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
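// The INC64_* / DEC64_* forms exist because the one-byte inc/dec opcodes
// (0x40-0x4F) are reinterpreted as REX prefixes in 64-bit mode; those
// instructions use the two-byte 0xFF /0 and /1 encodings instead.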

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
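// BSF leaves its destination undefined when the source is zero, which is
// exactly the contract of cttz_zero_undef. For example, "llvm.cttz.i32(%x,
// i1 true)" can select to a bare "bsfl %eax, %eax"; the zero-defined variant
// typically needs an extra test-and-cmov sequence around the bsf.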