1 //===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes FMA (Fused Multiply-Add) instructions.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // FMA3 - Intel 3 operand Fused Multiply-Add instructions
16 //===----------------------------------------------------------------------===//
18 let Constraints = "$src1 = $dst" in {
19 multiclass fma3p_rm<bits<8> opc, string OpcodeStr> {
20 def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
21 (ins VR128:$src1, VR128:$src2, VR128:$src3),
22 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
25 def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
26 (ins VR128:$src1, VR128:$src2, f128mem:$src3),
27 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
29 def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
30 (ins VR256:$src1, VR256:$src2, VR256:$src3),
31 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
34 def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
35 (ins VR256:$src1, VR256:$src2, f256mem:$src3),
36 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
40 // Intrinsic for 132 pattern
41 multiclass fma3p_rm_int<bits<8> opc, string OpcodeStr,
42 PatFrag MemFrag128, PatFrag MemFrag256,
43 Intrinsic Int128, Intrinsic Int256> {
44 def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
45 (ins VR128:$src1, VR128:$src2, VR128:$src3),
46 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
47 [(set VR128:$dst, (Int128 VR128:$src1, VR128:$src3, VR128:$src2))]>;
49 def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
50 (ins VR128:$src1, VR128:$src2, f128mem:$src3),
51 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
52 [(set VR128:$dst, (Int128 VR128:$src1, (MemFrag128 addr:$src3), VR128:$src2))]>;
53 def rY_Int : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
54 (ins VR256:$src1, VR256:$src2, VR256:$src3),
55 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
56 [(set VR256:$dst, (Int256 VR256:$src1, VR256:$src3, VR256:$src2))]>;
58 def mY_Int : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
59 (ins VR256:$src1, VR256:$src2, f256mem:$src3),
60 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
61 [(set VR256:$dst, (Int256 VR256:$src1, (MemFrag256 addr:$src3), VR256:$src2))]>;
65 multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
66 string OpcodeStr, string PackTy,
67 PatFrag MemFrag128, PatFrag MemFrag256,
68 Intrinsic Int128, Intrinsic Int256> {
69 defm r132 : fma3p_rm_int <opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy)),
70 MemFrag128, MemFrag256, Int128, Int256>;
71 defm r132 : fma3p_rm <opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
72 defm r213 : fma3p_rm <opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy))>;
73 defm r231 : fma3p_rm <opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
77 let ExeDomain = SSEPackedSingle in {
78 defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32, memopv8f32,
79 int_x86_fma4_vfmadd_ps, int_x86_fma4_vfmadd_ps_256>;
80 defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32, memopv8f32,
81 int_x86_fma4_vfmsub_ps, int_x86_fma4_vfmsub_ps_256>;
82 defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", memopv4f32, memopv8f32,
83 int_x86_fma4_vfmaddsub_ps, int_x86_fma4_vfmaddsub_ps_256>;
// VFMSUBADDPS: packed single-precision fused multiply with alternating
// subtract/add. Bug fix: the 256-bit intrinsic was int_x86_fma4_vfmaddsub_ps_256
// (the fmaddsub operation), which mismatches the fmsubadd opcodes 0x97/0xA7/0xB7;
// use int_x86_fma4_vfmsubadd_ps_256, consistent with VFMSUBADDPD below.
defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", memopv4f32, memopv8f32,
                               int_x86_fma4_vfmsubadd_ps, int_x86_fma4_vfmsubadd_ps_256>;
88 let ExeDomain = SSEPackedDouble in {
89 defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64, memopv4f64,
90 int_x86_fma4_vfmadd_pd, int_x86_fma4_vfmadd_pd_256>, VEX_W;
91 defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64, memopv4f64,
92 int_x86_fma4_vfmsub_pd, int_x86_fma4_vfmsub_pd_256>, VEX_W;
93 defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", memopv2f64, memopv4f64,
94 int_x86_fma4_vfmaddsub_pd, int_x86_fma4_vfmaddsub_pd_256>, VEX_W;
95 defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", memopv2f64, memopv4f64,
96 int_x86_fma4_vfmsubadd_pd, int_x86_fma4_vfmsubadd_pd_256>, VEX_W;
99 // Fused Negative Multiply-Add
100 let ExeDomain = SSEPackedSingle in {
101 defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32, memopv8f32,
102 int_x86_fma4_vfnmadd_ps, int_x86_fma4_vfnmadd_ps_256>;
103 defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32, memopv8f32,
104 int_x86_fma4_vfnmsub_ps, int_x86_fma4_vfnmsub_ps_256>;
106 let ExeDomain = SSEPackedDouble in {
107 defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64, memopv4f64,
108 int_x86_fma4_vfnmadd_pd, int_x86_fma4_vfnmadd_pd_256>, VEX_W;
109 defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", memopv2f64, memopv4f64,
110 int_x86_fma4_vfnmsub_pd, int_x86_fma4_vfnmsub_pd_256>, VEX_W;
113 let Predicates = [HasFMA3], AddedComplexity = 20 in {
115 // FP double precision ADD - 256
118 // FMA231: src1 = src2*src3 + src1
119 def : Pat<(v4f64 (fadd (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
120 (VFMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
122 // FMA231: src1 = src2*src3 + src1
123 def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
124 (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
128 // FP double precision ADD - 128
132 // FMA231: src1 = src2*src3 + src1
133 def : Pat<(v2f64 (fadd (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
134 (VFMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
136 // FMA231: src1 = src2*src3 + src1
137 def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
138 (VFMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
141 // FP double precision SUB - 256
143 // FMA231: src1 = src2*src3 - src1
144 def : Pat<(v4f64 (fsub (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
145 (VFMSUBPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
147 // FMA231: src1 = src2*src3 - src1
148 def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
149 (VFMSUBPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
153 // FP double precision SUB - 128
156 // FMA231: src1 = src2*src3 - src1
157 def : Pat<(v2f64 (fsub (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
158 (VFMSUBPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
160 // FMA231: src1 = src2*src3 - src1
161 def : Pat<(v2f64 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
162 (VFMSUBPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
165 // FP double precision FNMADD - 256
167 // FMA231: src1 = - src2*src3 + src1
168 def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, (memopv4f64 addr:$src3)))),
169 (VFNMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
171 // FMA231: src1 = - src2*src3 + src1
172 def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
173 (VFNMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
176 // FP double precision FNMADD - 128
179 // FMA231: src1 = - src2*src3 + src1
180 def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, (memopv2f64 addr:$src3)))),
181 (VFNMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
183 // FMA231: src1 = - src2*src3 + src1
184 def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
185 (VFNMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
188 // FP single precision ADD - 256
191 // FMA231: src1 = src2*src3 + src1
192 def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
193 (VFMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
195 // FMA213 : src1 = src2*src1 + src3
196 def : Pat<(v8f32 (fadd (fmul VR256:$src1, VR256:$src2), (memopv8f32 addr:$src3))),
197 (VFMADDPSr213mY VR256:$src1, VR256:$src2, addr:$src3)>;
199 // FMA231: src1 = src2*src3 + src1
200 def : Pat<(v8f32 (fadd (fmul (memopv8f32 addr:$src3), VR256:$src2), VR256:$src1)),
201 (VFMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
203 // FMA213: src1 = src2*src1 + src3
204 def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src1), VR256:$src3)),
205 (VFMADDPSr213rY VR256:$src1, VR256:$src2, VR256:$src3)>;
208 // FP single precision ADD - 128
211 // FMA231 : src1 = src2*src3 + src1
212 def : Pat<(v4f32 (fadd (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
213 (VFMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
215 // FMA231 : src1 = src2*src3 + src1
216 def : Pat<(v4f32 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
217 (VFMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
220 // FP single precision SUB - 256
222 // FMA231: src1 = src2*src3 - src1
223 def : Pat<(v8f32 (fsub (fmul VR256:$src2, (memopv8f32 addr:$src3)), VR256:$src1)),
224 (VFMSUBPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
226 // FMA231: src1 = src2*src3 - src1
227 def : Pat<(v8f32 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
228 (VFMSUBPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
231 // FP single precision SUB - 128
233 // FMA231 : src1 = src2*src3 - src1
234 def : Pat<(v4f32 (fsub (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
235 (VFMSUBPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
237 // FMA231 : src1 = src2*src3 - src1
238 def : Pat<(v4f32 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
239 (VFMSUBPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
242 // FP single precision FNMADD - 256
244 // FMA231: src1 = - src2*src3 + src1
245 def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, (memopv8f32 addr:$src3)))),
246 (VFNMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
248 // FMA231: src1 = - src2*src3 + src1
249 def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
250 (VFNMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
253 // FP single precision FNMADD - 128
// FMA231: src1 = - src2*src3 + src1
257 def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, (memopv4f32 addr:$src3)))),
258 (VFNMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
// FMA231: src1 = - src2*src3 + src1
261 def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
262 (VFNMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
266 //------------------------------
268 //------------------------------
270 let Constraints = "$src1 = $dst" in {
271 multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop, RegisterClass RC> {
272 def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
273 (ins RC:$src1, RC:$src2, RC:$src3),
274 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
276 def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
277 (ins RC:$src1, RC:$src2, x86memop:$src3),
278 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
282 multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr,X86MemOperand x86memop, RegisterClass RC,
284 def r_Int : FMA3<opc, MRMSrcReg, (outs RC:$dst),
285 (ins RC:$src1, RC:$src2, RC:$src3),
286 !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
287 [(set RC:$dst, (IntId RC:$src1, RC:$src3, RC:$src2))]>;
// Memory form of the scalar FMA3 intrinsic instruction.
// Fix: $src2 was declared as hard-coded VR128 while the selection pattern
// (and the sibling r_Int form) use RC:$src2; declare it as RC so the operand
// class matches whatever register class this multiclass is instantiated with.
// (Behavior is unchanged for existing instantiations, which pass RC = VR128.)
def m_Int : FMA3<opc, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, RC:$src2, x86memop:$src3),
             !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
             [(set RC:$dst, (IntId RC:$src1, (load addr:$src3), RC:$src2))]>;
295 multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
296 string OpcodeStr, string PackTy, X86MemOperand MemOp,
297 RegisterClass RC, Intrinsic IntId> {
298 defm r132 : fma3s_rm <opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy)), MemOp, RC>;
299 defm r213 : fma3s_rm <opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy)), MemOp, RC>;
300 defm r231 : fma3s_rm <opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy)), MemOp, RC>;
301 defm r132_Int: fma3s_rm_int <opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy)), MemOp, VR128, IntId>;
304 defm VFMADDSS : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "ss", f32mem, FR32, int_x86_fma4_vfmadd_ss>, VEX_LIG;
305 defm VFMADDSD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "sd", f64mem, FR64, int_x86_fma4_vfmadd_sd>, VEX_W, VEX_LIG;
306 defm VFMSUBSS : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "ss", f32mem, FR32, int_x86_fma4_vfmsub_ss>, VEX_LIG;
307 defm VFMSUBSD : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "sd", f64mem, FR64, int_x86_fma4_vfmsub_sd>, VEX_W, VEX_LIG;
309 defm VFNMADDSS : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "ss", f32mem, FR32, int_x86_fma4_vfnmadd_ss>, VEX_LIG;
310 defm VFNMADDSD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "sd", f64mem, FR64, int_x86_fma4_vfnmadd_sd>, VEX_W, VEX_LIG;
311 defm VFNMSUBSS : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "ss", f32mem, FR32, int_x86_fma4_vfnmsub_ss>, VEX_LIG;
312 defm VFNMSUBSD : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "sd", f64mem, FR64, int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;
315 let Predicates = [HasFMA3], AddedComplexity = 20 in {
322 // FMADD231 : src1 = src2*src3 + src1
323 def : Pat<(f32 (fadd (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
324 (VFMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
326 def : Pat<(f32 (fadd (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
327 (VFMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
329 def : Pat<(f64 (fadd (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
330 (VFMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
332 def : Pat<(f64 (fadd (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
333 (VFMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
338 // FP scalar SUB src2*src3 - src1
341 def : Pat<(f32 (fsub (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
342 (VFMSUBSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
344 def : Pat<(f32 (fsub (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
345 (VFMSUBSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
347 def : Pat<(f64 (fsub (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
348 (VFMSUBSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
350 def : Pat<(f64 (fsub (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
351 (VFMSUBSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
354 // FP scalar NADD src1 - src2*src3
357 def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, FR32:$src3))),
358 (VFNMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
360 def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, (loadf32 addr:$src3)))),
361 (VFNMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
363 def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, FR64:$src3))),
364 (VFNMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
366 def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, (loadf64 addr:$src3)))),
367 (VFNMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
371 //===----------------------------------------------------------------------===//
372 // FMA4 - AMD 4 operand Fused Multiply-Add instructions
373 //===----------------------------------------------------------------------===//
376 multiclass fma4s<bits<8> opc, string OpcodeStr, Operand memop,
377 ComplexPattern mem_cpat, Intrinsic Int> {
378 def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
379 (ins VR128:$src1, VR128:$src2, VR128:$src3),
380 !strconcat(OpcodeStr,
381 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
383 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
384 def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
385 (ins VR128:$src1, VR128:$src2, memop:$src3),
386 !strconcat(OpcodeStr,
387 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
389 (Int VR128:$src1, VR128:$src2, mem_cpat:$src3))]>, VEX_W, MemOp4;
390 def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
391 (ins VR128:$src1, memop:$src2, VR128:$src3),
392 !strconcat(OpcodeStr,
393 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
395 (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>;
397 let isCodeGenOnly = 1 in
398 def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
399 (ins VR128:$src1, VR128:$src2, VR128:$src3),
400 !strconcat(OpcodeStr,
401 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
404 multiclass fma4p<bits<8> opc, string OpcodeStr,
405 Intrinsic Int128, Intrinsic Int256,
406 PatFrag ld_frag128, PatFrag ld_frag256> {
407 def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
408 (ins VR128:$src1, VR128:$src2, VR128:$src3),
409 !strconcat(OpcodeStr,
410 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
412 (Int128 VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
413 def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
414 (ins VR128:$src1, VR128:$src2, f128mem:$src3),
415 !strconcat(OpcodeStr,
416 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
417 [(set VR128:$dst, (Int128 VR128:$src1, VR128:$src2,
418 (ld_frag128 addr:$src3)))]>, VEX_W, MemOp4;
419 def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
420 (ins VR128:$src1, f128mem:$src2, VR128:$src3),
421 !strconcat(OpcodeStr,
422 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
424 (Int128 VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>;
425 def rrY : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
426 (ins VR256:$src1, VR256:$src2, VR256:$src3),
427 !strconcat(OpcodeStr,
428 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
430 (Int256 VR256:$src1, VR256:$src2, VR256:$src3))]>, VEX_W, MemOp4;
431 def rmY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
432 (ins VR256:$src1, VR256:$src2, f256mem:$src3),
433 !strconcat(OpcodeStr,
434 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
435 [(set VR256:$dst, (Int256 VR256:$src1, VR256:$src2,
436 (ld_frag256 addr:$src3)))]>, VEX_W, MemOp4;
437 def mrY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
438 (ins VR256:$src1, f256mem:$src2, VR256:$src3),
439 !strconcat(OpcodeStr,
440 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
442 (Int256 VR256:$src1, (ld_frag256 addr:$src2), VR256:$src3))]>;
444 let isCodeGenOnly = 1 in {
445 def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
446 (ins VR128:$src1, VR128:$src2, VR128:$src3),
447 !strconcat(OpcodeStr,
448 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
449 def rrY_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
450 (ins VR256:$src1, VR256:$src2, VR256:$src3),
451 !strconcat(OpcodeStr,
452 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
453 } // isCodeGenOnly = 1
456 let Predicates = [HasFMA4] in {
458 defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", ssmem, sse_load_f32,
459 int_x86_fma4_vfmadd_ss>;
460 defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", sdmem, sse_load_f64,
461 int_x86_fma4_vfmadd_sd>;
462 defm VFMADDPS4 : fma4p<0x68, "vfmaddps", int_x86_fma4_vfmadd_ps,
463 int_x86_fma4_vfmadd_ps_256, memopv4f32, memopv8f32>;
464 defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", int_x86_fma4_vfmadd_pd,
465 int_x86_fma4_vfmadd_pd_256, memopv2f64, memopv4f64>;
466 defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", ssmem, sse_load_f32,
467 int_x86_fma4_vfmsub_ss>;
468 defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", sdmem, sse_load_f64,
469 int_x86_fma4_vfmsub_sd>;
470 defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", int_x86_fma4_vfmsub_ps,
471 int_x86_fma4_vfmsub_ps_256, memopv4f32, memopv8f32>;
472 defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", int_x86_fma4_vfmsub_pd,
473 int_x86_fma4_vfmsub_pd_256, memopv2f64, memopv4f64>;
474 defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", ssmem, sse_load_f32,
475 int_x86_fma4_vfnmadd_ss>;
476 defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", sdmem, sse_load_f64,
477 int_x86_fma4_vfnmadd_sd>;
478 defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", int_x86_fma4_vfnmadd_ps,
479 int_x86_fma4_vfnmadd_ps_256, memopv4f32, memopv8f32>;
480 defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", int_x86_fma4_vfnmadd_pd,
481 int_x86_fma4_vfnmadd_pd_256, memopv2f64, memopv4f64>;
482 defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", ssmem, sse_load_f32,
483 int_x86_fma4_vfnmsub_ss>;
484 defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", sdmem, sse_load_f64,
485 int_x86_fma4_vfnmsub_sd>;
486 defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", int_x86_fma4_vfnmsub_ps,
487 int_x86_fma4_vfnmsub_ps_256, memopv4f32, memopv8f32>;
488 defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", int_x86_fma4_vfnmsub_pd,
489 int_x86_fma4_vfnmsub_pd_256, memopv2f64, memopv4f64>;
490 defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", int_x86_fma4_vfmaddsub_ps,
491 int_x86_fma4_vfmaddsub_ps_256, memopv4f32, memopv8f32>;
492 defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", int_x86_fma4_vfmaddsub_pd,
493 int_x86_fma4_vfmaddsub_pd_256, memopv2f64, memopv4f64>;
494 defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", int_x86_fma4_vfmsubadd_ps,
495 int_x86_fma4_vfmsubadd_ps_256, memopv4f32, memopv8f32>;
496 defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", int_x86_fma4_vfmsubadd_pd,
497 int_x86_fma4_vfmsubadd_pd_256, memopv2f64, memopv4f64>;