//===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes FMA (Fused Multiply-Add) instructions.
//
//===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // FMA3 - Intel 3 operand Fused Multiply-Add instructions
16 //===----------------------------------------------------------------------===//
let Constraints = "$src1 = $dst" in {
// Packed FMA3 register/register and register/memory forms, 128-bit and
// 256-bit. Only the 213 variant is given a real selection pattern; for the
// 132/231 variants Op stays null_frag and the defs exist for asm/disasm.
// Note the pattern operand order (src2, src1, src3) matches the 213 encoding.
multiclass fma3p_rm<bits<8> opc, string OpcodeStr,
                    PatFrag MemFrag128, PatFrag MemFrag256,
                    ValueType OpVT128, ValueType OpVT256,
                    SDPatternOperator Op = null_frag> {
  let isCommutable = 1, usesCustomInserter = 1 in
  def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set VR128:$dst, (OpVT128 (Op VR128:$src2,
                                           VR128:$src1, VR128:$src3)))]>;

  def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, f128mem:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set VR128:$dst, (OpVT128 (Op VR128:$src2, VR128:$src1,
                                           (MemFrag128 addr:$src3))))]>;

  let isCommutable = 1, usesCustomInserter = 1 in
  def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
                (ins VR256:$src1, VR256:$src2, VR256:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set VR256:$dst, (OpVT256 (Op VR256:$src2, VR256:$src1,
                                            VR256:$src3)))]>, VEX_L;

  def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
                (ins VR256:$src1, VR256:$src2, f256mem:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set VR256:$dst,
                   (OpVT256 (Op VR256:$src2, VR256:$src1,
                             (MemFrag256 addr:$src3))))]>, VEX_L;
}
} // Constraints = "$src1 = $dst"
// Instantiates all three operand-order forms (132/213/231) of one packed
// FMA3 opcode family. Only the 213 form carries a pattern; the 132/231
// forms are side-effect-free shells used by assembly and commuting code.
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr, string PackTy,
                       PatFrag MemFrag128, PatFrag MemFrag256,
                       SDNode Op, ValueType OpTy128, ValueType OpTy256> {
  defm r213 : fma3p_rm<opc213,
                       !strconcat(OpcodeStr, "213", PackTy),
                       MemFrag128, MemFrag256, OpTy128, OpTy256, Op>;
let neverHasSideEffects = 1 in {
  defm r132 : fma3p_rm<opc132,
                       !strconcat(OpcodeStr, "132", PackTy),
                       MemFrag128, MemFrag256, OpTy128, OpTy256>;
  defm r231 : fma3p_rm<opc231,
                       !strconcat(OpcodeStr, "231", PackTy),
                       MemFrag128, MemFrag256, OpTy128, OpTy256>;
} // neverHasSideEffects = 1
}
// Fused Multiply-Add, packed single precision.
let ExeDomain = SSEPackedSingle in {
  defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", loadv4f32,
                              loadv8f32, X86Fmadd, v4f32, v8f32>;
  defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", loadv4f32,
                              loadv8f32, X86Fmsub, v4f32, v8f32>;
  defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps",
                                 loadv4f32, loadv8f32, X86Fmaddsub,
                                 v4f32, v8f32>;
  defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps",
                                 loadv4f32, loadv8f32, X86Fmsubadd,
                                 v4f32, v8f32>;
}
// Fused Multiply-Add, packed double precision. VEX_W selects the 64-bit
// element forms of the same opcodes used by the "ps" variants above.
let ExeDomain = SSEPackedDouble in {
  defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", loadv2f64,
                              loadv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
  defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", loadv2f64,
                              loadv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
  defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd",
                                 loadv2f64, loadv4f64, X86Fmaddsub,
                                 v2f64, v4f64>, VEX_W;
  defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd",
                                 loadv2f64, loadv4f64, X86Fmsubadd,
                                 v2f64, v4f64>, VEX_W;
}
102 // Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", loadv4f32,
                               loadv8f32, X86Fnmadd, v4f32, v8f32>;
  defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", loadv4f32,
                               loadv8f32, X86Fnmsub, v4f32, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
  defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", loadv2f64,
                               loadv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
  defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd",
                               loadv2f64, loadv4f64, X86Fnmsub, v2f64,
                               v4f64>, VEX_W;
}
let Constraints = "$src1 = $dst" in {
// Scalar FMA3 register and memory forms. As with the packed variants, only
// the 213 form receives a real pattern (OpNode defaults to null_frag), and
// the pattern operand order (src2, src1, src3) matches the 213 encoding.
multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
                    RegisterClass RC, ValueType OpVT, PatFrag mem_frag,
                    SDPatternOperator OpNode = null_frag> {
  let isCommutable = 1, usesCustomInserter = 1 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                 (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;

  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                 (OpVT (OpNode RC:$src2, RC:$src1,
                        (mem_frag addr:$src3))))]>;
}
} // Constraints = "$src1 = $dst"
// Instantiates the 132/213/231 forms of one scalar FMA3 opcode family.
// 132/231 are pattern-less, side-effect-free shells; 213 carries OpNode.
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpStr, string PackTy, string PT2, Intrinsic Int,
                       SDNode OpNode, RegisterClass RC, ValueType OpVT,
                       X86MemOperand x86memop, Operand memop, PatFrag mem_frag,
                       ComplexPattern mem_cpat> {
let neverHasSideEffects = 1 in {
  defm r132 : fma3s_rm<opc132, !strconcat(OpStr, "132", PackTy),
                       x86memop, RC, OpVT, mem_frag>;
  defm r231 : fma3s_rm<opc231, !strconcat(OpStr, "231", PackTy),
                       x86memop, RC, OpVT, mem_frag>;
} // neverHasSideEffects = 1

defm r213 : fma3s_rm<opc213, !strconcat(OpStr, "213", PackTy),
                     x86memop, RC, OpVT, mem_frag, OpNode>;
}
// Scalar FMA3: instantiates the SS (f32) and SD (f64) families and maps the
// scalar intrinsics onto the 213 register forms, bouncing the XMM operands
// through the scalar register class and back.
multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                 string OpStr, Intrinsic IntF32, Intrinsic IntF64,
                 SDNode OpNode> {
  defm SS : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", "SS", IntF32, OpNode,
                        FR32, f32, f32mem, ssmem, loadf32, sse_load_f32>;
  defm SD : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", "PD", IntF64, OpNode,
                        FR64, f64, f64mem, sdmem, loadf64, sse_load_f64>, VEX_W;

  def : Pat<(IntF32 VR128:$src1, VR128:$src2, VR128:$src3),
            (COPY_TO_REGCLASS
              (!cast<Instruction>(NAME#"SSr213r")
                (COPY_TO_REGCLASS $src2, FR32),
                (COPY_TO_REGCLASS $src1, FR32),
                (COPY_TO_REGCLASS $src3, FR32)),
              VR128)>;

  def : Pat<(IntF64 VR128:$src1, VR128:$src2, VR128:$src3),
            (COPY_TO_REGCLASS
              (!cast<Instruction>(NAME#"SDr213r")
                (COPY_TO_REGCLASS $src2, FR64),
                (COPY_TO_REGCLASS $src1, FR64),
                (COPY_TO_REGCLASS $src3, FR64)),
              VR128)>;
}
// Scalar FMA3 instantiations: fused multiply-add / multiply-subtract.
defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss,
                    int_x86_fma_vfmadd_sd, X86Fmadd>, VEX_LIG;
defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", int_x86_fma_vfmsub_ss,
                    int_x86_fma_vfmsub_sd, X86Fmsub>, VEX_LIG;

// Scalar fused negative multiply-add / multiply-subtract.
defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", int_x86_fma_vfnmadd_ss,
                     int_x86_fma_vfnmadd_sd, X86Fnmadd>, VEX_LIG;
defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss,
                     int_x86_fma_vfnmsub_sd, X86Fnmsub>, VEX_LIG;
192 //===----------------------------------------------------------------------===//
193 // FMA4 - AMD 4 operand Fused Multiply-Add instructions
194 //===----------------------------------------------------------------------===//
// Scalar FMA4 (AMD, 4-operand): rr/rm/mr forms plus a pattern-less rr_REV
// shell so the disassembler can print the swapped-operand encoding.
multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                 X86MemOperand x86memop, ValueType OpVT, SDNode OpNode,
                 PatFrag mem_frag> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcReg, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, VEX_LIG, MemOp4;
  def rm : FMA4<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, x86memop:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
                           (mem_frag addr:$src3)))]>, VEX_W, VEX_LIG, MemOp4;
  def mr : FMA4<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, x86memop:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG;
  // For disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : FMA4<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
               VEX_LIG;
}
// Scalar FMA4 intrinsic forms: operate on whole XMM registers via the
// int_x86_fma_* intrinsics rather than scalar-typed SDNodes.
multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
                     ComplexPattern mem_cpat, Intrinsic Int> {
let isCodeGenOnly = 1 in {
  let isCommutable = 1 in
  def rr_Int : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst,
                 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, VEX_LIG, MemOp4;
  def rm_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, memop:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst, (Int VR128:$src1, VR128:$src2,
                                  mem_cpat:$src3))]>, VEX_W, VEX_LIG, MemOp4;
  def mr_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, memop:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst,
                 (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>, VEX_LIG;
} // isCodeGenOnly = 1
}
// Packed FMA4: 128-bit (rr/rm/mr) and 256-bit (rrY/rmY/mrY) forms, plus
// pattern-less *_REV shells for the disassembler. VEX_W + MemOp4 select the
// encoding whose memory operand is src3; without them it is src2.
multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                 ValueType OpVT128, ValueType OpVT256,
                 PatFrag ld_frag128, PatFrag ld_frag256> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>,
           VEX_W, MemOp4;
  def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, f128mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst, (OpNode VR128:$src1, VR128:$src2,
                              (ld_frag128 addr:$src3)))]>, VEX_W, MemOp4;
  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, f128mem:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>;
  let isCommutable = 1 in
  def rrY : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, VR256:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR256:$dst,
             (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>,
           VEX_W, MemOp4, VEX_L;
  def rmY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, f256mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2,
                              (ld_frag256 addr:$src3)))]>, VEX_W, MemOp4, VEX_L;
  def mrY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, f256mem:$src2, VR256:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR256:$dst, (OpNode VR256:$src1,
             (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L;
  // For disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
    def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2, VR128:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
    def rrY_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
                  (ins VR256:$src1, VR256:$src2, VR256:$src3),
                  !strconcat(OpcodeStr,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                  VEX_L;
  } // isCodeGenOnly = 1
}
// Scalar FMA4 instantiations: each mnemonic gets the SDNode-based forms
// (fma4s) plus the XMM-wide intrinsic forms (fma4s_int).
defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, X86Fmadd, loadf32>,
                 fma4s_int<0x6A, "vfmaddss", ssmem, sse_load_f32,
                           int_x86_fma_vfmadd_ss>;
defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, X86Fmadd, loadf64>,
                 fma4s_int<0x6B, "vfmaddsd", sdmem, sse_load_f64,
                           int_x86_fma_vfmadd_sd>;
defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86Fmsub, loadf32>,
                 fma4s_int<0x6E, "vfmsubss", ssmem, sse_load_f32,
                           int_x86_fma_vfmsub_ss>;
defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86Fmsub, loadf64>,
                 fma4s_int<0x6F, "vfmsubsd", sdmem, sse_load_f64,
                           int_x86_fma_vfmsub_sd>;
defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32,
                        X86Fnmadd, loadf32>,
                  fma4s_int<0x7A, "vfnmaddss", ssmem, sse_load_f32,
                            int_x86_fma_vfnmadd_ss>;
defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64,
                        X86Fnmadd, loadf64>,
                  fma4s_int<0x7B, "vfnmaddsd", sdmem, sse_load_f64,
                            int_x86_fma_vfnmadd_sd>;
defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32,
                        X86Fnmsub, loadf32>,
                  fma4s_int<0x7E, "vfnmsubss", ssmem, sse_load_f32,
                            int_x86_fma_vfnmsub_ss>;
defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
                        X86Fnmsub, loadf64>,
                  fma4s_int<0x7F, "vfnmsubsd", sdmem, sse_load_f64,
                            int_x86_fma_vfnmsub_sd>;
// Packed FMA4, single precision.
let ExeDomain = SSEPackedSingle in {
  defm VFMADDPS4    : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFMSUBPS4    : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFNMADDPS4   : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFNMSUBPS4   : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
}
// Packed FMA4, double precision.
let ExeDomain = SSEPackedDouble in {
  defm VFMADDPD4    : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFMSUBPD4    : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFNMADDPD4   : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFNMSUBPD4   : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
}