Fix load size for FMA4 SS/SD instructions. They need to use f32 and f64 size, but...
[oota-llvm.git] lib/Target/X86/X86InstrFMA.td
//====- X86InstrFMA.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes FMA (Fused Multiply-Add) instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//

multiclass fma3p_rm<bits<8> opc, string OpcodeStr> {
  def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           []>;
  def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, f128mem:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           []>;
  def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           []>;
  def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, f256mem:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           []>;
}

multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr, string PackTy> {
  defm r132 : fma3p_rm<opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
  defm r213 : fma3p_rm<opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy))>;
  defm r231 : fma3p_rm<opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
}
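
// As a rough sketch of how the multiclasses above expand: a single packed
// instantiation such as
//   defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">;
// should produce register and memory defs for each operand-order form, e.g.
// VFMADDPSr132r and VFMADDPSr132m for the 128-bit forms and VFMADDPSr132rY
// and VFMADDPSr132mY for the 256-bit forms, with mnemonics "vfmadd132ps",
// "vfmadd213ps" and "vfmadd231ps".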

// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFMADDPS    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">;
  defm VFMSUBPS    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps">;
  defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps">;
  defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps">;
}

let ExeDomain = SSEPackedDouble in {
  defm VFMADDPD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd">, VEX_W;
  defm VFMSUBPD    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd">, VEX_W;
  defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd">, VEX_W;
  defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd">, VEX_W;
}

// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps">;
  defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps">;
}
let ExeDomain = SSEPackedDouble in {
  defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd">, VEX_W;
  defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd">, VEX_W;
}

multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop> {
  def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           []>;
  def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, x86memop:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           []>;
}

multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr> {
  defm SSr132 : fma3s_rm<opc132, !strconcat(OpcodeStr, "132ss"), f32mem>;
  defm SSr213 : fma3s_rm<opc213, !strconcat(OpcodeStr, "213ss"), f32mem>;
  defm SSr231 : fma3s_rm<opc231, !strconcat(OpcodeStr, "231ss"), f32mem>;
  defm SDr132 : fma3s_rm<opc132, !strconcat(OpcodeStr, "132sd"), f64mem>, VEX_W;
  defm SDr213 : fma3s_rm<opc213, !strconcat(OpcodeStr, "213sd"), f64mem>, VEX_W;
  defm SDr231 : fma3s_rm<opc231, !strconcat(OpcodeStr, "231sd"), f64mem>, VEX_W;
}

defm VFMADD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd">;
defm VFMSUB : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub">;

defm VFNMADD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd">;
defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub">;
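
// As with the packed forms, each scalar defm above should expand to register
// and memory variants such as VFMADDSSr132r and VFMADDSSr132m (f32mem load),
// plus the matching SD forms using f64mem, with mnemonics like "vfmadd132ss"
// and "vfmadd132sd". These FMA3 defs currently carry empty patterns, so they
// only provide encodings and assembler syntax.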

//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//


multiclass fma4s<bits<8> opc, string OpcodeStr, Operand memop> {
  def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>, XOP_W;
  def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, memop:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>, XOP_W;
  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, memop:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>;
}
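
// FMA4 instructions take four operands. In the fma4s multiclass above, the
// rr and rm forms carry XOP_W (VEX.W set) while the mr form does not, which
// is presumably how the encoding distinguishes whether $src3 or $src2 is the
// memory operand. A rough example of the assembly accepted by the scalar
// defs (AT&T operand order):
//   vfmaddss %xmm3, %xmm2, %xmm1, %xmm0   // %xmm0 = %xmm1 * %xmm2 + %xmm3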

multiclass fma4p<bits<8> opc, string OpcodeStr> {
  def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>, XOP_W;
  def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, f128mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>, XOP_W;
  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, f128mem:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>;
  def rrY : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, VR256:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>, XOP_W;
  def rmY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, f256mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>, XOP_W;
  def mrY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, f256mem:$src2, VR256:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           []>;
}
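
// Each packed FMA4 instantiation expands to six defs; for example
//   defm VFMADDPS4 : fma4p<0x68, "vfmaddps">;
// yields VFMADDPS4rr, VFMADDPS4rm and VFMADDPS4mr for the 128-bit forms and
// VFMADDPS4rrY, VFMADDPS4rmY and VFMADDPS4mrY for the 256-bit forms. These
// are the names the intrinsic patterns below refer to.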

let isAsmParserOnly = 1 in {
  defm VFMADDSS4    : fma4s<0x6A, "vfmaddss", ssmem>;
  defm VFMADDSD4    : fma4s<0x6B, "vfmaddsd", sdmem>;
  defm VFMADDPS4    : fma4p<0x68, "vfmaddps">;
  defm VFMADDPD4    : fma4p<0x69, "vfmaddpd">;
  defm VFMSUBSS4    : fma4s<0x6E, "vfmsubss", ssmem>;
  defm VFMSUBSD4    : fma4s<0x6F, "vfmsubsd", sdmem>;
  defm VFMSUBPS4    : fma4p<0x6C, "vfmsubps">;
  defm VFMSUBPD4    : fma4p<0x6D, "vfmsubpd">;
  defm VFNMADDSS4   : fma4s<0x7A, "vfnmaddss", ssmem>;
  defm VFNMADDSD4   : fma4s<0x7B, "vfnmaddsd", sdmem>;
  defm VFNMADDPS4   : fma4p<0x78, "vfnmaddps">;
  defm VFNMADDPD4   : fma4p<0x79, "vfnmaddpd">;
  defm VFNMSUBSS4   : fma4s<0x7E, "vfnmsubss", ssmem>;
  defm VFNMSUBSD4   : fma4s<0x7F, "vfnmsubsd", sdmem>;
  defm VFNMSUBPS4   : fma4p<0x7C, "vfnmsubps">;
  defm VFNMSUBPD4   : fma4p<0x7D, "vfnmsubpd">;
  defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps">;
  defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd">;
  defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps">;
  defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd">;
}

// FMA4 Intrinsics patterns
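// Because the defs above have empty patterns, the FMA4 intrinsics are
// selected through the explicit Pat<> patterns below. The scalar (ss/sd)
// memory patterns use sse_load_f32/sse_load_f64 so the folded load has
// f32/f64 size, matching the scalar element rather than a full 128-bit
// vector; the packed patterns fold aligned v4f32/v2f64 (and v8f32/v4f64)
// loads instead.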

// VFMADD
def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMADDSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
          (VFMADDSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
          (VFMADDSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMADDSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
          (VFMADDSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
          (VFMADDSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, VR128:$src2,
                                  (alignedloadv4f32 addr:$src3)),
          (VFMADDPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
                                  VR128:$src3),
          (VFMADDPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmadd_pd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMADDPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd VR128:$src1, VR128:$src2,
                                  (alignedloadv2f64 addr:$src3)),
          (VFMADDPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
                                  VR128:$src3),
          (VFMADDPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmadd_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMADDPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ps_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv8f32 addr:$src3)),
          (VFMADDPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ps_256 VR256:$src1,
                                      (alignedloadv8f32 addr:$src2),
                                      VR256:$src3),
          (VFMADDPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMADDPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv4f64 addr:$src3)),
          (VFMADDPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1,
                                      (alignedloadv4f64 addr:$src2),
                                      VR256:$src3),
          (VFMADDPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

// VFMSUB
def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMSUBSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
          (VFMSUBSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
          (VFMSUBSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMSUBSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
          (VFMSUBSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
          (VFMSUBSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, VR128:$src2,
                                  (alignedloadv4f32 addr:$src3)),
          (VFMSUBPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
                                  VR128:$src3),
          (VFMSUBPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmsub_pd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMSUBPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd VR128:$src1, VR128:$src2,
                                  (alignedloadv2f64 addr:$src3)),
          (VFMSUBPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
                                  VR128:$src3),
          (VFMSUBPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmsub_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMSUBPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ps_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv8f32 addr:$src3)),
          (VFMSUBPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ps_256 VR256:$src1,
                                      (alignedloadv8f32 addr:$src2),
                                      VR256:$src3),
          (VFMSUBPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMSUBPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv4f64 addr:$src3)),
          (VFMSUBPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1,
                                      (alignedloadv4f64 addr:$src2),
                                      VR256:$src3),
          (VFMSUBPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

// VFNMADD
def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMADDSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
          (VFNMADDSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
          (VFNMADDSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMADDSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
          (VFNMADDSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
          (VFNMADDSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, VR128:$src2,
                                  (alignedloadv4f32 addr:$src3)),
          (VFNMADDPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
                                  VR128:$src3),
          (VFNMADDPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmadd_pd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMADDPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd VR128:$src1, VR128:$src2,
                                  (alignedloadv2f64 addr:$src3)),
          (VFNMADDPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
                                  VR128:$src3),
          (VFNMADDPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmadd_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFNMADDPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ps_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv8f32 addr:$src3)),
          (VFNMADDPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ps_256 VR256:$src1,
                                      (alignedloadv8f32 addr:$src2),
                                      VR256:$src3),
          (VFNMADDPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFNMADDPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv4f64 addr:$src3)),
          (VFNMADDPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1,
                                      (alignedloadv4f64 addr:$src2),
                                      VR256:$src3),
          (VFNMADDPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

// VFNMSUB
def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMSUBSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
          (VFNMSUBSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
          (VFNMSUBSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMSUBSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
          (VFNMSUBSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
          (VFNMSUBSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, VR128:$src2,
                                  (alignedloadv4f32 addr:$src3)),
          (VFNMSUBPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
                                  VR128:$src3),
          (VFNMSUBPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmsub_pd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFNMSUBPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd VR128:$src1, VR128:$src2,
                                  (alignedloadv2f64 addr:$src3)),
          (VFNMSUBPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
                                  VR128:$src3),
          (VFNMSUBPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfnmsub_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFNMSUBPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv8f32 addr:$src3)),
          (VFNMSUBPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps_256 VR256:$src1,
                                      (alignedloadv8f32 addr:$src2),
                                      VR256:$src3),
          (VFNMSUBPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

def : Pat<(int_x86_fma4_vfnmsub_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFNMSUBPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv4f64 addr:$src3)),
          (VFNMSUBPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd_256 VR256:$src1,
                                      (alignedloadv4f64 addr:$src2),
                                      VR256:$src3),
          (VFNMSUBPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

// VFMADDSUB
def : Pat<(int_x86_fma4_vfmaddsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMADDSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_ps VR128:$src1, VR128:$src2,
                                  (alignedloadv4f32 addr:$src3)),
          (VFMADDSUBPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
                                  VR128:$src3),
          (VFMADDSUBPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmaddsub_pd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMADDSUBPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd VR128:$src1, VR128:$src2,
                                  (alignedloadv2f64 addr:$src3)),
          (VFMADDSUBPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
                                  VR128:$src3),
          (VFMADDSUBPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmaddsub_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMADDSUBPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_ps_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv8f32 addr:$src3)),
          (VFMADDSUBPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_ps_256 VR256:$src1,
                                      (alignedloadv8f32 addr:$src2),
                                      VR256:$src3),
          (VFMADDSUBPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

def : Pat<(int_x86_fma4_vfmaddsub_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMADDSUBPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv4f64 addr:$src3)),
          (VFMADDSUBPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd_256 VR256:$src1,
                                      (alignedloadv4f64 addr:$src2),
                                      VR256:$src3),
          (VFMADDSUBPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

// VFMSUBADD
def : Pat<(int_x86_fma4_vfmsubadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMSUBADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_ps VR128:$src1, VR128:$src2,
                                  (alignedloadv4f32 addr:$src3)),
          (VFMSUBADDPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
                                  VR128:$src3),
          (VFMSUBADDPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmsubadd_pd VR128:$src1, VR128:$src2, VR128:$src3),
          (VFMSUBADDPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd VR128:$src1, VR128:$src2,
                                  (alignedloadv2f64 addr:$src3)),
          (VFMSUBADDPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
                                  VR128:$src3),
          (VFMSUBADDPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;

def : Pat<(int_x86_fma4_vfmsubadd_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMSUBADDPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_ps_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv8f32 addr:$src3)),
          (VFMSUBADDPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_ps_256 VR256:$src1,
                                      (alignedloadv8f32 addr:$src2),
                                      VR256:$src3),
          (VFMSUBADDPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;

def : Pat<(int_x86_fma4_vfmsubadd_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
          (VFMSUBADDPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd_256 VR256:$src1, VR256:$src2,
                                  (alignedloadv4f64 addr:$src3)),
          (VFMSUBADDPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd_256 VR256:$src1,
                                      (alignedloadv4f64 addr:$src2),
                                      VR256:$src3),
          (VFMSUBADDPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;