; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse -enable-unsafe-fp-math < %s | FileCheck %s --check-prefix=SSE
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx -enable-unsafe-fp-math < %s | FileCheck %s --check-prefix=AVX
; Verify that the first two adds are independent regardless of how the inputs are
; commuted. The destination registers are used as source registers for the third add.
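; In each of the four variants below, the checked code computes (x0 + x1) and
; (x2 + x3) independently and then sums the two results, regardless of the
; operand order written in the IR.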
define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds1:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; AVX-LABEL: reassociate_adds1:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %t0 = fadd float %x0, %x1
  %t1 = fadd float %t0, %x2
  %t2 = fadd float %t1, %x3
  ret float %t2
}
define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds2:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; AVX-LABEL: reassociate_adds2:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %t0 = fadd float %x0, %x1
  %t1 = fadd float %x2, %t0
  %t2 = fadd float %t1, %x3
  ret float %t2
}
define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds3:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; AVX-LABEL: reassociate_adds3:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %t0 = fadd float %x0, %x1
  %t1 = fadd float %t0, %x2
  %t2 = fadd float %x3, %t1
  ret float %t2
}
define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds4:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; AVX-LABEL: reassociate_adds4:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %t0 = fadd float %x0, %x1
  %t1 = fadd float %x2, %t0
  %t2 = fadd float %x3, %t1
  ret float %t2
}
; Verify that we reassociate some of these ops. The optimal balanced tree of adds is not
; produced because that would cost more compile time.
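; The checked shape is (((x0 + x1) + (x2 + x3)) + ((x4 + x5) + x6)) + x7 rather
; than a fully balanced tree of adds.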
define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
; SSE-LABEL: reassociate_adds5:
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: addss %xmm5, %xmm4
; SSE-NEXT: addss %xmm6, %xmm4
; SSE-NEXT: addss %xmm4, %xmm0
; SSE-NEXT: addss %xmm7, %xmm0
; AVX-LABEL: reassociate_adds5:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm5, %xmm4, %xmm1
; AVX-NEXT: vaddss %xmm6, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm7, %xmm0, %xmm0
  %t0 = fadd float %x0, %x1
  %t1 = fadd float %t0, %x2
  %t2 = fadd float %t1, %x3
  %t3 = fadd float %t2, %x4
  %t4 = fadd float %t3, %x5
  %t5 = fadd float %t4, %x6
  %t6 = fadd float %t5, %x7
  ret float %t6
}
; Verify that we only need two associative operations to reassociate the operands.
; Also, we should reassociate such that the result of the high latency division
; is used by the final 'add' rather than reassociating the %x3 operand with the
; division. The latter reassociation would not improve anything.
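; That is, the result should be formed as (x0 / x1) + (x2 + x3).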
define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_adds6:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: addss %xmm3, %xmm2
; SSE-NEXT: addss %xmm2, %xmm0
; AVX-LABEL: reassociate_adds6:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %t0 = fdiv float %x0, %x1
  %t1 = fadd float %x2, %t0
  %t2 = fadd float %x3, %t1
  ret float %t2
}
; Verify that SSE and AVX scalar single-precision multiplies are reassociated.

define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_muls1:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: mulss %xmm3, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm0
; AVX-LABEL: reassociate_muls1:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
  %t0 = fdiv float %x0, %x1
  %t1 = fmul float %x2, %t0
  %t2 = fmul float %x3, %t1
  ret float %t2
}
; Verify that SSE and AVX scalar double-precision adds are reassociated.

define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_adds_double:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; AVX-LABEL: reassociate_adds_double:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
  %t0 = fdiv double %x0, %x1
  %t1 = fadd double %x2, %t0
  %t2 = fadd double %x3, %t1
  ret double %t2
}
; Verify that SSE and AVX scalar double-precision multiplies are reassociated.

define double @reassociate_muls_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_muls_double:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: mulsd %xmm3, %xmm2
; SSE-NEXT: mulsd %xmm2, %xmm0
; AVX-LABEL: reassociate_muls_double:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
  %t0 = fdiv double %x0, %x1
  %t1 = fmul double %x2, %t0
  %t2 = fmul double %x3, %t1
  ret double %t2
}
; Verify that SSE and AVX 128-bit vector single-precision adds are reassociated.

define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_adds_v4f32:
; SSE-NEXT: mulps %xmm1, %xmm0
; SSE-NEXT: addps %xmm3, %xmm2
; SSE-NEXT: addps %xmm2, %xmm0
; AVX-LABEL: reassociate_adds_v4f32:
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
  %t0 = fmul <4 x float> %x0, %x1
  %t1 = fadd <4 x float> %x2, %t0
  %t2 = fadd <4 x float> %x3, %t1
  ret <4 x float> %t2
}
; Verify that SSE and AVX 128-bit vector double-precision adds are reassociated.

define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_adds_v2f64:
; SSE-NEXT: mulpd %xmm1, %xmm0
; SSE-NEXT: addpd %xmm3, %xmm2
; SSE-NEXT: addpd %xmm2, %xmm0
; AVX-LABEL: reassociate_adds_v2f64:
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
  %t0 = fmul <2 x double> %x0, %x1
  %t1 = fadd <2 x double> %x2, %t0
  %t2 = fadd <2 x double> %x3, %t1
  ret <2 x double> %t2
}
; Verify that SSE and AVX 128-bit vector single-precision multiplies are reassociated.

define <4 x float> @reassociate_muls_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_muls_v4f32:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm0
; AVX-LABEL: reassociate_muls_v4f32:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
  %t0 = fadd <4 x float> %x0, %x1
  %t1 = fmul <4 x float> %x2, %t0
  %t2 = fmul <4 x float> %x3, %t1
  ret <4 x float> %t2
}
; Verify that SSE and AVX 128-bit vector double-precision multiplies are reassociated.

define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_muls_v2f64:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: mulpd %xmm3, %xmm2
; SSE-NEXT: mulpd %xmm2, %xmm0
; AVX-LABEL: reassociate_muls_v2f64:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmulpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
  %t0 = fadd <2 x double> %x0, %x1
  %t1 = fmul <2 x double> %x2, %t0
  %t2 = fmul <2 x double> %x3, %t1
  ret <2 x double> %t2
}
; Verify that AVX 256-bit vector single-precision adds are reassociated.

define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_adds_v8f32:
; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vaddps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
  %t0 = fmul <8 x float> %x0, %x1
  %t1 = fadd <8 x float> %x2, %t0
  %t2 = fadd <8 x float> %x3, %t1
  ret <8 x float> %t2
}
; Verify that AVX 256-bit vector double-precision adds are reassociated.

define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_adds_v4f64:
; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vaddpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
  %t0 = fmul <4 x double> %x0, %x1
  %t1 = fadd <4 x double> %x2, %t0
  %t2 = fadd <4 x double> %x3, %t1
  ret <4 x double> %t2
}
; Verify that AVX 256-bit vector single-precision multiplies are reassociated.

define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_muls_v8f32:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmulps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
  %t0 = fadd <8 x float> %x0, %x1
  %t1 = fmul <8 x float> %x2, %t0
  %t2 = fmul <8 x float> %x3, %t1
  ret <8 x float> %t2
}
; Verify that AVX 256-bit vector double-precision multiplies are reassociated.

define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_muls_v4f64:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmulpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
  %t0 = fadd <4 x double> %x0, %x1
  %t1 = fmul <4 x double> %x2, %t0
  %t2 = fmul <4 x double> %x3, %t1
  ret <4 x double> %t2
}
; Verify that SSE and AVX scalar single-precision minimum ops are reassociated.

define float @reassociate_mins_single(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_mins_single:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: minss %xmm3, %xmm2
; SSE-NEXT: minss %xmm2, %xmm0
; AVX-LABEL: reassociate_mins_single:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminss %xmm1, %xmm0, %xmm0
  %t0 = fdiv float %x0, %x1
  %cmp1 = fcmp olt float %x2, %t0
  %sel1 = select i1 %cmp1, float %x2, float %t0
  %cmp2 = fcmp olt float %x3, %sel1
  %sel2 = select i1 %cmp2, float %x3, float %sel1
  ret float %sel2
}
; Verify that SSE and AVX scalar single-precision maximum ops are reassociated.

define float @reassociate_maxs_single(float %x0, float %x1, float %x2, float %x3) {
; SSE-LABEL: reassociate_maxs_single:
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: maxss %xmm3, %xmm2
; SSE-NEXT: maxss %xmm2, %xmm0
; AVX-LABEL: reassociate_maxs_single:
; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxss %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
  %t0 = fdiv float %x0, %x1
  %cmp1 = fcmp ogt float %x2, %t0
  %sel1 = select i1 %cmp1, float %x2, float %t0
  %cmp2 = fcmp ogt float %x3, %sel1
  %sel2 = select i1 %cmp2, float %x3, float %sel1
  ret float %sel2
}
; Verify that SSE and AVX scalar double-precision minimum ops are reassociated.

define double @reassociate_mins_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_mins_double:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: minsd %xmm3, %xmm2
; SSE-NEXT: minsd %xmm2, %xmm0
; AVX-LABEL: reassociate_mins_double:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm0
  %t0 = fdiv double %x0, %x1
  %cmp1 = fcmp olt double %x2, %t0
  %sel1 = select i1 %cmp1, double %x2, double %t0
  %cmp2 = fcmp olt double %x3, %sel1
  %sel2 = select i1 %cmp2, double %x3, double %sel1
  ret double %sel2
}
; Verify that SSE and AVX scalar double-precision maximum ops are reassociated.

define double @reassociate_maxs_double(double %x0, double %x1, double %x2, double %x3) {
; SSE-LABEL: reassociate_maxs_double:
; SSE-NEXT: divsd %xmm1, %xmm0
; SSE-NEXT: maxsd %xmm3, %xmm2
; SSE-NEXT: maxsd %xmm2, %xmm0
; AVX-LABEL: reassociate_maxs_double:
; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxsd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxsd %xmm1, %xmm0, %xmm0
  %t0 = fdiv double %x0, %x1
  %cmp1 = fcmp ogt double %x2, %t0
  %sel1 = select i1 %cmp1, double %x2, double %t0
  %cmp2 = fcmp ogt double %x3, %sel1
  %sel2 = select i1 %cmp2, double %x3, double %sel1
  ret double %sel2
}
; Verify that SSE and AVX 128-bit vector single-precision minimum ops are reassociated.

define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_mins_v4f32:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: minps %xmm3, %xmm2
; SSE-NEXT: minps %xmm2, %xmm0
; AVX-LABEL: reassociate_mins_v4f32:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminps %xmm1, %xmm0, %xmm0
  %t0 = fadd <4 x float> %x0, %x1
  %cmp1 = fcmp olt <4 x float> %x2, %t0
  %sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
  %cmp2 = fcmp olt <4 x float> %x3, %sel1
  %sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
  ret <4 x float> %sel2
}
; Verify that SSE and AVX 128-bit vector single-precision maximum ops are reassociated.

define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_maxs_v4f32:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: maxps %xmm3, %xmm2
; SSE-NEXT: maxps %xmm2, %xmm0
; AVX-LABEL: reassociate_maxs_v4f32:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0
  %t0 = fadd <4 x float> %x0, %x1
  %cmp1 = fcmp ogt <4 x float> %x2, %t0
  %sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
  %cmp2 = fcmp ogt <4 x float> %x3, %sel1
  %sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
  ret <4 x float> %sel2
}
; Verify that SSE and AVX 128-bit vector double-precision minimum ops are reassociated.

define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_mins_v2f64:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: minpd %xmm3, %xmm2
; SSE-NEXT: minpd %xmm2, %xmm0
; AVX-LABEL: reassociate_mins_v2f64:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminpd %xmm1, %xmm0, %xmm0
  %t0 = fadd <2 x double> %x0, %x1
  %cmp1 = fcmp olt <2 x double> %x2, %t0
  %sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
  %cmp2 = fcmp olt <2 x double> %x3, %sel1
  %sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
  ret <2 x double> %sel2
}
; Verify that SSE and AVX 128-bit vector double-precision maximum ops are reassociated.

define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_maxs_v2f64:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: maxpd %xmm3, %xmm2
; SSE-NEXT: maxpd %xmm2, %xmm0
; AVX-LABEL: reassociate_maxs_v2f64:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxpd %xmm1, %xmm0, %xmm0
  %t0 = fadd <2 x double> %x0, %x1
  %cmp1 = fcmp ogt <2 x double> %x2, %t0
  %sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
  %cmp2 = fcmp ogt <2 x double> %x3, %sel1
  %sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
  ret <2 x double> %sel2
}
; Verify that AVX 256-bit vector single-precision minimum ops are reassociated.

define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_mins_v8f32:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vminps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0
  %t0 = fadd <8 x float> %x0, %x1
  %cmp1 = fcmp olt <8 x float> %x2, %t0
  %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
  %cmp2 = fcmp olt <8 x float> %x3, %sel1
  %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
  ret <8 x float> %sel2
}
; Verify that AVX 256-bit vector single-precision maximum ops are reassociated.

define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
; AVX-LABEL: reassociate_maxs_v8f32:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmaxps %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0
  %t0 = fadd <8 x float> %x0, %x1
  %cmp1 = fcmp ogt <8 x float> %x2, %t0
  %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
  %cmp2 = fcmp ogt <8 x float> %x3, %sel1
  %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
  ret <8 x float> %sel2
}
; Verify that AVX 256-bit vector double-precision minimum ops are reassociated.

define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_mins_v4f64:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vminpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0
  %t0 = fadd <4 x double> %x0, %x1
  %cmp1 = fcmp olt <4 x double> %x2, %t0
  %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
  %cmp2 = fcmp olt <4 x double> %x3, %sel1
  %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
  ret <4 x double> %sel2
}
; Verify that AVX 256-bit vector double-precision maximum ops are reassociated.

define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
; AVX-LABEL: reassociate_maxs_v4f64:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vmaxpd %ymm3, %ymm2, %ymm1
; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
  %t0 = fadd <4 x double> %x0, %x1
  %cmp1 = fcmp ogt <4 x double> %x2, %t0
  %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
  %cmp2 = fcmp ogt <4 x double> %x3, %sel1
  %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
  ret <4 x double> %sel2
}
; PR25016: https://llvm.org/bugs/show_bug.cgi?id=25016
; Verify that reassociation is not happening needlessly or wrongly.

declare double @bar()
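; Note: each call returns its result in %xmm0, so earlier results are spilled to
; stack slots and the adds below operate on reloads of those spilled values.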
define double @reassociate_adds_from_calls() {
; AVX-LABEL: reassociate_adds_from_calls:
; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
; AVX-NEXT: callq bar
; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
; AVX-NEXT: callq bar
; AVX-NEXT: vmovsd %xmm0, (%rsp)
; AVX-NEXT: callq bar
; AVX-NEXT: vmovsd (%rsp), %xmm1
; AVX: vaddsd 8(%rsp), %xmm1, %xmm1
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vaddsd 16(%rsp), %xmm0, %xmm0
  %x0 = call double @bar()
  %x1 = call double @bar()
  %x2 = call double @bar()
  %x3 = call double @bar()
  %t0 = fadd double %x0, %x1
  %t1 = fadd double %t0, %x2
  %t2 = fadd double %t1, %x3
  ret double %t2
}
define double @already_reassociated() {
; AVX-LABEL: already_reassociated:
; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
; AVX-NEXT: callq bar
; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
; AVX-NEXT: callq bar
; AVX-NEXT: vmovsd %xmm0, (%rsp)
; AVX-NEXT: callq bar
; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0
; AVX-NEXT: vaddsd 8(%rsp), %xmm0, %xmm0
; AVX-NEXT: vaddsd 16(%rsp), %xmm0, %xmm0
  %x0 = call double @bar()
  %x1 = call double @bar()
  %x2 = call double @bar()
  %x3 = call double @bar()
  %t0 = fadd double %x0, %x1
  %t1 = fadd double %x2, %x3
  %t2 = fadd double %t0, %t1
  ret double %t2
}