declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)
+; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
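+;
+; The inline expansion has to honor maxnum's NaN rule: if one operand is NaN,
+; return the other operand. x86 maxss already returns its source operand when
+; either input is NaN, so only the 'x is NaN' case needs a fixup, done with an
+; and/andn/or blend that computes: isnan(x) ? y : max(y, x)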
; CHECK-LABEL: @test_fmaxf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1
; SSE-NEXT: andnps %xmm1, %xmm2
; SSE-NEXT: orps %xmm3, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX: vmaxss %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
define float @test_fmaxf(float %x, float %y) {
%z = call float @fmaxf(float %x, float %y) readnone
ret float %z
}
-; FIXME: Doubles should be inlined similarly to floats.
+; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
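+; The double-precision expansion is the same blend with the sd/pd instruction forms.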
; CHECK-LABEL: @test_fmax
-; CHECK: jmp fmax
+; SSE: movapd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm3
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: maxsd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
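+; AVX can use its variable blend (vblendvpd) directly, so the select needs no
+; and/andn/or chain: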
+; AVX: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define double @test_fmax(double %x, double %y) {
%z = call double @fmax(double %x, double %y) readnone
ret double %z
}
-; FIXME: Doubles should be inlined similarly to floats.
; CHECK-LABEL: @test_intrinsic_fmax
-; CHECK: jmp fmax
+; SSE: movapd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm3
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: maxsd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define double @test_intrinsic_fmax(double %x, double %y) {
%z = call double @llvm.maxnum.f64(double %x, double %y) readnone
ret double %z
}
-; FIXME: This should not be doing 4 scalar ops on a 2 element vector.
-; FIXME: This should use vector ops (maxps / cmpps).
-
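+; The SSE run has no blendv (an SSE4.1 instruction), so the vector select is
+; still synthesized from andps/andnps/orps, but a single maxps + cmpunordps now
+; covers the whole vector instead of a per-element maxss/cmpunordss chain.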
; CHECK-LABEL: @test_intrinsic_fmax_v2f32
; SSE: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; SSE-NEXT: movaps %xmm3, %xmm4
-; SSE-NEXT: cmpunordss %xmm4, %xmm4
-; SSE-NEXT: movaps %xmm4, %xmm5
-; SSE-NEXT: andps %xmm2, %xmm5
-; SSE-NEXT: maxss %xmm3, %xmm2
-; SSE-NEXT: andnps %xmm2, %xmm4
-; SSE-NEXT: orps %xmm5, %xmm4
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSE-NEXT: movaps %xmm0, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; SSE-NEXT: movaps %xmm5, %xmm3
-; SSE-NEXT: cmpunordss %xmm3, %xmm3
-; SSE-NEXT: movaps %xmm3, %xmm6
-; SSE-NEXT: andps %xmm2, %xmm6
-; SSE-NEXT: maxss %xmm5, %xmm2
-; SSE-NEXT: andnps %xmm2, %xmm3
-; SSE-NEXT: orps %xmm6, %xmm3
-; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm4
-; SSE-NEXT: andps %xmm1, %xmm4
-; SSE-NEXT: movaps %xmm1, %xmm5
-; SSE-NEXT: maxss %xmm0, %xmm5
-; SSE-NEXT: andnps %xmm5, %xmm2
-; SSE-NEXT: orps %xmm4, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm4
-; SSE-NEXT: cmpunordss %xmm4, %xmm4
-; SSE-NEXT: movaps %xmm4, %xmm5
-; SSE-NEXT: andps %xmm1, %xmm5
-; SSE-NEXT: maxss %xmm0, %xmm1
-; SSE-NEXT: andnps %xmm1, %xmm4
-; SSE-NEXT: orps %xmm5, %xmm4
-; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: maxps %xmm0, %xmm2
+; SSE-NEXT: cmpunordps %xmm0, %xmm0
+; SSE-NEXT: andps %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm2, %xmm0
+; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX: vmaxss %xmm0, %xmm1, %xmm2
-; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
-; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm2, %xmm2
-; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
-; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
-; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
-; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
-; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
-; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm3
-; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm3, %xmm0
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX: vmaxps %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
define <2 x float> @test_intrinsic_fmax_v2f32(<2 x float> %x, <2 x float> %y) {
%z = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
ret <2 x float> %z
}
-; FIXME: This should use vector ops (maxps / cmpps).
-
; CHECK-LABEL: @test_intrinsic_fmax_v4f32
; SSE: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; SSE-NEXT: movaps %xmm3, %xmm4
-; SSE-NEXT: cmpunordss %xmm4, %xmm4
-; SSE-NEXT: movaps %xmm4, %xmm5
-; SSE-NEXT: andps %xmm2, %xmm5
-; SSE-NEXT: maxss %xmm3, %xmm2
-; SSE-NEXT: andnps %xmm2, %xmm4
-; SSE-NEXT: orps %xmm5, %xmm4
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSE-NEXT: movaps %xmm0, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; SSE-NEXT: movaps %xmm5, %xmm3
-; SSE-NEXT: cmpunordss %xmm3, %xmm3
-; SSE-NEXT: movaps %xmm3, %xmm6
-; SSE-NEXT: andps %xmm2, %xmm6
-; SSE-NEXT: maxss %xmm5, %xmm2
-; SSE-NEXT: andnps %xmm2, %xmm3
-; SSE-NEXT: orps %xmm6, %xmm3
-; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm4
-; SSE-NEXT: andps %xmm1, %xmm4
-; SSE-NEXT: movaps %xmm1, %xmm5
-; SSE-NEXT: maxss %xmm0, %xmm5
-; SSE-NEXT: andnps %xmm5, %xmm2
-; SSE-NEXT: orps %xmm4, %xmm2
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd %xmm0, %xmm4
-; SSE-NEXT: cmpunordss %xmm4, %xmm4
-; SSE-NEXT: movaps %xmm4, %xmm5
-; SSE-NEXT: andps %xmm1, %xmm5
-; SSE-NEXT: maxss %xmm0, %xmm1
-; SSE-NEXT: andnps %xmm1, %xmm4
-; SSE-NEXT: orps %xmm5, %xmm4
-; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: maxps %xmm0, %xmm2
+; SSE-NEXT: cmpunordps %xmm0, %xmm0
+; SSE-NEXT: andps %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm2, %xmm0
+; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX: vmaxss %xmm0, %xmm1, %xmm2
-; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
-; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm2, %xmm2
-; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
-; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
-; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
-; AVX-NEXT: vmaxss %xmm3, %xmm4, %xmm5
-; AVX-NEXT: vcmpunordss %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
-; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm3
-; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm3, %xmm0
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX: vmaxps %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
define <4 x float> @test_intrinsic_fmax_v4f32(<4 x float> %x, <4 x float> %y) {
%z = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
ret <4 x float> %z
}
-; FIXME: Vector of doubles should be inlined similarly to vector of floats.
-
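+; The two fmax libcalls are replaced by the same inline blend using the packed
+; double forms (maxpd / cmpunordpd).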
; CHECK-LABEL: @test_intrinsic_fmax_v2f64
-; CHECK: callq fmax
-; CHECK: callq fmax
+; SSE: movapd %xmm1, %xmm2
+; SSE-NEXT: maxpd %xmm0, %xmm2
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm2, %xmm0
+; SSE-NEXT: orpd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxpd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define <2 x double> @test_intrinsic_fmax_v2f64(<2 x double> %x, <2 x double> %y) {
%z = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
ret <2 x double> %z
}
-; FIXME: Vector of doubles should be inlined similarly to vector of floats.
-
; CHECK-LABEL: @test_intrinsic_fmax_v4f64
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
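+; SSE has no 256-bit vector registers, so the 4 x double case legalizes into
+; two independent 128-bit maxpd blends; AVX handles it with one ymm sequence.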
+; SSE: movapd %xmm2, %xmm4
+; SSE-NEXT: maxpd %xmm0, %xmm4
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm4, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
+; SSE-NEXT: movapd %xmm3, %xmm2
+; SSE-NEXT: maxpd %xmm1, %xmm2
+; SSE-NEXT: cmpunordpd %xmm1, %xmm1
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: andnpd %xmm2, %xmm1
+; SSE-NEXT: orpd %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX: vmaxpd %ymm0, %ymm1, %ymm2
+; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
+; AVX-NEXT: retq
define <4 x double> @test_intrinsic_fmax_v4f64(<4 x double> %x, <4 x double> %y) {
%z = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
ret <4 x double> %z
}
-; FIXME: Vector of doubles should be inlined similarly to vector of floats.
-
; CHECK-LABEL: @test_intrinsic_fmax_v8f64
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
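+; The same pattern scales up: the 8 x double vector splits into four xmm-sized
+; blends for SSE and two ymm-sized blends for AVX.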
+; SSE: movapd %xmm4, %xmm8
+; SSE-NEXT: maxpd %xmm0, %xmm8
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm4
+; SSE-NEXT: andnpd %xmm8, %xmm0
+; SSE-NEXT: orpd %xmm4, %xmm0
+; SSE-NEXT: movapd %xmm5, %xmm4
+; SSE-NEXT: maxpd %xmm1, %xmm4
+; SSE-NEXT: cmpunordpd %xmm1, %xmm1
+; SSE-NEXT: andpd %xmm1, %xmm5
+; SSE-NEXT: andnpd %xmm4, %xmm1
+; SSE-NEXT: orpd %xmm5, %xmm1
+; SSE-NEXT: movapd %xmm6, %xmm4
+; SSE-NEXT: maxpd %xmm2, %xmm4
+; SSE-NEXT: cmpunordpd %xmm2, %xmm2
+; SSE-NEXT: andpd %xmm2, %xmm6
+; SSE-NEXT: andnpd %xmm4, %xmm2
+; SSE-NEXT: orpd %xmm6, %xmm2
+; SSE-NEXT: movapd %xmm7, %xmm4
+; SSE-NEXT: maxpd %xmm3, %xmm4
+; SSE-NEXT: cmpunordpd %xmm3, %xmm3
+; SSE-NEXT: andpd %xmm3, %xmm7
+; SSE-NEXT: andnpd %xmm4, %xmm3
+; SSE-NEXT: orpd %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX: vmaxpd %ymm0, %ymm2, %ymm4
+; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
+; AVX-NEXT: vmaxpd %ymm1, %ymm3, %ymm2
+; AVX-NEXT: vcmpunordpd %ymm1, %ymm1, %ymm1
+; AVX-NEXT: vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
+; AVX-NEXT: retq
define <8 x double> @test_intrinsic_fmax_v8f64(<8 x double> %x, <8 x double> %y) {
%z = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
ret <8 x double> %z