declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)
+; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
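+; Note: maxss/maxsd (and their packed forms) return the source operand when
+; either input is NaN, so with %x kept in the source position a NaN %y already
+; yields %x. A NaN %x is fixed up separately: cmpunordss builds an all-ones
+; mask when %x is NaN, and the and/andn/or (or AVX vblendv) blend then selects
+; %y, matching maxnum's behavior of returning the non-NaN operand.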
; CHECK-LABEL: @test_fmaxf
-; CHECK: jmp fmaxf
+; SSE: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define float @test_fmaxf(float %x, float %y) {
%z = call float @fmaxf(float %x, float %y) readnone
ret float %z
}
+; CHECK-LABEL: @test_fmaxf_minsize
+; CHECK: jmp fmaxf
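+; With minsize, the multi-instruction blend expansion costs more bytes than the
+; tail call, so the libcall is kept.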
+define float @test_fmaxf_minsize(float %x, float %y) minsize {
+ %z = call float @fmaxf(float %x, float %y) readnone
+ ret float %z
+}
+
+; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
+
; CHECK-LABEL: @test_fmax
-; CHECK: jmp fmax
+; SSE: movapd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm3
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: maxsd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define double @test_fmax(double %x, double %y) {
%z = call double @fmax(double %x, double %y) readnone
ret double %z
}
; CHECK-LABEL: @test_intrinsic_fmaxf
-; CHECK: jmp fmaxf
+; SSE: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define float @test_intrinsic_fmaxf(float %x, float %y) {
%z = call float @llvm.maxnum.f32(float %x, float %y) readnone
ret float %z
}
+
; CHECK-LABEL: @test_intrinsic_fmax
-; CHECK: jmp fmax
+; SSE: movapd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm3
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: maxsd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define double @test_intrinsic_fmax(double %x, double %y) {
%z = call double @llvm.maxnum.f64(double %x, double %y) readnone
ret double %z
}
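+; The vector tests check that the same unordered-compare + blend pattern is
+; emitted with packed instructions; no scalarization and no libcalls remain.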
; CHECK-LABEL: @test_intrinsic_fmax_v2f32
-; SSE: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE: movaps %xmm1, %xmm0
-; SSE-NEXT: addq $72, %rsp
+; SSE: movaps %xmm1, %xmm2
+; SSE-NEXT: maxps %xmm0, %xmm2
+; SSE-NEXT: cmpunordps %xmm0, %xmm0
+; SSE-NEXT: andps %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm2, %xmm0
+; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: callq fmaxf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: addq $56, %rsp
+; AVX: vmaxps %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
define <2 x float> @test_intrinsic_fmax_v2f32(<2 x float> %x, <2 x float> %y) {
%z = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
  ret <2 x float> %z
}
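+; v2f32 is widened to v4f32 on x86, so this produces the same 128-bit code as
+; the v4f32 test below.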
; CHECK-LABEL: @test_intrinsic_fmax_v4f32
-; SSE: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT: callq fmaxf
-; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT: unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE: movaps %xmm1, %xmm0
-; SSE-NEXT: addq $72, %rsp
+; SSE: movaps %xmm1, %xmm2
+; SSE-NEXT: maxps %xmm0, %xmm2
+; SSE-NEXT: cmpunordps %xmm0, %xmm0
+; SSE-NEXT: andps %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm2, %xmm0
+; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT: callq fmaxf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX: vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX: callq fmaxf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: addq $56, %rsp
+; AVX: vmaxps %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
define <4 x float> @test_intrinsic_fmax_v4f32(<4 x float> %x, <4 x float> %y) {
%z = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
  ret <4 x float> %z
}
; CHECK-LABEL: @test_intrinsic_fmax_v2f64
-; CHECK: callq fmax
-; CHECK: callq fmax
+; SSE: movapd %xmm1, %xmm2
+; SSE-NEXT: maxpd %xmm0, %xmm2
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm2, %xmm0
+; SSE-NEXT: orpd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX: vmaxpd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
define <2 x double> @test_intrinsic_fmax_v2f64(<2 x double> %x, <2 x double> %y) {
%z = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
ret <2 x double> %z
}
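+; Illegal wide vectors are split: SSE lowers v4f64 and v8f64 as 128-bit halves,
+; while AVX uses 256-bit ymm ops (v8f64 still splits into two ymm halves).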
; CHECK-LABEL: @test_intrinsic_fmax_v4f64
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
+; SSE: movapd %xmm2, %xmm4
+; SSE-NEXT: maxpd %xmm0, %xmm4
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm4, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
+; SSE-NEXT: movapd %xmm3, %xmm2
+; SSE-NEXT: maxpd %xmm1, %xmm2
+; SSE-NEXT: cmpunordpd %xmm1, %xmm1
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: andnpd %xmm2, %xmm1
+; SSE-NEXT: orpd %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX: vmaxpd %ymm0, %ymm1, %ymm2
+; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
+; AVX-NEXT: retq
define <4 x double> @test_intrinsic_fmax_v4f64(<4 x double> %x, <4 x double> %y) {
%z = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
ret <4 x double> %z
}
; CHECK-LABEL: @test_intrinsic_fmax_v8f64
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
+; SSE: movapd %xmm4, %xmm8
+; SSE-NEXT: maxpd %xmm0, %xmm8
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm4
+; SSE-NEXT: andnpd %xmm8, %xmm0
+; SSE-NEXT: orpd %xmm4, %xmm0
+; SSE-NEXT: movapd %xmm5, %xmm4
+; SSE-NEXT: maxpd %xmm1, %xmm4
+; SSE-NEXT: cmpunordpd %xmm1, %xmm1
+; SSE-NEXT: andpd %xmm1, %xmm5
+; SSE-NEXT: andnpd %xmm4, %xmm1
+; SSE-NEXT: orpd %xmm5, %xmm1
+; SSE-NEXT: movapd %xmm6, %xmm4
+; SSE-NEXT: maxpd %xmm2, %xmm4
+; SSE-NEXT: cmpunordpd %xmm2, %xmm2
+; SSE-NEXT: andpd %xmm2, %xmm6
+; SSE-NEXT: andnpd %xmm4, %xmm2
+; SSE-NEXT: orpd %xmm6, %xmm2
+; SSE-NEXT: movapd %xmm7, %xmm4
+; SSE-NEXT: maxpd %xmm3, %xmm4
+; SSE-NEXT: cmpunordpd %xmm3, %xmm3
+; SSE-NEXT: andpd %xmm3, %xmm7
+; SSE-NEXT: andnpd %xmm4, %xmm3
+; SSE-NEXT: orpd %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX: vmaxpd %ymm0, %ymm2, %ymm4
+; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
+; AVX-NEXT: vmaxpd %ymm1, %ymm3, %ymm2
+; AVX-NEXT: vcmpunordpd %ymm1, %ymm1, %ymm1
+; AVX-NEXT: vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
+; AVX-NEXT: retq
define <8 x double> @test_intrinsic_fmax_v8f64(<8 x double> %x, <8 x double> %y) {
%z = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
  ret <8 x double> %z
}