From: Chad Rosier
Date: Mon, 9 Dec 2013 22:47:38 +0000 (+0000)
Subject: [AArch64] Refactor the NEON scalar reduce pairwise intrinsics, so that they use
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=e02fa056d91064c00182e8fed8c585f992e4100b;p=oota-llvm.git

[AArch64] Refactor the NEON scalar reduce pairwise intrinsics, so that they use
float/double rather than the vector equivalents when appropriate.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196833 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index bb1025e3c1b..d90618a07c0 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -230,19 +230,19 @@ def int_aarch64_neon_vqrshlu : Neon_2Arg_Intrinsic;
 def int_aarch64_neon_vpadd :
   Intrinsic<[llvm_v1i64_ty], [llvm_v2i64_ty],[IntrNoMem]>;
 def int_aarch64_neon_vpfadd :
-  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
 // Scalar Reduce Pairwise Floating Point Max/Min.
 def int_aarch64_neon_vpmax :
-  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 def int_aarch64_neon_vpmin :
-  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
 // Scalar Reduce Pairwise Floating Point Maxnm/Minnm.
 def int_aarch64_neon_vpfmaxnm :
-  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 def int_aarch64_neon_vpfminnm :
-  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
 
 // Scalar Signed Integer Convert To Floating-point
 def int_aarch64_neon_vcvtf32_s32 :
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index d9901942647..aa010c5c47c 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -5310,9 +5310,9 @@ defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
 
 multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnode,
                                             Instruction INSTS,
                                             Instruction INSTD> {
-  def : Pat<(v1f32 (opnode (v2f32 VPR64:$Rn))),
+  def : Pat<(f32 (opnode (v2f32 VPR64:$Rn))),
             (INSTS VPR64:$Rn)>;
-  def : Pat<(v1f64 (opnode (v2f64 VPR128:$Rn))),
+  def : Pat<(f64 (opnode (v2f64 VPR128:$Rn))),
             (INSTD VPR128:$Rn)>;
 }
@@ -5333,7 +5333,7 @@ defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
                                         FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
 
-def : Pat<(v1f32 (int_aarch64_neon_vpfadd (v4f32 VPR128:$Rn))),
+def : Pat<(f32 (int_aarch64_neon_vpfadd (v4f32 VPR128:$Rn))),
           (FADDPvv_S_2S (v2f32 (EXTRACT_SUBREG
             (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
             sub_64)))>;
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
index 3da90365d6d..33ce5cf6ce6 100644
--- a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -9,204 +9,193 @@ define <1 x i64> @test_addp_v1i64(<2 x i64> %a) {
   ret <1 x i64> %val
 }
 
-declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float>)
 
-define <1 x float> @test_faddp_v1f32(<2 x float> %a) {
-; CHECK: test_faddp_v1f32:
+define float @test_faddp_f32(<2 x float> %a) {
+; CHECK: test_faddp_f32:
 ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %val = call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
-  ret <1 x float> %val
+  %val = call float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float> %a)
+  ret float %val
 }
 
-declare <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpfadd.f64.v2f64(<2 x double>)
 
-define <1 x double> @test_faddp_v1f64(<2 x double> %a) {
-; CHECK: test_faddp_v1f64:
+define double @test_faddp_f64(<2 x double> %a) {
+; CHECK: test_faddp_f64:
 ; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %val = call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
-  ret <1 x double> %val
+  %val = call double @llvm.aarch64.neon.vpfadd.f64.v2f64(<2 x double> %a)
+  ret double %val
 }
 
-declare <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpmax.f32.v2f32(<2 x float>)
 
-define <1 x float> @test_fmaxp_v1f32(<2 x float> %a) {
-; CHECK: test_fmaxp_v1f32:
+define float @test_fmaxp_f32(<2 x float> %a) {
+; CHECK: test_fmaxp_f32:
 ; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %val = call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
-  ret <1 x float> %val
+  %val = call float @llvm.aarch64.neon.vpmax.f32.v2f32(<2 x float> %a)
+  ret float %val
 }
 
-declare <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpmax.f64.v2f64(<2 x double>)
 
-define <1 x double> @test_fmaxp_v1f64(<2 x double> %a) {
-; CHECK: test_fmaxp_v1f64:
+define double @test_fmaxp_f64(<2 x double> %a) {
+; CHECK: test_fmaxp_f64:
 ; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %val = call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
-  ret <1 x double> %val
+  %val = call double @llvm.aarch64.neon.vpmax.f64.v2f64(<2 x double> %a)
+  ret double %val
 }
 
-declare <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpmin.f32.v2f32(<2 x float>)
 
-define <1 x float> @test_fminp_v1f32(<2 x float> %a) {
-; CHECK: test_fminp_v1f32:
+define float @test_fminp_f32(<2 x float> %a) {
+; CHECK: test_fminp_f32:
 ; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %val = call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
-  ret <1 x float> %val
+  %val = call float @llvm.aarch64.neon.vpmin.f32.v2f32(<2 x float> %a)
+  ret float %val
 }
 
-declare <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpmin.f64.v2f64(<2 x double>)
 
-define <1 x double> @test_fminp_v1f64(<2 x double> %a) {
-; CHECK: test_fminp_v1f64:
+define double @test_fminp_f64(<2 x double> %a) {
+; CHECK: test_fminp_f64:
 ; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %val = call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
-  ret <1 x double> %val
+  %val = call double @llvm.aarch64.neon.vpmin.f64.v2f64(<2 x double> %a)
+  ret double %val
 }
 
-declare <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpfmaxnm.f32.v2f32(<2 x float>)
 
-define <1 x float> @test_fmaxnmp_v1f32(<2 x float> %a) {
-; CHECK: test_fmaxnmp_v1f32:
+define float @test_fmaxnmp_f32(<2 x float> %a) {
+; CHECK: test_fmaxnmp_f32:
 ; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %val = call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
-  ret <1 x float> %val
+  %val = call float @llvm.aarch64.neon.vpfmaxnm.f32.v2f32(<2 x float> %a)
+  ret float %val
 }
 
-declare <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpfmaxnm.f64.v2f64(<2 x double>)
 
-define <1 x double> @test_fmaxnmp_v1f64(<2 x double> %a) {
-; CHECK: test_fmaxnmp_v1f64:
+define double @test_fmaxnmp_f64(<2 x double> %a) {
+; CHECK: test_fmaxnmp_f64:
 ; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %val = call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
-  ret <1 x double> %val
+  %val = call double @llvm.aarch64.neon.vpfmaxnm.f64.v2f64(<2 x double> %a)
+  ret double %val
 }
 
-declare <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpfminnm.f32.v2f32(<2 x float>)
 
-define <1 x float> @test_fminnmp_v1f32(<2 x float> %a) {
-; CHECK: test_fminnmp_v1f32:
+define float @test_fminnmp_f32(<2 x float> %a) {
+; CHECK: test_fminnmp_f32:
 ; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %val = call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
-  ret <1 x float> %val
+  %val = call float @llvm.aarch64.neon.vpfminnm.f32.v2f32(<2 x float> %a)
+  ret float %val
 }
 
-declare <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpfminnm.f64.v2f64(<2 x double>)
 
-define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
-; CHECK: test_fminnmp_v1f64:
+define double @test_fminnmp_f64(<2 x double> %a) {
+; CHECK: test_fminnmp_f64:
 ; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %val = call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
-  ret <1 x double> %val
+  %val = call double @llvm.aarch64.neon.vpfminnm.f64.v2f64(<2 x double> %a)
+  ret double %val
 }
 
 define float @test_vaddv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vaddv_f32
 ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
-  %2 = extractelement <1 x float> %1, i32 0
-  ret float %2
+  %1 = call float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float> %a)
+  ret float %1
 }
 
 define float @test_vaddvq_f32(<4 x float> %a) {
 ; CHECK-LABEL: test_vaddvq_f32
 ; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float> %a)
-  %2 = extractelement <1 x float> %1, i32 0
-  ret float %2
+  %1 = call float @llvm.aarch64.neon.vpfadd.f32.v4f32(<4 x float> %a)
+  ret float %1
 }
 
 define double @test_vaddvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vaddvq_f64
 ; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
-  %2 = extractelement <1 x double> %1, i32 0
-  ret double %2
+  %1 = call double @llvm.aarch64.neon.vpfadd.f64.v2f64(<2 x double> %a)
+  ret double %1
 }
 
 define float @test_vmaxv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vmaxv_f32
 ; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
-  %2 = extractelement <1 x float> %1, i32 0
-  ret float %2
+  %1 = call float @llvm.aarch64.neon.vpmax.f32.v2f32(<2 x float> %a)
+  ret float %1
 }
 
 define double @test_vmaxvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vmaxvq_f64
 ; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
-  %2 = extractelement <1 x double> %1, i32 0
-  ret double %2
+  %1 = call double @llvm.aarch64.neon.vpmax.f64.v2f64(<2 x double> %a)
+  ret double %1
 }
 
 define float @test_vminv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vminv_f32
 ; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
-  %2 = extractelement <1 x float> %1, i32 0
-  ret float %2
+  %1 = call float @llvm.aarch64.neon.vpmin.f32.v2f32(<2 x float> %a)
  ret float %1
 }
 
 define double @test_vminvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vminvq_f64
 ; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
-  %2 = extractelement <1 x double> %1, i32 0
-  ret double %2
+  %1 = call double @llvm.aarch64.neon.vpmin.f64.v2f64(<2 x double> %a)
+  ret double %1
 }
 
 define double @test_vmaxnmvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vmaxnmvq_f64
 ; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
-  %2 = extractelement <1 x double> %1, i32 0
-  ret double %2
+  %1 = call double @llvm.aarch64.neon.vpfmaxnm.f64.v2f64(<2 x double> %a)
+  ret double %1
 }
 
 define float @test_vmaxnmv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vmaxnmv_f32
 ; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
-  %2 = extractelement <1 x float> %1, i32 0
-  ret float %2
+  %1 = call float @llvm.aarch64.neon.vpfmaxnm.f32.v2f32(<2 x float> %a)
+  ret float %1
 }
 
 define double @test_vminnmvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vminnmvq_f64
 ; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
-  %2 = extractelement <1 x double> %1, i32 0
-  ret double %2
+  %1 = call double @llvm.aarch64.neon.vpfminnm.f64.v2f64(<2 x double> %a)
  ret double %1
 }
 
 define float @test_vminnmv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vminnmv_f32
 ; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
-  %2 = extractelement <1 x float> %1, i32 0
-  ret float %2
+  %1 = call float @llvm.aarch64.neon.vpfminnm.f32.v2f32(<2 x float> %a)
  ret float %1
 }
 
 define <2 x i64> @test_vpaddq_s64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vpaddq_s64
 ; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  %1 = call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %1
 }
 
 define <2 x i64> @test_vpaddq_u64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vpaddq_u64
 ; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  %1 = call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %1
 }
 
 define i64 @test_vaddvq_s64(<2 x i64> %a) {
 ; CHECK-LABEL: test_vaddvq_s64
 ; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %1 = call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
   %2 = extractelement <1 x i64> %1, i32 0
   ret i64 %2
 }
@@ -214,7 +203,7 @@ define i64 @test_vaddvq_s64(<2 x i64> %a) {
 define i64 @test_vaddvq_u64(<2 x i64> %a) {
 ; CHECK-LABEL: test_vaddvq_u64
 ; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %1 = call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
   %2 = extractelement <1 x i64> %1, i32 0
   ret i64 %2
 }
@@ -223,4 +212,4 @@ declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
 
 declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
 
-declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float>)
+declare float @llvm.aarch64.neon.vpfadd.f32.v4f32(<4 x float>)
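
A before/after sketch of what the refactor means at the IR level, using code
drawn from the test changes above (these intrinsics are overloaded, so the
type suffixes in the mangled name follow the return and operand types:
".f32.v2f32" means it returns float and takes <2 x float>). Previously, a
scalar pairwise reduction produced a one-element vector and every caller had
to extract lane 0:

  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
  %2 = extractelement <1 x float> %1, i32 0
  ret float %2

With the return type changed to llvm_anyfloat_ty, the same reduction is a
direct scalar call, which matches the plain s- or d-register result that
faddp and the other pairwise instructions produce (see the CHECK lines above):

  %1 = call float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float> %a)
  ret float %1

The <1 x float>/<1 x double> wrappers carried no extra information, so
dropping them removes the extractelement noise from the tests and from the
isel patterns, as the Neon_ScalarPair_SD_size_patterns change shows.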