From: Simon Pilgrim
Date: Wed, 2 Dec 2015 09:07:55 +0000 (+0000)
Subject: [X86][FMA] Optimize FNEG(FMUL) Patterns
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=68cb3950c097e41c9ebb41fe0855bdf52e1cbe87

[X86][FMA] Optimize FNEG(FMUL) Patterns

On FMA targets, we can avoid having to load a constant to negate a
float/double multiply by instead using an FNMSUB (-(X*Y)-0).

Fix for PR24366

Differential Revision: http://reviews.llvm.org/D14909

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@254495 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index fb990e7499e..0877d96a32c 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -26157,24 +26157,40 @@ static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
 static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
   EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
   SDValue Arg = N->getOperand(0);
+  SDLoc DL(N);
+
+  // Let legalize expand this if it isn't a legal type yet.
+  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+    return SDValue();
+
+  // If we're negating a FMUL node on a target with FMA, then we can avoid the
+  // use of a constant by performing (-0 - A*B) instead.
+  // FIXME: Check rounding control flags as well once it becomes available.
+  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
+      Arg->getFlags()->hasNoSignedZeros() && Subtarget->hasAnyFMA()) {
+    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
+    return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
+                       Arg.getOperand(1), Zero);
+  }
 
   // If we're negating a FMA node, then we can adjust the
   // instruction to include the extra negation.
   if (Arg.hasOneUse()) {
     switch (Arg.getOpcode()) {
-    case X86ISD::FMADD:
-      return DAG.getNode(X86ISD::FNMSUB, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
-    case X86ISD::FMSUB:
-      return DAG.getNode(X86ISD::FNMADD, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
-    case X86ISD::FNMADD:
-      return DAG.getNode(X86ISD::FMSUB, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
-    case X86ISD::FNMSUB:
-      return DAG.getNode(X86ISD::FMADD, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FMADD:
+      return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FMSUB:
+      return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FNMADD:
+      return DAG.getNode(X86ISD::FMSUB, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FNMSUB:
+      return DAG.getNode(X86ISD::FMADD, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
     }
   }
   return SDValue();
diff --git a/test/CodeGen/X86/fma_patterns.ll b/test/CodeGen/X86/fma_patterns.ll
index e3295e45823..0f0dd20da04 100644
--- a/test/CodeGen/X86/fma_patterns.ll
+++ b/test/CodeGen/X86/fma_patterns.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA4
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4 -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA4
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=AVX512
 ;
 ; Pattern: (fadd (fmul x, y), z) -> (fmadd x,y,z)
@@ -1109,4 +1109,87 @@ define <4 x float> @test_v4f32_fma_fmul_x_c1_c2_y(<4 x float> %x, <4 x float> %y
   ret <4 x float> %a
 }
 
+; Pattern: (fneg (fmul x, y)) -> (fnmsub x, y, 0)
+
+define double @test_f64_fneg_fmul(double %x, double %y) #0 {
+; FMA-LABEL: test_f64_fneg_fmul:
+; FMA: # BB#0:
+; FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FMA-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
+; FMA-NEXT: retq
+;
+; FMA4-LABEL: test_f64_fneg_fmul:
+; FMA4: # BB#0:
+; FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FMA4-NEXT: vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: retq
+;
+; AVX512-LABEL: test_f64_fneg_fmul:
+; AVX512: # BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1
+; AVX512-NEXT: vmovaps %zmm1, %zmm0
+; AVX512-NEXT: retq
+  %m = fmul nsz double %x, %y
+  %n = fsub double -0.0, %m
+  ret double %n
+}
+
+define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
+; FMA-LABEL: test_v4f32_fneg_fmul:
+; FMA: # BB#0:
+; FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
+; FMA-NEXT: retq
+;
+; FMA4-LABEL: test_v4f32_fneg_fmul:
+; FMA4: # BB#0:
+; FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FMA4-NEXT: vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: retq
+;
+; AVX512-LABEL: test_v4f32_fneg_fmul:
+; AVX512: # BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
+; AVX512-NEXT: retq
+  %m = fmul nsz <4 x float> %x, %y
+  %n = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m
+  ret <4 x float> %n
+}
+
+define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) #0 {
+; FMA-LABEL: test_v4f64_fneg_fmul:
+; FMA: # BB#0:
+; FMA-NEXT: vxorpd %ymm2, %ymm2, %ymm2
+; FMA-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
+; FMA-NEXT: retq
+;
+; FMA4-LABEL: test_v4f64_fneg_fmul:
+; FMA4: # BB#0:
+; FMA4-NEXT: vxorpd %ymm2, %ymm2, %ymm2
+; FMA4-NEXT: vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0
+; FMA4-NEXT: retq
+;
+; AVX512-LABEL: test_v4f64_fneg_fmul:
+; AVX512: # BB#0:
+; AVX512-NEXT: vxorps %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0
+; AVX512-NEXT: retq
+  %m = fmul nsz <4 x double> %x, %y
+  %n = fsub <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <4 x double> %n
+}
+
+define <4 x double> @test_v4f64_fneg_fmul_no_nsz(<4 x double> %x, <4 x double> %y) #0 {
+; ALL-LABEL: test_v4f64_fneg_fmul_no_nsz:
+; ALL: # BB#0:
+; ALL-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; ALL-NEXT: vxorpd {{.*}}(%rip), %ymm0, %ymm0
+; ALL-NEXT: retq
+  %m = fmul <4 x double> %x, %y
+  %n = fsub <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <4 x double> %n
+}
+
 attributes #0 = { "unsafe-fp-math"="true" }
diff --git a/test/CodeGen/X86/fma_patterns_wide.ll b/test/CodeGen/X86/fma_patterns_wide.ll
index f412c174fe3..7b6509ad51c 100644
--- a/test/CodeGen/X86/fma_patterns_wide.ll
+++ b/test/CodeGen/X86/fma_patterns_wide.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -fp-contract=fast | FileCheck %s --check-prefix=FMA
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+fma -fp-contract=fast | FileCheck %s --check-prefix=FMA4
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4 -fp-contract=fast | FileCheck %s --check-prefix=FMA4
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -fp-contract=fast | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq -fp-contract=fast | FileCheck %s --check-prefix=AVX512
 ;
 ; Pattern: (fadd (fmul x, y), z) -> (fmadd x,y,z)
@@ -737,4 +737,85 @@ define <16 x float> @test_v16f32_fma_fmul_x_c1_c2_y(<16 x float> %x, <16 x float
   ret <16 x float> %a
 }
 
+; Pattern: (fneg (fmul x, y)) -> (fnmsub x, y, 0)
+
+define <16 x float> @test_v16f32_fneg_fmul(<16 x float> %x, <16 x float> %y) #0 {
+; FMA-LABEL: test_v16f32_fneg_fmul:
+; FMA: # BB#0:
+; FMA-NEXT: vxorps %ymm4, %ymm4, %ymm4
+; FMA-NEXT: vfnmsub213ps %ymm4, %ymm2, %ymm0
+; FMA-NEXT: vfnmsub213ps %ymm4, %ymm3, %ymm1
+; FMA-NEXT: retq
+;
+; FMA4-LABEL: test_v16f32_fneg_fmul:
+; FMA4: # BB#0:
+; FMA4-NEXT: vxorps %ymm4, %ymm4, %ymm4
+; FMA4-NEXT: vfnmsubps %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vfnmsubps %ymm4, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: retq
+;
+; AVX512-LABEL: test_v16f32_fneg_fmul:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
+; AVX512-NEXT: retq
+  %m = fmul nsz <16 x float> %x, %y
+  %n = fsub <16 x float> <float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0>, %m
+  ret <16 x float> %n
+}
+
+define <8 x double> @test_v8f64_fneg_fmul(<8 x double> %x, <8 x double> %y) #0 {
+; FMA-LABEL: test_v8f64_fneg_fmul:
+; FMA: # BB#0:
+; FMA-NEXT: vxorpd %ymm4, %ymm4, %ymm4
+; FMA-NEXT: vfnmsub213pd %ymm4, %ymm2, %ymm0
+; FMA-NEXT: vfnmsub213pd %ymm4, %ymm3, %ymm1
+; FMA-NEXT: retq
+;
+; FMA4-LABEL: test_v8f64_fneg_fmul:
+; FMA4: # BB#0:
+; FMA4-NEXT: vxorpd %ymm4, %ymm4, %ymm4
+; FMA4-NEXT: vfnmsubpd %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vfnmsubpd %ymm4, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: retq
+;
+; AVX512-LABEL: test_v8f64_fneg_fmul:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
+; AVX512-NEXT: retq
+  %m = fmul nsz <8 x double> %x, %y
+  %n = fsub <8 x double> <double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <8 x double> %n
+}
+
+define <8 x double> @test_v8f64_fneg_fmul_no_nsz(<8 x double> %x, <8 x double> %y) #0 {
+; FMA-LABEL: test_v8f64_fneg_fmul_no_nsz:
+; FMA: # BB#0:
+; FMA-NEXT: vmulpd %ymm3, %ymm1, %ymm1
+; FMA-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vmovapd {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; FMA-NEXT: vxorpd %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vxorpd %ymm2, %ymm1, %ymm1
+; FMA-NEXT: retq
+;
+; FMA4-LABEL: test_v8f64_fneg_fmul_no_nsz:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmovapd {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; FMA4-NEXT: vxorpd %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vxorpd %ymm2, %ymm1, %ymm1
+; FMA4-NEXT: retq
+;
+; AVX512-LABEL: test_v8f64_fneg_fmul_no_nsz:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vxorpd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: retq
+  %m = fmul <8 x double> %x, %y
+  %n = fsub <8 x double> <double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <8 x double> %n
+}
+
 attributes #0 = { "unsafe-fp-math"="true" }
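
As a rough illustration of the transform described in the commit message (not part of the commit itself; the function name and build flags below are assumptions), a negated product like the one in this small C sketch is the source-level shape that produces the FNEG(FMUL) DAG this combine matches, provided the no-signed-zeros (nsz) assumption is in effect, e.g. when built with something like clang -O2 -mfma -ffast-math:

    /* Hypothetical example, not taken from the commit. */
    double neg_mul(double x, double y) {
      /* Without this combine, -(x*y) is a multiply followed by an XOR with a
         sign-mask constant loaded from the constant pool.  With FMA available
         and nsz in effect, the combine emits FNMSUB(x, y, 0) instead, i.e.
         -(x*y) - 0.0, which only needs a zeroed register. */
      return -(x * y);
    }

The new test_f64_fneg_fmul checks above show the expected codegen for the equivalent IR: a vxorps to zero a register plus a single vfnmsub213sd, with no constant-pool load.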