From b730bdf4e96c1b8f042bfbdef9339601c9d6c1db Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Fri, 21 Aug 2015 18:06:49 +0000
Subject: [PATCH] [x86] enable machine combiner reassociations for 128-bit
 vector min/max

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@245715 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrInfo.cpp      |  8 +++
 test/CodeGen/X86/machine-combiner.ll | 96 ++++++++++++++++++++++++++++
 2 files changed, 104 insertions(+)

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 3732823f167..d964bf17a0f 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6394,12 +6394,20 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
   // Normal min/max instructions are not commutative because of NaN and signed
   // zero semantics, but these are. Thus, there's no need to check for global
   // relaxed math; the instructions themselves have the properties we need.
+  case X86::MAXCPDrr:
+  case X86::MAXCPSrr:
   case X86::MAXCSDrr:
   case X86::MAXCSSrr:
+  case X86::MINCPDrr:
+  case X86::MINCPSrr:
   case X86::MINCSDrr:
   case X86::MINCSSrr:
+  case X86::VMAXCPDrr:
+  case X86::VMAXCPSrr:
   case X86::VMAXCSDrr:
   case X86::VMAXCSSrr:
+  case X86::VMINCPDrr:
+  case X86::VMINCPSrr:
   case X86::VMINCSDrr:
   case X86::VMINCSSrr:
     return true;
diff --git a/test/CodeGen/X86/machine-combiner.ll b/test/CodeGen/X86/machine-combiner.ll
index e9300b4c6e9..371747479e5 100644
--- a/test/CodeGen/X86/machine-combiner.ll
+++ b/test/CodeGen/X86/machine-combiner.ll
@@ -454,3 +454,99 @@ define double @reassociate_maxs_double(double %x0, double %x1, double %x2, doubl
   ret double %sel2
 }
 
+; Verify that SSE and AVX 128-bit vector single-precision minimum ops are reassociated.
+
+define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; SSE-LABEL: reassociate_mins_v4f32:
+; SSE:       # BB#0:
+; SSE-NEXT:    addps %xmm1, %xmm0
+; SSE-NEXT:    minps %xmm3, %xmm2
+; SSE-NEXT:    minps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_mins_v4f32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vminps %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fadd <4 x float> %x0, %x1
+  %cmp1 = fcmp olt <4 x float> %x2, %t0
+  %sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
+  %cmp2 = fcmp olt <4 x float> %x3, %sel1
+  %sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
+  ret <4 x float> %sel2
+}
+
+; Verify that SSE and AVX 128-bit vector single-precision maximum ops are reassociated.
+
+define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; SSE-LABEL: reassociate_maxs_v4f32:
+; SSE:       # BB#0:
+; SSE-NEXT:    addps %xmm1, %xmm0
+; SSE-NEXT:    maxps %xmm3, %xmm2
+; SSE-NEXT:    maxps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_maxs_v4f32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmaxps %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fadd <4 x float> %x0, %x1
+  %cmp1 = fcmp ogt <4 x float> %x2, %t0
+  %sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
+  %cmp2 = fcmp ogt <4 x float> %x3, %sel1
+  %sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
+  ret <4 x float> %sel2
+}
+
+; Verify that SSE and AVX 128-bit vector double-precision minimum ops are reassociated.
+
+define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
+; SSE-LABEL: reassociate_mins_v2f64:
+; SSE:       # BB#0:
+; SSE-NEXT:    addpd %xmm1, %xmm0
+; SSE-NEXT:    minpd %xmm3, %xmm2
+; SSE-NEXT:    minpd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_mins_v2f64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vminpd %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fadd <2 x double> %x0, %x1
+  %cmp1 = fcmp olt <2 x double> %x2, %t0
+  %sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
+  %cmp2 = fcmp olt <2 x double> %x3, %sel1
+  %sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
+  ret <2 x double> %sel2
+}
+
+; Verify that SSE and AVX 128-bit vector double-precision maximum ops are reassociated.
+
+define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
+; SSE-LABEL: reassociate_maxs_v2f64:
+; SSE:       # BB#0:
+; SSE-NEXT:    addpd %xmm1, %xmm0
+; SSE-NEXT:    maxpd %xmm3, %xmm2
+; SSE-NEXT:    maxpd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_maxs_v2f64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmaxpd %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fadd <2 x double> %x0, %x1
+  %cmp1 = fcmp ogt <2 x double> %x2, %t0
+  %sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
+  %cmp2 = fcmp ogt <2 x double> %x3, %sel1
+  %sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
+  ret <2 x double> %sel2
+}
+
-- 
2.34.1
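
A note on what this change buys (commentary, not part of the patch): marking the packed MINC*/MAXC* opcodes as associative and commutative lets the machine combiner rebalance a serial chain of such operations into a tree, keeping the instruction count the same while shortening the critical path. Below is a minimal C++ intrinsics sketch of that reshaping; the function names are made up for illustration, and the combiner performs this on machine IR rather than on source code:

  #include <xmmintrin.h>

  // Serial form: each minps depends on the previous result, so the
  // critical path is three dependent operations.
  __m128 min_chain(__m128 a, __m128 b, __m128 c, __m128 d) {
    return _mm_min_ps(_mm_min_ps(_mm_min_ps(a, b), c), d);
  }

  // Reassociated form: _mm_min_ps(a, b) and _mm_min_ps(c, d) are
  // independent and can execute in parallel, so the critical path
  // is only two dependent operations.
  __m128 min_tree(__m128 a, __m128 b, __m128 c, __m128 d) {
    return _mm_min_ps(_mm_min_ps(a, b), _mm_min_ps(c, d));
  }

This rebalancing is safe only for the commutative MINC*/MAXC* variants: as the comment in isAssociativeAndCommutative notes, ordinary min/max instructions are not commutative because of NaN and signed-zero semantics, whereas these opcodes already carry the properties the combiner needs, so no global relaxed-math check is required.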