From bf83737adbb2380119c7a4cae5fee4c00c9d5f22 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Fri, 21 Aug 2015 21:04:21 +0000
Subject: [PATCH] [x86] enable machine combiner reassociations for 256-bit vector min/max

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@245735 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrInfo.cpp      |  4 ++
 test/CodeGen/X86/machine-combiner.ll | 68 ++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+)

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 9d799d8e780..c3e862b09ed 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6409,10 +6409,14 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
   case X86::MINCSSrr:
   case X86::VMAXCPDrr:
   case X86::VMAXCPSrr:
+  case X86::VMAXCPDYrr:
+  case X86::VMAXCPSYrr:
   case X86::VMAXCSDrr:
   case X86::VMAXCSSrr:
   case X86::VMINCPDrr:
   case X86::VMINCPSrr:
+  case X86::VMINCPDYrr:
+  case X86::VMINCPSYrr:
   case X86::VMINCSDrr:
   case X86::VMINCSSrr:
     return true;
diff --git a/test/CodeGen/X86/machine-combiner.ll b/test/CodeGen/X86/machine-combiner.ll
index 371747479e5..b4340b34cc0 100644
--- a/test/CodeGen/X86/machine-combiner.ll
+++ b/test/CodeGen/X86/machine-combiner.ll
@@ -550,3 +550,71 @@ define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1,
   ret <2 x double> %sel2
 }
 
+; Verify that AVX 256-bit vector single-precision minimum ops are reassociated.
+
+define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_mins_v8f32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vminps %ymm3, %ymm2, %ymm1
+; AVX-NEXT:    vminps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %t0 = fadd <8 x float> %x0, %x1
+  %cmp1 = fcmp olt <8 x float> %x2, %t0
+  %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
+  %cmp2 = fcmp olt <8 x float> %x3, %sel1
+  %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
+  ret <8 x float> %sel2
+}
+
+; Verify that AVX 256-bit vector single-precision maximum ops are reassociated.
+
+define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_maxs_v8f32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vmaxps %ymm3, %ymm2, %ymm1
+; AVX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %t0 = fadd <8 x float> %x0, %x1
+  %cmp1 = fcmp ogt <8 x float> %x2, %t0
+  %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
+  %cmp2 = fcmp ogt <8 x float> %x3, %sel1
+  %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
+  ret <8 x float> %sel2
+}
+
+; Verify that AVX 256-bit vector double-precision minimum ops are reassociated.
+
+define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_mins_v4f64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vminpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %t0 = fadd <4 x double> %x0, %x1
+  %cmp1 = fcmp olt <4 x double> %x2, %t0
+  %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
+  %cmp2 = fcmp olt <4 x double> %x3, %sel1
+  %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
+  ret <4 x double> %sel2
+}
+
+; Verify that AVX 256-bit vector double-precision maximum ops are reassociated.
+
+define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_maxs_v4f64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vmaxpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %t0 = fadd <4 x double> %x0, %x1
+  %cmp1 = fcmp ogt <4 x double> %x2, %t0
+  %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
+  %cmp2 = fcmp ogt <4 x double> %x3, %sel1
+  %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
+  ret <4 x double> %sel2
+}
+
-- 
2.34.1