From f25a5f3290f60443020ed20ec8c1f20520ee14e4 Mon Sep 17 00:00:00 2001
From: Chandler Carruth
Date: Thu, 2 Oct 2014 07:22:26 +0000
Subject: [PATCH] [x86] Update this test to run a full complement of the ISA
 extensions, and use the new grouped FileCheck patterns to match them.

No interesting changes yet, but this test is now in proper form to have
the other shuffle combining tests merged into it.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218857 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/vector-shuffle-combining.ll | 146 ++++++++++++-------
 1 file changed, 92 insertions(+), 54 deletions(-)

diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
index 8cb33af47ff..063dba73cd7 100644
--- a/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -1,4 +1,8 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=CHECK-SSE2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 ;
 ; Verify that the DAG combiner correctly folds bitwise operations across
 ; shuffles, nested shuffles with undef, pairs of nested shuffles, and other
@@ -12,57 +16,72 @@ declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8)
 declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)
 
 define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
-; CHECK-SSE2-LABEL: @combine_pshufd1
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: retq
-  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
-  %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
+; ALL-LABEL: combine_pshufd1:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: retq
+entry:
+  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+  %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
   ret <4 x i32> %c
 }
 
 define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
-; CHECK-SSE2-LABEL: @combine_pshufd2
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: retq
-  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+; ALL-LABEL: combine_pshufd2:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: retq
+entry:
+  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
   %b.cast = bitcast <4 x i32> %b to <8 x i16>
   %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 -28)
   %c.cast = bitcast <8 x i16> %c to <4 x i32>
-  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
   ret <4 x i32> %d
 }
 
 define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
-; CHECK-SSE2-LABEL: @combine_pshufd3
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: retq
-  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+; ALL-LABEL: combine_pshufd3:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: retq
+entry:
+  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
   %b.cast = bitcast <4 x i32> %b to <8 x i16>
   %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 -28)
   %c.cast = bitcast <8 x i16> %c to <4 x i32>
-  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
   ret <4 x i32> %d
 }
 
 define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
-; CHECK-SSE2-LABEL: @combine_pshufd4
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; CHECK-SSE2-NEXT: retq
-  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -31)
+; SSE-LABEL: combine_pshufd4:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufd4:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; AVX-NEXT: retq
+entry:
+  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -31)
   %b.cast = bitcast <4 x i32> %b to <8 x i16>
   %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 27)
   %c.cast = bitcast <8 x i16> %c to <4 x i32>
-  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -31)
+  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -31)
   ret <4 x i32> %d
 }
 
 define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
-; CHECK-SSE2-LABEL: @combine_pshufd5
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; CHECK-SSE2-NEXT: retq
-  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -76)
+; SSE-LABEL: combine_pshufd5:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufd5:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; AVX-NEXT: retq
+entry:
+  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -76)
   %b.cast = bitcast <4 x i32> %b to <8 x i16>
   %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 27)
   %c.cast = bitcast <8 x i16> %c to <4 x i32>
@@ -71,53 +90,72 @@ define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
 }
 
 define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
-; CHECK-SSE2-LABEL: @combine_pshufd6
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: pshufd $0
-; CHECK-SSE2-NEXT: retq
+; SSE-LABEL: combine_pshufd6:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufd6:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX-NEXT: retq
+entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 0)
   %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 8)
   ret <4 x i32> %c
 }
 
 define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
-; CHECK-SSE2-LABEL: @combine_pshuflw1
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: retq
-  %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
-  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+; ALL-LABEL: combine_pshuflw1:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: retq
+entry:
+  %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
   ret <8 x i16> %c
 }
 
 define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
-; CHECK-SSE2-LABEL: @combine_pshuflw2
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: retq
+; ALL-LABEL: combine_pshuflw2:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: retq
+entry:
   %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
-  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
-  %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
+  %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
   ret <8 x i16> %d
 }
 
 define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
-; CHECK-SSE2-LABEL: @combine_pshuflw3
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; CHECK-SSE2-NEXT: retq
+; SSE-LABEL: combine_pshuflw3:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshuflw3:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; AVX-NEXT: retq
+entry:
   %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
-  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 27)
-  %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 27)
+  %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
   ret <8 x i16> %d
 }
 
 define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
-; CHECK-SSE2-LABEL: @combine_pshufhw1
-; CHECK-SSE2: # BB#0:
-; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; CHECK-SSE2-NEXT: retq
+; SSE-LABEL: combine_pshufhw1:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufhw1:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; AVX-NEXT: retq
+entry:
   %b = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27)
-  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
-  %d = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %c, i8 27)
+  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+  %d = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %c, i8 27)
   ret <8 x i16> %d
 }
-
-- 
2.34.1
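
A note on the grouped check prefixes this patch adopts: FileCheck accepts
several --check-prefix flags in a single invocation and honors directives for
any of them, so each RUN line layers a shared prefix (ALL), a family prefix
(SSE or AVX), and an exact-ISA prefix (SSE2, SSSE3, SSE41, AVX1, AVX2). A
pattern is then written once at the broadest tier that still matches every
configuration. Below is a minimal sketch of the idea; the test function
@combine_identity is hypothetical and not part of this patch, it only assumes
that the DAG combiner folds a shuffle of a shuffle with an identity composed
mask, which is the same fold the tests above exercise.

; RUN: llc < %s -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE
; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX

; Two reversing shuffles compose to the identity, so every configuration
; emits just a return; one ALL pattern covers both RUN lines. Separate
; SSE/AVX patterns are only needed where the emitted code diverges
; (e.g. pshufd vs. vpshufd), as in combine_pshufd4 above.
define <4 x i32> @combine_identity(<4 x i32> %a) {
; ALL-LABEL: combine_identity:
; ALL: retq
entry:
  %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i32> %c
}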