From 1a5f7f54f4d817af3f38a1a07cc89c108eb2c295 Mon Sep 17 00:00:00 2001
From: Chandler Carruth <chandlerc@gmail.com>
Date: Sun, 21 Sep 2014 12:49:46 +0000
Subject: [PATCH] [x86] Teach the new vector shuffle lowering the basics about
 insertion of a single element into a zero vector for v4f64 and v4i64 in
 AVX.

Ironically, there is less to see here because xor+blend is so crazy fast
that we can't really beat that to zero the high 128-bit lane.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218214 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp        | 18 +++++++++
 test/CodeGen/X86/vector-shuffle-256-v4.ll | 49 +++++++++++++++++++++++
 2 files changed, 67 insertions(+)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e0cf603fb41..800f9d4bea7 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -9243,6 +9243,15 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
   if (isShuffleEquivalent(Mask, 5, 1, 7, 3))
     return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
 
+  // If we have a single input to the zero element, insert that into V1 if we
+  // can do so cheaply.
+  int NumV2Elements =
+      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+  if (NumV2Elements == 1 && Mask[0] >= 4)
+    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+            MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
+      return Insertion;
+
   if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                 DAG))
     return Blend;
@@ -9306,6 +9315,15 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
   if (is128BitLaneCrossingShuffleMask(MVT::v4i64, Mask))
     return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);
 
+  // If we have a single input to the zero element, insert that into V1 if we
+  // can do so cheaply.
+  int NumV2Elements =
+      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+  if (NumV2Elements == 1 && Mask[0] >= 4)
+    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+            MVT::v4i64, DL, V1, V2, Mask, Subtarget, DAG))
+      return Insertion;
+
   // AVX1 doesn't provide any facilities for v4i64 shuffles, bitcast and
   // delegate to floating point code.
   V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f64, V1);
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 56bbce61ae6..c5f49d7457e 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -563,3 +563,52 @@ define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
 
   ret <4 x i64> %f
 }
+
+define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
+; ALL-LABEL: @insert_reg_and_zero_v4i64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq %rdi, %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %v = insertelement <4 x i64> undef, i64 %a, i64 0
+  %shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
+; ALL-LABEL: @insert_mem_and_zero_v4i64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq (%rdi), %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %a = load i64* %ptr
+  %v = insertelement <4 x i64> undef, i64 %a, i64 0
+  %shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
+; ALL-LABEL: @insert_reg_and_zero_v4f64
+; ALL:       # BB#0:
+; ALL:         vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %v = insertelement <4 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x double> %shuffle
+}
+
+define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
+; ALL-LABEL: @insert_mem_and_zero_v4f64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovsd (%rdi), %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %a = load double* %ptr
+  %v = insertelement <4 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x double> %shuffle
+}
-- 
2.34.1