[X86][SSE] Added vector packing test for pr12412
author Simon Pilgrim <llvm-dev@redking.me.uk>
Sun, 4 Jan 2015 19:08:03 +0000 (19:08 +0000)
committer Simon Pilgrim <llvm-dev@redking.me.uk>
Sun, 4 Jan 2015 19:08:03 +0000 (19:08 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225138 91177308-0d34-0410-b5e6-96231b3b80d8

test/CodeGen/X86/vector-shuffle-128-v16.ll

index 7c92a2636679b648472505b1c40ab1db1efb967b..131410606aa5631d8c3b5e0f2ee33b8be2161594 100644
@@ -1088,3 +1088,37 @@ entry:
   store <4 x i32> zeroinitializer, <4 x i32>* %ptr2, align 16
   ret void
 }
+
+define <16 x i8> @PR12412(<16 x i8> %inval1, <16 x i8> %inval2) {
+; SSE2-LABEL: PR12412:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: PR12412:
+; SSSE3:      # BB#0: # %entry
+; SSSE3-NEXT:   pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
+; SSSE3-NEXT:   pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT:   por %xmm1, %xmm0
+; SSSE3-NEXT:   retq
+;
+; SSE41-LABEL: PR12412:
+; SSE41:      # BB#0: # %entry
+; SSE41-NEXT:   pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
+; SSE41-NEXT:   pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:   por %xmm1, %xmm0
+; SSE41-NEXT:   retq
+;
+; AVX-LABEL: PR12412:
+; AVX:        # BB#0: # %entry
+; AVX-NEXT:     vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
+; AVX-NEXT:     vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:     vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:     retq
+entry:
+  %0 = shufflevector <16 x i8> %inval1, <16 x i8> %inval2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  ret <16 x i8> %0
+}
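
For context (not part of this commit): on little-endian x86, selecting the even bytes of two <16 x i8> values, as the shuffle mask above does, is equivalent to truncating their <8 x i16> bitcasts to <8 x i8> and concatenating the results; this is the pack pattern that the packuswb (SSE2) and pshufb (SSSE3/SSE41/AVX) checks reflect. The IR below is an illustrative sketch only, with a hypothetical function name.

; Sketch: the same even-byte pack expressed via bitcast + trunc
; (assumes little-endian byte order, as on x86).
define <16 x i8> @pack_via_trunc(<16 x i8> %inval1, <16 x i8> %inval2) {
entry:
  %a = bitcast <16 x i8> %inval1 to <8 x i16>
  %b = bitcast <16 x i8> %inval2 to <8 x i16>
  %ta = trunc <8 x i16> %a to <8 x i8>    ; keeps the low byte of each i16
  %tb = trunc <8 x i16> %b to <8 x i8>
  %r = shufflevector <8 x i8> %ta, <8 x i8> %tb, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %r
}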