; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE2
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
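
; These tests check that a vector shift whose per-element amount is a splat of
; the low element of a second vector is lowered to the scalar-count form of the
; shift (psllw/pslld/psllq, psrlw/psrld/psrlq, psraw/psrad), zeroing the count
; register above the low element where the instruction requires it, instead of
; being scalarized or expanded into a per-element variable shift.

; Left shifts (shl): expect psllw/pslld/psllq with a scalar count.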
define <8 x i16> @test1(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test1:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    movzwl %ax, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    psllw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test1:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX-NEXT:    vpsllw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shl = shl <8 x i16> %A, %vecinit14
  ret <8 x i16> %shl
}

define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test2:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT:    pslld %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test2:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX-NEXT:    vpslld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shl = shl <4 x i32> %A, %vecinit6
  ret <4 x i32> %shl
}

define <2 x i64> @test3(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test3:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    psllq %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test3:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
  %shl = shl <2 x i64> %A, %vecinit2
  ret <2 x i64> %shl
}

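; Logical right shifts (lshr): expect psrlw/psrld/psrlq with a scalar count.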
define <8 x i16> @test4(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test4:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    movzwl %ax, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    psrlw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test4:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shr = lshr <8 x i16> %A, %vecinit14
  ret <8 x i16> %shr
}

define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test5:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT:    psrld %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test5:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shr = lshr <4 x i32> %A, %vecinit6
  ret <4 x i32> %shr
}

define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test6:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    psrlq %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test6:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
  %shr = lshr <2 x i64> %A, %vecinit2
  ret <2 x i64> %shr
}

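; Arithmetic right shifts (ashr): expect psraw/psrad with a scalar count. There
; is no <2 x i64> case because SSE2/AVX have no packed 64-bit arithmetic right
; shift (vpsraq requires AVX-512).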
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test7:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    movzwl %ax, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    psraw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test7:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX-NEXT:    vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shr = ashr <8 x i16> %A, %vecinit14
  ret <8 x i16> %shr
}

define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test8:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT:    psrad %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test8:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX-NEXT:    vpsrad %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shr = ashr <4 x i32> %A, %vecinit6
  ret <4 x i32> %shr
}