X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=test%2FCodeGen%2FX86%2Fvector-idiv.ll;h=2e482a0f1430447cc44d5c7f203e88bdd3d6b589;hb=4da23583b63eb5014454fded95cab1fe651e800e;hp=a98ed322136a261e462f122e23571dbab71a3238;hpb=aa35e012a3e454d654442d89a8f49ec1a7193403;p=oota-llvm.git diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll index a98ed322136..2e482a0f143 100644 --- a/test/CodeGen/X86/vector-idiv.ll +++ b/test/CodeGen/X86/vector-idiv.ll @@ -4,20 +4,19 @@ target triple = "x86_64-unknown-unknown" -define <4 x i32> @test1(<4 x i32> %a) { +define <4 x i32> @test1(<4 x i32> %a) #0 { ; SSE41-LABEL: test1: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757] -; SSE41-NEXT: movdqa %xmm0, %xmm2 -; SSE41-NEXT: pmuludq %xmm1, %xmm2 -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq %xmm1, %xmm3 -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] -; SSE41-NEXT: psubd %xmm2, %xmm0 +; SSE41-NEXT: pmuludq %xmm2, %xmm3 +; SSE41-NEXT: pmuludq %xmm0, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] +; SSE41-NEXT: psubd %xmm1, %xmm0 ; SSE41-NEXT: psrld $1, %xmm0 -; SSE41-NEXT: paddd %xmm2, %xmm0 +; SSE41-NEXT: paddd %xmm1, %xmm0 ; SSE41-NEXT: psrld $2, %xmm0 ; SSE41-NEXT: retq ; @@ -26,11 +25,12 @@ define <4 x i32> @test1(<4 x i32> %a) { ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757] ; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: pmuludq %xmm1, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] ; SSE-NEXT: pmuludq %xmm1, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; SSE-NEXT: psubd %xmm2, %xmm0 ; SSE-NEXT: psrld $1, %xmm0 ; SSE-NEXT: paddd %xmm2, %xmm0 @@ -40,12 +40,12 @@ define <4 x i32> @test1(<4 x i32> %a) { ; AVX-LABEL: test1: ; AVX: # BB#0: ; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 -; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 -; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3] -; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2,1,3] +; AVX-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] ; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpsrld $1, %xmm0, %xmm0 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 @@ -55,26 +55,26 @@ define <4 x i32> @test1(<4 x i32> %a) { ret <4 x i32> %div } -define <8 x i32> @test2(<8 x i32> %a) { +define <8 x i32> @test2(<8 x i32> %a) #0 { ; SSE41-LABEL: test2: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757] -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: pmuludq %xmm2, %xmm3 -; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq %xmm4, %xmm5 -; SSE41-NEXT: 
shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3] -; SSE41-NEXT: psubd %xmm3, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm3, %xmm4 +; SSE41-NEXT: movdqa %xmm0, %xmm5 +; SSE41-NEXT: pmuludq %xmm2, %xmm5 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7] +; SSE41-NEXT: psubd %xmm5, %xmm0 ; SSE41-NEXT: psrld $1, %xmm0 -; SSE41-NEXT: paddd %xmm3, %xmm0 +; SSE41-NEXT: paddd %xmm5, %xmm0 ; SSE41-NEXT: psrld $2, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm3, %xmm4 ; SSE41-NEXT: pmuludq %xmm1, %xmm2 -; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] -; SSE41-NEXT: pmuludq %xmm4, %xmm3 -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7] ; SSE41-NEXT: psubd %xmm2, %xmm1 ; SSE41-NEXT: psrld $1, %xmm1 ; SSE41-NEXT: paddd %xmm2, %xmm1 @@ -86,20 +86,22 @@ define <8 x i32> @test2(<8 x i32> %a) { ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757] ; SSE-NEXT: movdqa %xmm0, %xmm3 ; SSE-NEXT: pmuludq %xmm2, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] ; SSE-NEXT: pmuludq %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] ; SSE-NEXT: psubd %xmm3, %xmm0 ; SSE-NEXT: psrld $1, %xmm0 ; SSE-NEXT: paddd %xmm3, %xmm0 ; SSE-NEXT: psrld $2, %xmm0 ; SSE-NEXT: pmuludq %xmm1, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] ; SSE-NEXT: pmuludq %xmm4, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE-NEXT: psubd %xmm2, %xmm1 ; SSE-NEXT: psrld $1, %xmm1 ; SSE-NEXT: paddd %xmm2, %xmm1 @@ -124,7 +126,7 @@ define <8 x i32> @test2(<8 x i32> %a) { ret <8 x i32> %div } -define <8 x i16> @test3(<8 x i16> %a) { +define <8 x i16> @test3(<8 x i16> %a) #0 { ; SSE41-LABEL: test3: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363] @@ -157,7 +159,7 @@ define <8 x i16> @test3(<8 x i16> %a) { ret <8 x i16> %div } -define <16 x i16> @test4(<16 x i16> %a) { +define <16 x i16> @test4(<16 x i16> %a) #0 { ; SSE41-LABEL: test4: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363] @@ -202,7 +204,7 @@ define <16 x i16> @test4(<16 x i16> %a) { ret <16 x i16> %div } -define <8 x i16> @test5(<8 x i16> %a) { +define <8 x i16> @test5(<8 x i16> %a) #0 { ; SSE41-LABEL: test5: ; SSE41: # BB#0: ; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0 @@ -232,7 +234,7 @@ define <8 x i16> @test5(<8 x i16> %a) { ret <8 x i16> %div } -define <16 x i16> @test6(<16 x i16> %a) { +define <16 x i16> @test6(<16 x i16> %a) #0 { ; SSE41-LABEL: test6: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725] @@ -274,7 +276,7 @@ define <16 x i16> 
@test6(<16 x i16> %a) { ret <16 x i16> %div } -define <16 x i8> @test7(<16 x i8> %a) { +define <16 x i8> @test7(<16 x i8> %a) #0 { ; SSE41-LABEL: test7: ; SSE41: # BB#0: ; SSE41-NEXT: pextrb $1, %xmm0, %eax @@ -458,6 +460,9 @@ define <16 x i8> @test7(<16 x i8> %a) { ; ; SSE-LABEL: test7: ; SSE: # BB#0: +; SSE-NEXT: pushq %rbp +; SSE-NEXT: pushq %r14 +; SSE-NEXT: pushq %rbx ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: imull $-109, %eax, %ecx @@ -469,156 +474,156 @@ define <16 x i8> @test7(<16 x i8> %a) { ; SSE-NEXT: addb %al, %cl ; SSE-NEXT: movzbl %cl, %eax ; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r14d +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r9d ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax -; SSE-NEXT: movd %eax, %xmm1 +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r11d +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r8d +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi +; SSE-NEXT: imull $-109, %esi, %edi +; SSE-NEXT: shrl $8, %edi +; SSE-NEXT: addb %sil, %dil +; SSE-NEXT: movb %dil, %bl +; SSE-NEXT: shrb $7, %bl +; SSE-NEXT: sarb $2, %dil +; SSE-NEXT: addb %bl, %dil +; SSE-NEXT: movzbl %dil, %esi +; SSE-NEXT: movd %esi, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al +; SSE-NEXT: imull $-109, %eax, %esi +; SSE-NEXT: shrl $8, %esi +; SSE-NEXT: addb %al, %sil +; SSE-NEXT: movb %sil, %al ; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: sarb $2, %sil +; SSE-NEXT: addb %al, %sil +; SSE-NEXT: movzbl %sil, %eax ; SSE-NEXT: movd %eax, %xmm2 -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r10d +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi +; SSE-NEXT: imull $-109, %edi, %ebx +; SSE-NEXT: shrl $8, %ebx +; SSE-NEXT: addb %dil, %bl +; SSE-NEXT: movb %bl, %al ; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: sarb $2, %bl +; SSE-NEXT: addb %al, %bl +; SSE-NEXT: movzbl %bl, %eax ; SSE-NEXT: movd %eax, %xmm0 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %edx, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb 
%dl, %al +; SSE-NEXT: movb %al, %dl +; SSE-NEXT: shrb $7, %dl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %dl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm1 -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %esi, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %sil, %al +; SSE-NEXT: movb %al, %dl +; SSE-NEXT: shrb $7, %dl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %dl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %ecx, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %cl, %al +; SSE-NEXT: movb %al, %cl +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %cl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm3 +; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al +; SSE-NEXT: imull $-109, %eax, %edx +; SSE-NEXT: shrl $8, %edx +; SSE-NEXT: addb %al, %dl +; SSE-NEXT: movb %dl, %al ; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: sarb $2, %dl +; SSE-NEXT: addb %al, %dl +; SSE-NEXT: movzbl %dl, %eax ; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %r14d, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %r14b, %al +; SSE-NEXT: movb %al, %dl +; SSE-NEXT: shrb $7, %dl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %dl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm2 -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %ebp, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %bpl, %al +; SSE-NEXT: movb %al, %dl +; SSE-NEXT: shrb $7, %dl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %dl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm0 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %r11d, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %r11b, %al +; SSE-NEXT: movb %al, %dl +; SSE-NEXT: shrb $7, %dl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %dl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm3 -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %ecx, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %cl, %al +; SSE-NEXT: movb %al, %cl +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %cl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %r9d, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %r9b, %al +; SSE-NEXT: movb %al, %cl +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %cl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm0 -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %r10d, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %r10b, %al +; SSE-NEXT: movb %al, %cl +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %cl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: imull $-109, %eax, %ecx -; SSE-NEXT: shrl $8, %ecx -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movb %cl, %al -; SSE-NEXT: shrb $7, %al -; SSE-NEXT: sarb $2, %cl -; SSE-NEXT: addb %al, %cl -; SSE-NEXT: movzbl %cl, %eax +; SSE-NEXT: imull $-109, %r8d, %eax +; SSE-NEXT: shrl $8, %eax +; SSE-NEXT: addb %r8b, %al +; SSE-NEXT: movb %al, %cl +; SSE-NEXT: shrb $7, %cl +; SSE-NEXT: sarb $2, %al +; SSE-NEXT: addb %cl, %al +; SSE-NEXT: movzbl %al, %eax ; SSE-NEXT: movd %eax, %xmm4 ; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: imull $-109, %eax, %ecx @@ -634,6 +639,9 @@ define <16 x i8> @test7(<16 x i8> %a) { ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE-NEXT: popq %rbx +; SSE-NEXT: popq %r14 +; SSE-NEXT: popq %rbp ; SSE-NEXT: retq ; ; AVX-LABEL: test7: @@ -819,17 +827,16 @@ define <16 x i8> @test7(<16 x i8> %a) { ret <16 x i8> %div } -define <4 x i32> @test8(<4 x i32> %a) { +define <4 x i32> @test8(<4 x i32> %a) #0 { ; SSE41-LABEL: test8: ; SSE41: # BB#0: -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027] -; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: pmuldq %xmm2, %xmm1 -; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027] +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] ; SSE41-NEXT: pmuldq %xmm2, %xmm3 -; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3] +; SSE41-NEXT: pmuldq %xmm0, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] ; SSE41-NEXT: paddd %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: psrld $31, %xmm0 @@ -840,21 +847,22 @@ define <4 x i32> @test8(<4 x i32> %a) { ; ; SSE-LABEL: test8: ; SSE: # BB#0: -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: psrad $31, %xmm3 -; SSE-NEXT: pand %xmm2, %xmm3 -; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: pmuludq %xmm2, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027] +; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: psrad $31, %xmm2 -; SSE-NEXT: pand %xmm0, %xmm2 -; SSE-NEXT: paddd %xmm2, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; SSE-NEXT: pmuludq %xmm4, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3] -; SSE-NEXT: psubd %xmm3, %xmm1 +; SSE-NEXT: pand %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm0, %xmm3 +; SSE-NEXT: pmuludq %xmm1, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSE-NEXT: psrad $31, %xmm1 +; SSE-NEXT: pand %xmm0, %xmm1 +; SSE-NEXT: paddd %xmm1, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE-NEXT: pmuludq %xmm4, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE-NEXT: psubd %xmm2, %xmm1 ; SSE-NEXT: paddd %xmm0, %xmm1 ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: psrld $31, %xmm0 @@ -866,12 +874,12 @@ define <4 x i32> @test8(<4 x i32> %a) { ; AVX-LABEL: test8: ; AVX: # BB#0: ; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 -; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2 -; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; AVX-NEXT: vpmuldq %xmm1, %xmm3, %xmm1 -; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3] -; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2,1,3] +; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX-NEXT: 
vpmuldq %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] ; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ; AVX-NEXT: vpsrld $31, %xmm0, %xmm1 ; AVX-NEXT: vpsrad $2, %xmm0, %xmm0 @@ -881,78 +889,80 @@ define <4 x i32> @test8(<4 x i32> %a) { ret <4 x i32> %div } -define <8 x i32> @test9(<8 x i32> %a) { +define <8 x i32> @test9(<8 x i32> %a) #0 { ; SSE41-LABEL: test9: ; SSE41: # BB#0: -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027] -; SSE41-NEXT: # kill: XMM0 XMM3 -; SSE41-NEXT: pmuldq %xmm1, %xmm0 -; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3] +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027] +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] ; SSE41-NEXT: pmuldq %xmm4, %xmm5 -; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm5[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3] -; SSE41-NEXT: paddd %xmm3, %xmm0 -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: psrld $31, %xmm3 -; SSE41-NEXT: psrad $2, %xmm0 -; SSE41-NEXT: paddd %xmm3, %xmm0 -; SSE41-NEXT: pmuldq %xmm2, %xmm1 -; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] -; SSE41-NEXT: pmuldq %xmm4, %xmm3 -; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3] -; SSE41-NEXT: paddd %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psrld $31, %xmm2 -; SSE41-NEXT: psrad $2, %xmm1 -; SSE41-NEXT: paddd %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmuldq %xmm3, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7] +; SSE41-NEXT: paddd %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: psrld $31, %xmm0 +; SSE41-NEXT: psrad $2, %xmm2 +; SSE41-NEXT: paddd %xmm0, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm4, %xmm0 +; SSE41-NEXT: pmuldq %xmm1, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7] +; SSE41-NEXT: paddd %xmm1, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: psrld $31, %xmm0 +; SSE41-NEXT: psrad $2, %xmm3 +; SSE41-NEXT: paddd %xmm0, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: movdqa %xmm3, %xmm1 ; SSE41-NEXT: retq ; ; SSE-LABEL: test9: ; SSE: # BB#0: -; SSE-NEXT: movdqa %xmm1, %xmm2 -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027] -; SSE-NEXT: movdqa %xmm1, %xmm4 +; SSE-NEXT: movdqa %xmm0, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027] +; SSE-NEXT: movdqa %xmm3, %xmm4 ; SSE-NEXT: psrad $31, %xmm4 ; SSE-NEXT: movdqa %xmm4, %xmm0 -; SSE-NEXT: pand %xmm3, %xmm0 -; SSE-NEXT: movdqa %xmm3, %xmm5 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: movdqa %xmm2, %xmm5 ; SSE-NEXT: psrad $31, %xmm5 -; SSE-NEXT: pand %xmm1, %xmm5 +; SSE-NEXT: pand %xmm3, %xmm5 ; SSE-NEXT: paddd %xmm0, %xmm5 -; SSE-NEXT: movdqa %xmm3, %xmm0 -; SSE-NEXT: pmuludq %xmm1, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3] -; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3] +; SSE-NEXT: movdqa %xmm2, %xmm0 +; SSE-NEXT: pmuludq %xmm3, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE-NEXT: pshufd {{.*#+}} 
xmm6 = xmm3[1,1,3,3] +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3] ; SSE-NEXT: pmuludq %xmm6, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm7[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1] ; SSE-NEXT: psubd %xmm5, %xmm0 -; SSE-NEXT: paddd %xmm3, %xmm0 -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: psrld $31, %xmm3 -; SSE-NEXT: psrad $2, %xmm0 -; SSE-NEXT: paddd %xmm3, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm4 -; SSE-NEXT: movdqa %xmm2, %xmm3 -; SSE-NEXT: psrad $31, %xmm3 -; SSE-NEXT: pand %xmm1, %xmm3 -; SSE-NEXT: paddd %xmm4, %xmm3 -; SSE-NEXT: pmuludq %xmm2, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] -; SSE-NEXT: pmuludq %xmm6, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm4[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3] -; SSE-NEXT: psubd %xmm3, %xmm1 -; SSE-NEXT: paddd %xmm2, %xmm1 -; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: paddd %xmm2, %xmm0 +; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: psrld $31, %xmm2 -; SSE-NEXT: psrad $2, %xmm1 -; SSE-NEXT: paddd %xmm2, %xmm1 +; SSE-NEXT: psrad $2, %xmm0 +; SSE-NEXT: paddd %xmm2, %xmm0 +; SSE-NEXT: pand %xmm1, %xmm4 +; SSE-NEXT: movdqa %xmm1, %xmm5 +; SSE-NEXT: psrad $31, %xmm5 +; SSE-NEXT: pand %xmm3, %xmm5 +; SSE-NEXT: paddd %xmm4, %xmm5 +; SSE-NEXT: pmuludq %xmm1, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] +; SSE-NEXT: pmuludq %xmm6, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE-NEXT: psubd %xmm5, %xmm2 +; SSE-NEXT: paddd %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm2, %xmm1 +; SSE-NEXT: psrld $31, %xmm1 +; SSE-NEXT: psrad $2, %xmm2 +; SSE-NEXT: paddd %xmm1, %xmm2 +; SSE-NEXT: movdqa %xmm2, %xmm1 ; SSE-NEXT: retq ; ; AVX-LABEL: test9: @@ -973,76 +983,80 @@ define <8 x i32> @test9(<8 x i32> %a) { ret <8 x i32> %div } -define <8 x i32> @test10(<8 x i32> %a) { +define <8 x i32> @test10(<8 x i32> %a) #0 { ; SSE41-LABEL: test10: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757] -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: pmuludq %xmm2, %xmm3 -; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq %xmm4, %xmm5 -; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm3, %xmm4 ; SSE41-NEXT: movdqa %xmm0, %xmm5 -; SSE41-NEXT: psubd %xmm3, %xmm5 -; SSE41-NEXT: psrld $1, %xmm5 -; SSE41-NEXT: paddd %xmm3, %xmm5 -; SSE41-NEXT: psrld $2, %xmm5 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7] -; SSE41-NEXT: pmulld %xmm3, %xmm5 -; SSE41-NEXT: psubd %xmm5, %xmm0 -; SSE41-NEXT: pmuludq %xmm1, %xmm2 -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] -; SSE41-NEXT: pmuludq %xmm4, %xmm5 -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] -; SSE41-NEXT: movdqa %xmm1, %xmm4 -; SSE41-NEXT: psubd %xmm2, %xmm4 +; SSE41-NEXT: pmuludq %xmm2, %xmm5 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7] +; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: psubd %xmm5, %xmm4 ; SSE41-NEXT: psrld $1, %xmm4 -; SSE41-NEXT: paddd %xmm2, %xmm4 +; 
SSE41-NEXT: paddd %xmm5, %xmm4 ; SSE41-NEXT: psrld $2, %xmm4 -; SSE41-NEXT: pmulld %xmm3, %xmm4 -; SSE41-NEXT: psubd %xmm4, %xmm1 +; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7] +; SSE41-NEXT: pmulld %xmm5, %xmm4 +; SSE41-NEXT: psubd %xmm4, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm3, %xmm4 +; SSE41-NEXT: pmuludq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7] +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: psubd %xmm2, %xmm3 +; SSE41-NEXT: psrld $1, %xmm3 +; SSE41-NEXT: paddd %xmm2, %xmm3 +; SSE41-NEXT: psrld $2, %xmm3 +; SSE41-NEXT: pmulld %xmm5, %xmm3 +; SSE41-NEXT: psubd %xmm3, %xmm1 ; SSE41-NEXT: retq ; ; SSE-LABEL: test10: ; SSE: # BB#0: -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757] -; SSE-NEXT: movdqa %xmm0, %xmm3 -; SSE-NEXT: pmuludq %xmm2, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [613566757,613566757,613566757,613566757] +; SSE-NEXT: movdqa %xmm0, %xmm2 +; SSE-NEXT: pmuludq %xmm3, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] ; SSE-NEXT: pmuludq %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] ; SSE-NEXT: movdqa %xmm0, %xmm5 -; SSE-NEXT: psubd %xmm3, %xmm5 +; SSE-NEXT: psubd %xmm2, %xmm5 ; SSE-NEXT: psrld $1, %xmm5 -; SSE-NEXT: paddd %xmm3, %xmm5 +; SSE-NEXT: paddd %xmm2, %xmm5 ; SSE-NEXT: psrld $2, %xmm5 -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7] +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3] -; SSE-NEXT: pmuludq %xmm3, %xmm5 -; SSE-NEXT: pmuludq %xmm3, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,1,3] +; SSE-NEXT: pmuludq %xmm2, %xmm5 +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3] +; SSE-NEXT: pmuludq %xmm2, %xmm6 +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] ; SSE-NEXT: psubd %xmm5, %xmm0 -; SSE-NEXT: pmuludq %xmm1, %xmm2 +; SSE-NEXT: pmuludq %xmm1, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] ; SSE-NEXT: pmuludq %xmm4, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] ; SSE-NEXT: movdqa %xmm1, %xmm4 -; SSE-NEXT: psubd %xmm2, %xmm4 +; SSE-NEXT: psubd %xmm3, %xmm4 ; SSE-NEXT: psrld $1, %xmm4 -; SSE-NEXT: paddd %xmm2, %xmm4 +; SSE-NEXT: paddd %xmm3, %xmm4 ; SSE-NEXT: psrld $2, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3] -; SSE-NEXT: pmuludq %xmm3, %xmm4 -; SSE-NEXT: pmuludq %xmm3, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm2[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] +; SSE-NEXT: pmuludq %xmm2, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] +; SSE-NEXT: pmuludq %xmm2, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] ; SSE-NEXT: psubd %xmm4, %xmm1 ; SSE-NEXT: retq ; @@ 
-1067,36 +1081,36 @@ define <8 x i32> @test10(<8 x i32> %a) { ret <8 x i32> %rem } -define <8 x i32> @test11(<8 x i32> %a) { +define <8 x i32> @test11(<8 x i32> %a) #0 { ; SSE41-LABEL: test11: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027] -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: pmuldq %xmm2, %xmm3 -; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] -; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuldq %xmm4, %xmm5 -; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3] -; SSE41-NEXT: paddd %xmm0, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm5 -; SSE41-NEXT: psrld $31, %xmm5 -; SSE41-NEXT: psrad $2, %xmm3 -; SSE41-NEXT: paddd %xmm5, %xmm3 -; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7] -; SSE41-NEXT: pmulld %xmm5, %xmm3 -; SSE41-NEXT: psubd %xmm3, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm3, %xmm4 +; SSE41-NEXT: movdqa %xmm0, %xmm5 +; SSE41-NEXT: pmuldq %xmm2, %xmm5 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7] +; SSE41-NEXT: paddd %xmm0, %xmm5 +; SSE41-NEXT: movdqa %xmm5, %xmm4 +; SSE41-NEXT: psrld $31, %xmm4 +; SSE41-NEXT: psrad $2, %xmm5 +; SSE41-NEXT: paddd %xmm4, %xmm5 +; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7] +; SSE41-NEXT: pmulld %xmm4, %xmm5 +; SSE41-NEXT: psubd %xmm5, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm3, %xmm5 ; SSE41-NEXT: pmuldq %xmm1, %xmm2 -; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] -; SSE41-NEXT: pmuldq %xmm4, %xmm3 -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7] ; SSE41-NEXT: paddd %xmm1, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm3 ; SSE41-NEXT: psrld $31, %xmm3 ; SSE41-NEXT: psrad $2, %xmm2 ; SSE41-NEXT: paddd %xmm3, %xmm2 -; SSE41-NEXT: pmulld %xmm5, %xmm2 +; SSE41-NEXT: pmulld %xmm4, %xmm2 ; SSE41-NEXT: psubd %xmm2, %xmm1 ; SSE41-NEXT: retq ; @@ -1111,13 +1125,14 @@ define <8 x i32> @test11(<8 x i32> %a) { ; SSE-NEXT: psrad $31, %xmm6 ; SSE-NEXT: pand %xmm2, %xmm6 ; SSE-NEXT: paddd %xmm4, %xmm6 -; SSE-NEXT: movdqa %xmm0, %xmm7 -; SSE-NEXT: pmuludq %xmm2, %xmm7 +; SSE-NEXT: movdqa %xmm0, %xmm4 +; SSE-NEXT: pmuludq %xmm2, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,3,2,3] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] ; SSE-NEXT: pmuludq %xmm5, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,3],xmm4[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1] ; SSE-NEXT: psubd %xmm6, %xmm7 ; SSE-NEXT: paddd %xmm0, %xmm7 ; SSE-NEXT: movdqa %xmm7, %xmm4 @@ -1127,9 +1142,10 @@ define <8 x i32> @test11(<8 x i32> %a) { ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3] ; SSE-NEXT: pmuludq %xmm4, %xmm7 +; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3] ; SSE-NEXT: pmuludq %xmm4, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm6[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] ; 
SSE-NEXT: psubd %xmm7, %xmm0 ; SSE-NEXT: pand %xmm1, %xmm3 ; SSE-NEXT: movdqa %xmm1, %xmm6 @@ -1137,10 +1153,11 @@ define <8 x i32> @test11(<8 x i32> %a) { ; SSE-NEXT: pand %xmm2, %xmm6 ; SSE-NEXT: paddd %xmm3, %xmm6 ; SSE-NEXT: pmuludq %xmm1, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] ; SSE-NEXT: pmuludq %xmm5, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE-NEXT: psubd %xmm6, %xmm2 ; SSE-NEXT: paddd %xmm1, %xmm2 ; SSE-NEXT: movdqa %xmm2, %xmm3 @@ -1149,9 +1166,10 @@ define <8 x i32> @test11(<8 x i32> %a) { ; SSE-NEXT: paddd %xmm3, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; SSE-NEXT: pmuludq %xmm4, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; SSE-NEXT: pmuludq %xmm4, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2] -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE-NEXT: psubd %xmm2, %xmm1 ; SSE-NEXT: retq ; @@ -1176,7 +1194,7 @@ define <8 x i32> @test11(<8 x i32> %a) { ret <8 x i32> %rem } -define <2 x i16> @test12() { +define <2 x i16> @test12() #0 { ; SSE41-LABEL: test12: ; SSE41: # BB#0: ; SSE41-NEXT: xorps %xmm0, %xmm0 @@ -1197,20 +1215,19 @@ define <2 x i16> @test12() { ret <2 x i16> %B9 } -define <4 x i32> @PR20355(<4 x i32> %a) { +define <4 x i32> @PR20355(<4 x i32> %a) #0 { ; SSE41-LABEL: PR20355: ; SSE41: # BB#0: # %entry ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766] -; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm2, %xmm3 ; SSE41-NEXT: pmuldq %xmm1, %xmm0 -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; SSE41-NEXT: pmuldq %xmm2, %xmm1 -; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3] -; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3] -; SSE41-NEXT: movaps %xmm0, %xmm1 -; SSE41-NEXT: psrld $31, %xmm1 -; SSE41-NEXT: paddd %xmm0, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] ; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psrld $31, %xmm0 +; SSE41-NEXT: paddd %xmm1, %xmm0 ; SSE41-NEXT: retq ; ; SSE-LABEL: PR20355: @@ -1225,26 +1242,26 @@ define <4 x i32> @PR20355(<4 x i32> %a) { ; SSE-NEXT: paddd %xmm2, %xmm3 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; SSE-NEXT: pmuludq %xmm1, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; SSE-NEXT: pmuludq %xmm2, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3] -; SSE-NEXT: psubd %xmm3, %xmm0 -; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: psrld $31, %xmm1 -; SSE-NEXT: paddd %xmm0, %xmm1 -; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3] +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] +; SSE-NEXT: pmuludq %xmm2, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1] +; SSE-NEXT: psubd %xmm3, %xmm4 +; SSE-NEXT: movdqa %xmm4, %xmm0 +; SSE-NEXT: psrld $31, %xmm0 +; SSE-NEXT: paddd %xmm4, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: PR20355: ; AVX: # BB#0: # %entry ; AVX-NEXT: vpbroadcastd 
{{.*}}(%rip), %xmm1
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,3],xmm0[1,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1252,3 +1269,5 @@ entry:
 %sdiv = sdiv <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
 ret <4 x i32> %sdiv
}
+
+attributes #0 = { nounwind }
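
The CHECK-line churn above is all against one lowering strategy: none of these tests expect a hardware divide, because X86 lowers vector udiv/sdiv by a constant into a multiply-high by a "magic" constant plus shift fixups. 613566757 (0x24924925) and 2454267027 (0x92492493) are the unsigned and signed 32-bit multipliers for division by 7, 9363/18725/-109 play the same role at 16 and 8 bits, and 1431655766 (0x55555556) is the signed multiplier for division by 3 in PR20355. As a reading aid only, here is a minimal scalar C model of the two 32-bit sequences the vector CHECK lines pin down per lane; the names udiv7/sdiv7 are illustrative, not part of the test, and the model assumes the usual arithmetic behavior of >> on negative int32_t values:

  #include <assert.h>
  #include <stdint.h>

  /* test1/test2/test10: pmuludq by 613566757, keep the high 32 bits,
     then psubd, psrld $1, paddd, psrld $2. */
  static uint32_t udiv7(uint32_t n) {
      uint32_t hi = (uint32_t)(((uint64_t)n * 613566757u) >> 32);
      return (((n - hi) >> 1) + hi) >> 2;
  }

  /* test8/test9/test11: pmuldq by 2454267027 (signed high half),
     paddd the dividend, then psrad $2 plus the psrld $31 sign fixup. */
  static int32_t sdiv7(int32_t n) {
      int64_t M = -1840700269; /* 2454267027 reinterpreted as signed i32 */
      int32_t hi = (int32_t)(((int64_t)n * M) >> 32);
      int32_t q = hi + n;
      return (q >> 2) + (int32_t)((uint32_t)q >> 31);
  }

  int main(void) {
      /* spot-check the full i32 range against plain division */
      for (int64_t n = INT32_MIN; n <= INT32_MAX; n += 99991) {
          assert(udiv7((uint32_t)n) == (uint32_t)n / 7u);
          assert(sdiv7((int32_t)n) == (int32_t)n / 7);
      }
      return 0;
  }

The diff itself does not change that strategy; it only retunes the expected shuffles that merge the even- and odd-lane pmuludq/pmuldq results (shufps pairs become pshufd+pblendw, pshufd+punpckldq, or vpblendd) and adds the nounwind attribute so no unwind directives appear in the checked assembly.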