From: Chandler Carruth
Date: Fri, 3 Oct 2014 01:37:53 +0000 (+0000)
Subject: [x86] Merge two very similar tests and regenerate FileCheck lines for
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=9a7125399b8de8da77487d5699620e5048f98052

[x86] Merge two very similar tests and regenerate FileCheck lines for
them.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218945 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/sse-scalar-fp-arith-2.ll b/test/CodeGen/X86/sse-scalar-fp-arith-2.ll
deleted file mode 100644
index 600ee1b7b1e..00000000000
--- a/test/CodeGen/X86/sse-scalar-fp-arith-2.ll
+++ /dev/null
@@ -1,423 +0,0 @@
-; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s
-; RUN: llc -mtriple=x86_64-pc-linux -mattr=-sse4.1 -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s
-; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7-avx < %s | FileCheck -check-prefix=CHECK -check-prefix=AVX %s
-
-; Ensure that the backend selects SSE/AVX scalar fp instructions
-; from a packed fp instrution plus a vector insert.
-
-
-define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fadd <4 x float> %a, %b
-  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test_add_ss
-; SSE2: addss %xmm1, %xmm0
-; AVX: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fsub <4 x float> %a, %b
-  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test_sub_ss
-; SSE2: subss %xmm1, %xmm0
-; AVX: vsubss %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fmul <4 x float> %a, %b
-  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test_mul_ss
-; SSE2: mulss %xmm1, %xmm0
-; AVX: vmulss %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fdiv <4 x float> %a, %b
-  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test_div_ss
-; SSE2: divss %xmm1, %xmm0
-; AVX: vdivss %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fadd <2 x double> %a, %b
-  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test_add_sd
-; SSE2: addsd %xmm1, %xmm0
-; AVX: vaddsd %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fsub <2 x double> %a, %b
-  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test_sub_sd
-; SSE2: subsd %xmm1, %xmm0
-; AVX: vsubsd %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fmul <2 x double> %a, %b
-  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test_mul_sd
-; SSE2: mulsd %xmm1, %xmm0
-; AVX: vmulsd %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fdiv <2 x double> %a, %b
-  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test_div_sd
-; SSE2: divsd %xmm1, %xmm0
-; AVX: vdivsd %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fadd <4 x float> %b, %a
-  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test2_add_ss
-; SSE2: addss %xmm0, %xmm1
-; AVX: vaddss %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fsub <4 x float> %b, %a
-  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test2_sub_ss
-; SSE2: subss %xmm0, %xmm1
-; AVX: vsubss %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fmul <4 x float> %b, %a
-  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test2_mul_ss
-; SSE2: mulss %xmm0, %xmm1
-; AVX: vmulss %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fdiv <4 x float> %b, %a
-  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test2_div_ss
-; SSE2: divss %xmm0, %xmm1
-; AVX: vdivss %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fadd <2 x double> %b, %a
-  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test2_add_sd
-; SSE2: addsd %xmm0, %xmm1
-; AVX: vaddsd %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fsub <2 x double> %b, %a
-  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test2_sub_sd
-; SSE2: subsd %xmm0, %xmm1
-; AVX: vsubsd %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fmul <2 x double> %b, %a
-  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test2_mul_sd
-; SSE2: mulsd %xmm0, %xmm1
-; AVX: vmulsd %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) {
-  %1 = fdiv <2 x double> %b, %a
-  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
-  ret <2 x double> %2
-}
-
-; CHECK-LABEL: test2_div_sd
-; SSE2: divsd %xmm0, %xmm1
-; AVX: vdivsd %xmm0, %xmm1, %xmm0
-; CHECK-NOT: movsd
-; CHECK: ret
-
-
-define <4 x float> @test3_add_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fadd <4 x float> %a, %b
-  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test3_add_ss
-; SSE2: addss %xmm1, %xmm0
-; AVX: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test3_sub_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fsub <4 x float> %a, %b
-  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL: test3_sub_ss
-; SSE2: subss %xmm1, %xmm0
-; AVX: vsubss %xmm1, %xmm0, %xmm0
-; CHECK-NOT: movss
-; CHECK: ret
-
-
-define <4 x float> @test3_mul_ss(<4 x float> %a, <4 x float> %b) {
-  %1 = fmul <4 x float> %a, %b
-  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
-  ret <4 x float> %2
-}
-
-; CHECK-LABEL:
test3_mul_ss -; SSE2: mulss %xmm1, %xmm0 -; AVX: vmulss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - -define <4 x float> @test3_div_ss(<4 x float> %a, <4 x float> %b) { - %1 = fdiv <4 x float> %a, %b - %2 = select <4 x i1> , <4 x float> %a, <4 x float> %1 - ret <4 x float> %2 -} - -; CHECK-LABEL: test3_div_ss -; SSE2: divss %xmm1, %xmm0 -; AVX: vdivss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - -define <2 x double> @test3_add_sd(<2 x double> %a, <2 x double> %b) { - %1 = fadd <2 x double> %a, %b - %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test3_add_sd -; SSE2: addsd %xmm1, %xmm0 -; AVX: vaddsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - -define <2 x double> @test3_sub_sd(<2 x double> %a, <2 x double> %b) { - %1 = fsub <2 x double> %a, %b - %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test3_sub_sd -; SSE2: subsd %xmm1, %xmm0 -; AVX: vsubsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - -define <2 x double> @test3_mul_sd(<2 x double> %a, <2 x double> %b) { - %1 = fmul <2 x double> %a, %b - %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test3_mul_sd -; SSE2: mulsd %xmm1, %xmm0 -; AVX: vmulsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - -define <2 x double> @test3_div_sd(<2 x double> %a, <2 x double> %b) { - %1 = fdiv <2 x double> %a, %b - %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test3_div_sd -; SSE2: divsd %xmm1, %xmm0 -; AVX: vdivsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - -define <4 x float> @test4_add_ss(<4 x float> %a, <4 x float> %b) { - %1 = fadd <4 x float> %b, %a - %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 - ret <4 x float> %2 -} - -; CHECK-LABEL: test4_add_ss -; SSE2: addss %xmm0, %xmm1 -; AVX: vaddss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - -define <4 x float> @test4_sub_ss(<4 x float> %a, <4 x float> %b) { - %1 = fsub <4 x float> %b, %a - %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 - ret <4 x float> %2 -} - -; CHECK-LABEL: test4_sub_ss -; SSE2: subss %xmm0, %xmm1 -; AVX: vsubss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - -define <4 x float> @test4_mul_ss(<4 x float> %a, <4 x float> %b) { - %1 = fmul <4 x float> %b, %a - %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 - ret <4 x float> %2 -} - -; CHECK-LABEL: test4_mul_ss -; SSE2: mulss %xmm0, %xmm1 -; AVX: vmulss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - -define <4 x float> @test4_div_ss(<4 x float> %a, <4 x float> %b) { - %1 = fdiv <4 x float> %b, %a - %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 - ret <4 x float> %2 -} - -; CHECK-LABEL: test4_div_ss -; SSE2: divss %xmm0, %xmm1 -; AVX: vdivss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - -define <2 x double> @test4_add_sd(<2 x double> %a, <2 x double> %b) { - %1 = fadd <2 x double> %b, %a - %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test4_add_sd -; SSE2: addsd %xmm0, %xmm1 -; AVX: vaddsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - -define <2 x double> @test4_sub_sd(<2 x double> %a, <2 x double> %b) { - %1 = fsub <2 x double> %b, %a - %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test4_sub_sd -; SSE2: subsd %xmm0, %xmm1 -; AVX: vsubsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: 
movsd -; CHECK: ret - - -define <2 x double> @test4_mul_sd(<2 x double> %a, <2 x double> %b) { - %1 = fmul <2 x double> %b, %a - %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test4_mul_sd -; SSE2: mulsd %xmm0, %xmm1 -; AVX: vmulsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - -define <2 x double> @test4_div_sd(<2 x double> %a, <2 x double> %b) { - %1 = fdiv <2 x double> %b, %a - %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 - ret <2 x double> %2 -} - -; CHECK-LABEL: test4_div_sd -; SSE2: divsd %xmm0, %xmm1 -; AVX: vdivsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll index 3949a835e67..e6a6d99a199 100644 --- a/test/CodeGen/X86/sse-scalar-fp-arith.ll +++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll @@ -1,13 +1,21 @@ -; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s -; RUN: llc -mtriple=x86_64-pc-linux -mattr=-sse4.1 -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s -; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7-avx < %s | FileCheck -check-prefix=CHECK -check-prefix=AVX %s +; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck --check-prefix=SSE --check-prefix=SSE2 %s +; RUN: llc -mtriple=x86_64-pc-linux -mattr=-sse4.1 -mcpu=corei7 < %s | FileCheck --check-prefix=SSE --check-prefix=SSE41 %s +; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7-avx < %s | FileCheck --check-prefix=AVX %s ; Ensure that the backend no longer emits unnecessary vector insert ; instructions immediately after SSE scalar fp instructions ; like addss or mulss. - define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %add = fadd float %2, %1 @@ -15,14 +23,16 @@ define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_add_ss -; SSE2: addss %xmm1, %xmm0 -; AVX: vaddss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %sub = fsub float %2, %1 @@ -30,13 +40,16 @@ define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_sub_ss -; SSE2: subss %xmm1, %xmm0 -; AVX: vsubss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %mul = fmul float %2, %1 @@ -44,14 +57,16 @@ define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_mul_ss -; SSE2: mulss %xmm1, %xmm0 -; AVX: vmulss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define 
<4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %div = fdiv float %2, %1 @@ -59,14 +74,16 @@ define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_div_ss -; SSE2: divss %xmm1, %xmm0 -; AVX: vdivss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement <2 x double> %a, i32 0 %add = fadd double %2, %1 @@ -74,14 +91,16 @@ define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_add_sd -; SSE2: addsd %xmm1, %xmm0 -; AVX: vaddsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement <2 x double> %a, i32 0 %sub = fsub double %2, %1 @@ -89,14 +108,16 @@ define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_sub_sd -; SSE2: subsd %xmm1, %xmm0 -; AVX: vsubsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement <2 x double> %a, i32 0 %mul = fmul double %2, %1 @@ -104,14 +125,16 @@ define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_mul_sd -; SSE2: mulsd %xmm1, %xmm0 -; AVX: vmulsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement <2 x double> %a, i32 0 %div = fdiv double %2, %1 @@ -119,14 +142,17 @@ define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_div_sd -; SSE2: divsd %xmm1, %xmm0 -; AVX: vdivsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %add = fadd float %1, %2 @@ -134,14 +160,17 @@ define <4 x float> @test2_add_ss(<4 x float> 
%a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_add_ss -; SSE2: addss %xmm0, %xmm1 -; AVX: vaddss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %sub = fsub float %2, %1 @@ -149,14 +178,17 @@ define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_sub_ss -; SSE2: subss %xmm0, %xmm1 -; AVX: vsubss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %mul = fmul float %1, %2 @@ -164,14 +196,17 @@ define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_mul_ss -; SSE2: mulss %xmm0, %xmm1 -; AVX: vmulss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %div = fdiv float %2, %1 @@ -179,14 +214,17 @@ define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_div_ss -; SSE2: divss %xmm0, %xmm1 -; AVX: vdivss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %add = fadd double %1, %2 @@ -194,14 +232,17 @@ define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test2_add_sd -; SSE2: addsd %xmm0, %xmm1 -; AVX: vaddsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %sub = fsub double %2, %1 @@ -209,14 +250,17 @@ define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test2_sub_sd -; SSE2: subsd %xmm0, %xmm1 -; AVX: vsubsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_mul_sd: +; SSE: # BB#0: +; 
SSE-NEXT: mulsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %mul = fmul double %1, %2 @@ -224,14 +268,17 @@ define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test2_mul_sd -; SSE2: mulsd %xmm0, %xmm1 -; AVX: vmulsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %div = fdiv double %2, %1 @@ -239,14 +286,18 @@ define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test2_div_sd -; SSE2: divsd %xmm0, %xmm1 -; AVX: vdivsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_multiple_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %add = fadd float %2, %1 @@ -255,14 +306,19 @@ define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_add_ss -; CHECK: addss -; CHECK: addss -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: subss %xmm1, %xmm2 +; SSE-NEXT: subss %xmm2, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_multiple_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %sub = fsub float %2, %1 @@ -271,14 +327,18 @@ define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_sub_ss -; CHECK: subss -; CHECK: subss -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_multiple_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %mul = fmul float %2, %1 @@ -287,13 +347,19 @@ define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_mul_ss -; CHECK: mulss -; CHECK: mulss -; CHECK-NOT: movss -; CHECK: ret - define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_div_ss: +; SSE: # BB#0: +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: divss %xmm1, %xmm2 +; SSE-NEXT: divss %xmm2, %xmm0 +; SSE-NEXT: 
retq +; +; AVX-LABEL: test_multiple_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %div = fdiv float %2, %1 @@ -302,9 +368,501 @@ define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_div_ss -; CHECK: divss -; CHECK: divss -; CHECK-NOT: movss -; CHECK: ret +; Ensure that the backend selects SSE/AVX scalar fp instructions +; from a packed fp instrution plus a vector insert. + +define <4 x float> @insert_test_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> + ret <4 x float> %2 +} + +define <4 x float> @insert_test_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> + ret <4 x float> %2 +} + +define <4 x float> @insert_test_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> + ret <4 x float> %2 +} + +define <4 x float> @insert_test_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> + ret <4 x float> %2 +} + +define <2 x double> @insert_test_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> + ret <2 x double> %2 +} + +define <2 x double> @insert_test_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> + ret <2 x double> %2 +} + +define <2 x double> @insert_test_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> + ret <2 x double> %2 +} +define <2 x double> @insert_test_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: 
insert_test_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> + ret <2 x double> %2 +} + +define <4 x float> @insert_test2_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test2_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> + ret <4 x float> %2 +} + +define <4 x float> @insert_test2_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test2_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> + ret <4 x float> %2 +} + +define <4 x float> @insert_test2_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test2_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> + ret <4 x float> %2 +} + +define <4 x float> @insert_test2_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test2_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> + ret <4 x float> %2 +} + +define <2 x double> @insert_test2_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> + ret <2 x double> %2 +} + +define <2 x double> @insert_test2_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> + ret <2 x double> %2 +} + +define <2 x double> @insert_test2_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> + ret <2 x double> %2 +} + +define <2 x double> @insert_test2_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_div_sd: +; SSE: # 
BB#0: +; SSE-NEXT: divsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> + ret <2 x double> %2 +} + +define <4 x float> @insert_test3_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %a, %b + %2 = select <4 x i1> , <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test3_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %a, %b + %2 = select <4 x i1> , <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test3_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %a, %b + %2 = select <4 x i1> , <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test3_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %a, %b + %2 = select <4 x i1> , <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} + +define <2 x double> @insert_test3_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %a, %b + %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test3_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %a, %b + %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test3_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %a, %b + %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test3_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %a, %b + %2 = select <2 x i1> , <2 x double> %a, <2 x double> %1 + ret <2 x double> 
%2 +} + +define <4 x float> @insert_test4_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %b, %a + %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test4_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %b, %a + %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test4_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %b, %a + %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test4_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %b, %a + %2 = select <4 x i1> , <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <2 x double> @insert_test4_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test4_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %b, %a + %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test4_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test4_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %b, %a + %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test4_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test4_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %b, %a + %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test4_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test4_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %b, %a + %2 = select <2 x i1> , <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +}
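
The regenerated assertions in the merged test appear to follow the style produced by LLVM's in-tree utils/update_llc_test_checks.py helper (the "; SSE-LABEL:", "# BB#0:" and "-NEXT:" lines). As a rough sketch only -- the exact option spelling and paths below are assumptions and may differ at this revision, so consult the script's --help -- the check lines can be refreshed after a codegen change with something like:

    # Assumed invocation; rewrites the SSE/AVX check lines of the test in place.
    python utils/update_llc_test_checks.py --llc-binary ./build/bin/llc \
        test/CodeGen/X86/sse-scalar-fp-arith.ll
    # Re-run the test afterwards to confirm the regenerated expectations pass.
    ./build/bin/llvm-lit test/CodeGen/X86/sse-scalar-fp-arith.ll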