; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64
; copysign(float) via libcall: x86-32 spills both args to the stack and
; emits an explicit calll; x86-64 swaps xmm0/xmm1 (args are reversed at the
; IR call site) and tail-calls copysignf with jmp.
define float @tst1(float %a, float %b) {
; X32: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss %xmm1, 4(%esp)
; X32-NEXT: movss %xmm0, (%esp)
; X32-NEXT: calll copysignf
; X32-NEXT: addl $8, %esp
;
; X64: movaps %xmm0, %xmm2
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: movaps %xmm2, %xmm1
; X64-NEXT: jmp copysignf
  %tmp = tail call float @copysignf( float %b, float %a )
  ret float %tmp
}
; copysign(double) via libcall, with the sign operand produced by an
; fadd + fpext chain; x86-64 tail-calls copysign directly.
define double @tst2(double %a, float %b, float %c) {
; X32: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: addss 32(%esp), %xmm1
; X32-NEXT: cvtss2sd %xmm1, %xmm1
; X32-NEXT: movsd %xmm0, (%esp)
; X32-NEXT: movsd %xmm1, 8(%esp)
; X32-NEXT: calll copysign
; X32-NEXT: addl $16, %esp
;
; X64: addss %xmm2, %xmm1
; X64-NEXT: cvtss2sd %xmm1, %xmm1
; X64-NEXT: jmp copysign
  %tmp1 = fadd float %b, %c
  %tmp2 = fpext float %tmp1 to double
  %tmp = tail call double @copysign( double %a, double %tmp2 )
  ret double %tmp
}
; libm copysign libcall declarations used by @tst1 / @tst2.
declare float @copysignf(float, float)
declare double @copysign(double, double)
; llvm.copysign.f32 lowered inline as bit ops: clear the sign bit of the
; magnitude, isolate the sign bit of the sign operand, then OR them together.
define float @int1(float %a, float %b) {
; X32: movss 12(%esp), %xmm0 {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss 8(%esp), %xmm1 {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: andps .LCPI2_0, %xmm1
; X32-NEXT: andps .LCPI2_1, %xmm0
; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: movss %xmm0, (%esp)
; X32-NEXT: flds (%esp)
;
; X64: andps .LCPI2_0(%rip), %xmm0
; X64-NEXT: andps .LCPI2_1(%rip), %xmm1
; X64-NEXT: orps %xmm1, %xmm0
  %tmp = tail call float @llvm.copysign.f32( float %b, float %a )
  ret float %tmp
}
; llvm.copysign.f64 lowered inline (andpd/andpd/orpd); the sign operand is
; computed via fadd + fpext. On x86-32 the result is returned through x87
; (fldl), hence the stack round-trip.
define double @int2(double %a, float %b, float %c) {
; X32: movsd 8(%ebp), %xmm0 {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movss 16(%ebp), %xmm1 {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: addss 20(%ebp), %xmm1
; X32-NEXT: andpd .LCPI3_0, %xmm0
; X32-NEXT: cvtss2sd %xmm1, %xmm1
; X32-NEXT: andpd .LCPI3_1, %xmm1
; X32-NEXT: orpd %xmm0, %xmm1
; X32-NEXT: movsd %xmm1, (%esp)
; X32-NEXT: fldl (%esp)
; X32-NEXT: movl %ebp, %esp
;
; X64: addss %xmm2, %xmm1
; X64-NEXT: andpd .LCPI3_0(%rip), %xmm0
; X64-NEXT: cvtss2sd %xmm1, %xmm1
; X64-NEXT: andpd .LCPI3_1(%rip), %xmm1
; X64-NEXT: orpd %xmm1, %xmm0
  %tmp1 = fadd float %b, %c
  %tmp2 = fpext float %tmp1 to double
  %tmp = tail call double @llvm.copysign.f64( double %a, double %tmp2 )
  ret double %tmp
}
; Constant-folded copysign: copysign(1.0, -2.0) == -1.0, so codegen is a
; single constant-pool load. (X32 CHECK lines not visible in this chunk.)
define float @cst1() {
; X64: movss .LCPI4_0(%rip), %xmm0 {{.*#+}} xmm0 = mem[0],zero,zero,zero
  %tmp = tail call float @llvm.copysign.f32( float 1.0, float -2.0 )
  ret float %tmp
}
; Constant-folded copysign through fadd/fpext: copysign(0.0, -2.0) == -0.0,
; again a single constant-pool load. (X32 CHECK lines not visible in this chunk.)
define double @cst2() {
; X64: movsd .LCPI5_0(%rip), %xmm0 {{.*#+}} xmm0 = mem[0],zero
  %tmp1 = fadd float -1.0, -1.0
  %tmp2 = fpext float %tmp1 to double
  %tmp = tail call double @llvm.copysign.f64( double 0.0, double %tmp2 )
  ret double %tmp
}
; Intrinsic declarations used by @int1 / @int2 / @cst1 / @cst2.
declare float @llvm.copysign.f32(float %Mag, float %Sgn)
declare double @llvm.copysign.f64(double %Mag, double %Sgn)