1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
3 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
5 ; Verify we fold loads into unary sse intrinsics only when optimizing for size
; Without optsize, the load is NOT folded: rcpss only reads the low 32 bits
; but a folded load would dereference a full 128-bit (or partial) memory op,
; so a separate movss is expected.
define float @rcpss(float* %a) {
; SSE-LABEL: rcpss:
; SSE:       # BB#0:
; SSE-NEXT:    movss (%rdi), %xmm0
; SSE-NEXT:    rcpss %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: rcpss:
; AVX:       # BB#0:
; AVX-NEXT:    vmovss (%rdi), %xmm0
; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load float, float* %a
  %ins = insertelement <4 x float> undef, float %ld, i32 0
  %res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; Without optsize, rsqrtss keeps a separate scalar load (no load folding).
define float @rsqrtss(float* %a) {
; SSE-LABEL: rsqrtss:
; SSE:       # BB#0:
; SSE-NEXT:    movss (%rdi), %xmm0
; SSE-NEXT:    rsqrtss %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: rsqrtss:
; AVX:       # BB#0:
; AVX-NEXT:    vmovss (%rdi), %xmm0
; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load float, float* %a
  %ins = insertelement <4 x float> undef, float %ld, i32 0
  %res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; Without optsize, sqrtss keeps a separate scalar load (no load folding).
define float @sqrtss(float* %a) {
; SSE-LABEL: sqrtss:
; SSE:       # BB#0:
; SSE-NEXT:    movss (%rdi), %xmm0
; SSE-NEXT:    sqrtss %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sqrtss:
; AVX:       # BB#0:
; AVX-NEXT:    vmovss (%rdi), %xmm0
; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load float, float* %a
  %ins = insertelement <4 x float> undef, float %ld, i32 0
  %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; Double-precision variant: without optsize, sqrtsd keeps a separate movsd.
define double @sqrtsd(double* %a) {
; SSE-LABEL: sqrtsd:
; SSE:       # BB#0:
; SSE-NEXT:    movsd (%rdi), %xmm0
; SSE-NEXT:    sqrtsd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sqrtsd:
; AVX:       # BB#0:
; AVX-NEXT:    vmovsd (%rdi), %xmm0
; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load double, double* %a
  %ins = insertelement <2 x double> undef, double %ld, i32 0
  %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
  %ext = extractelement <2 x double> %res, i32 0
  ret double %ext
}
; With optsize, the scalar load IS folded into the rcpss memory operand.
define float @rcpss_size(float* %a) optsize {
; SSE-LABEL: rcpss_size:
; SSE:       # BB#0:
; SSE-NEXT:    rcpss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: rcpss_size:
; AVX:       # BB#0:
; AVX-NEXT:    vrcpss (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load float, float* %a
  %ins = insertelement <4 x float> undef, float %ld, i32 0
  %res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; With optsize, the scalar load IS folded into the rsqrtss memory operand.
define float @rsqrtss_size(float* %a) optsize {
; SSE-LABEL: rsqrtss_size:
; SSE:       # BB#0:
; SSE-NEXT:    rsqrtss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: rsqrtss_size:
; AVX:       # BB#0:
; AVX-NEXT:    vrsqrtss (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load float, float* %a
  %ins = insertelement <4 x float> undef, float %ld, i32 0
  %res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; With optsize, the scalar load IS folded into the sqrtss memory operand.
define float @sqrtss_size(float* %a) optsize {
; SSE-LABEL: sqrtss_size:
; SSE:       # BB#0:
; SSE-NEXT:    sqrtss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sqrtss_size:
; AVX:       # BB#0:
; AVX-NEXT:    vsqrtss (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load float, float* %a
  %ins = insertelement <4 x float> undef, float %ld, i32 0
  %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}
; Double-precision variant: with optsize, the load folds into sqrtsd.
define double @sqrtsd_size(double* %a) optsize {
; SSE-LABEL: sqrtsd_size:
; SSE:       # BB#0:
; SSE-NEXT:    sqrtsd (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sqrtsd_size:
; AVX:       # BB#0:
; AVX-NEXT:    vsqrtsd (%rdi), %xmm0, %xmm0
; AVX-NEXT:    retq
  %ld = load double, double* %a
  %ins = insertelement <2 x double> undef, double %ld, i32 0
  %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
  %ext = extractelement <2 x double> %res, i32 0
  ret double %ext
}
; Declarations of the x86 scalar unary intrinsics exercised by the tests above.
declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone