; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s

;; Scalar Floating-point Convert
;; FCVTXN: double -> float narrowing convert using round-to-odd.
define float @test_vcvtxn(double %a) {
; CHECK-LABEL: test_vcvtxn:
; CHECK: fcvtxn {{s[0-9]}}, {{d[0-9]}}
entry:
  %vcvtf.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtf1.i = tail call <1 x float> @llvm.aarch64.neon.fcvtxn.v1f32.v1f64(<1 x double> %vcvtf.i)
  %0 = extractelement <1 x float> %vcvtf1.i, i32 0
  ret float %0
}

declare <1 x float> @llvm.aarch64.neon.fcvtxn.v1f32.v1f64(<1 x double>)
;; FCVTAS: float -> signed i32, round to nearest, ties away from zero.
define i32 @test_vcvtass(float %a) {
; CHECK-LABEL: test_vcvtass:
; CHECK: fcvtas {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtas.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtas1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtas.v1i32.v1f32(<1 x float> %vcvtas.i)
  %0 = extractelement <1 x i32> %vcvtas1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtas.v1i32.v1f32(<1 x float>)
;; FCVTAS: double -> signed i64, round to nearest, ties away from zero.
;; NOTE(review): the doubled "test_" prefix in the name looks like a typo,
;; but it is kept as-is since the CHECK label must match the symbol.
define i64 @test_test_vcvtasd(double %a) {
; CHECK-LABEL: test_test_vcvtasd:
; CHECK: fcvtas {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtas.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtas1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double> %vcvtas.i)
  %0 = extractelement <1 x i64> %vcvtas1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double>)
;; FCVTAU: float -> unsigned i32, round to nearest, ties away from zero.
define i32 @test_vcvtaus(float %a) {
; CHECK-LABEL: test_vcvtaus:
; CHECK: fcvtau {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtau.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtau1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtau.v1i32.v1f32(<1 x float> %vcvtau.i)
  %0 = extractelement <1 x i32> %vcvtau1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtau.v1i32.v1f32(<1 x float>)
;; FCVTAU: double -> unsigned i64, round to nearest, ties away from zero.
define i64 @test_vcvtaud(double %a) {
; CHECK-LABEL: test_vcvtaud:
; CHECK: fcvtau {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtau.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtau1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double> %vcvtau.i)
  %0 = extractelement <1 x i64> %vcvtau1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double>)
;; FCVTMS: float -> signed i32, round toward minus infinity.
define i32 @test_vcvtmss(float %a) {
; CHECK-LABEL: test_vcvtmss:
; CHECK: fcvtms {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtms.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtms1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtms.v1i32.v1f32(<1 x float> %vcvtms.i)
  %0 = extractelement <1 x i32> %vcvtms1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtms.v1i32.v1f32(<1 x float>)
;; FCVTMS: double -> signed i64, round toward minus infinity.
define i64 @test_vcvtmd_s64_f64(double %a) {
; CHECK-LABEL: test_vcvtmd_s64_f64:
; CHECK: fcvtms {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtms.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtms1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double> %vcvtms.i)
  %0 = extractelement <1 x i64> %vcvtms1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double>)
;; FCVTMU: float -> unsigned i32, round toward minus infinity.
define i32 @test_vcvtmus(float %a) {
; CHECK-LABEL: test_vcvtmus:
; CHECK: fcvtmu {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtmu.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtmu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtmu.v1i32.v1f32(<1 x float> %vcvtmu.i)
  %0 = extractelement <1 x i32> %vcvtmu1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtmu.v1i32.v1f32(<1 x float>)
;; FCVTMU: double -> unsigned i64, round toward minus infinity.
define i64 @test_vcvtmud(double %a) {
; CHECK-LABEL: test_vcvtmud:
; CHECK: fcvtmu {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtmu.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtmu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double> %vcvtmu.i)
  %0 = extractelement <1 x i64> %vcvtmu1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double>)
;; FCVTNS: float -> signed i32, round to nearest, ties to even.
define i32 @test_vcvtnss(float %a) {
; CHECK-LABEL: test_vcvtnss:
; CHECK: fcvtns {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtns.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtns1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtns.v1i32.v1f32(<1 x float> %vcvtns.i)
  %0 = extractelement <1 x i32> %vcvtns1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtns.v1i32.v1f32(<1 x float>)
;; FCVTNS: double -> signed i64, round to nearest, ties to even.
define i64 @test_vcvtnd_s64_f64(double %a) {
; CHECK-LABEL: test_vcvtnd_s64_f64:
; CHECK: fcvtns {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtns.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtns1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double> %vcvtns.i)
  %0 = extractelement <1 x i64> %vcvtns1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double>)
;; FCVTNU: float -> unsigned i32, round to nearest, ties to even.
define i32 @test_vcvtnus(float %a) {
; CHECK-LABEL: test_vcvtnus:
; CHECK: fcvtnu {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtnu.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtnu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtnu.v1i32.v1f32(<1 x float> %vcvtnu.i)
  %0 = extractelement <1 x i32> %vcvtnu1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtnu.v1i32.v1f32(<1 x float>)
;; FCVTNU: double -> unsigned i64, round to nearest, ties to even.
define i64 @test_vcvtnud(double %a) {
; CHECK-LABEL: test_vcvtnud:
; CHECK: fcvtnu {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtnu.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtnu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double> %vcvtnu.i)
  %0 = extractelement <1 x i64> %vcvtnu1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double>)
;; FCVTPS: float -> signed i32, round toward plus infinity.
define i32 @test_vcvtpss(float %a) {
; CHECK-LABEL: test_vcvtpss:
; CHECK: fcvtps {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtps.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtps1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtps.v1i32.v1f32(<1 x float> %vcvtps.i)
  %0 = extractelement <1 x i32> %vcvtps1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtps.v1i32.v1f32(<1 x float>)
;; FCVTPS: double -> signed i64, round toward plus infinity.
define i64 @test_vcvtpd_s64_f64(double %a) {
; CHECK-LABEL: test_vcvtpd_s64_f64:
; CHECK: fcvtps {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtps.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtps1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double> %vcvtps.i)
  %0 = extractelement <1 x i64> %vcvtps1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double>)
;; FCVTPU: float -> unsigned i32, round toward plus infinity.
define i32 @test_vcvtpus(float %a) {
; CHECK-LABEL: test_vcvtpus:
; CHECK: fcvtpu {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtpu.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtpu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtpu.v1i32.v1f32(<1 x float> %vcvtpu.i)
  %0 = extractelement <1 x i32> %vcvtpu1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtpu.v1i32.v1f32(<1 x float>)
;; FCVTPU: double -> unsigned i64, round toward plus infinity.
define i64 @test_vcvtpud(double %a) {
; CHECK-LABEL: test_vcvtpud:
; CHECK: fcvtpu {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtpu.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtpu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double> %vcvtpu.i)
  %0 = extractelement <1 x i64> %vcvtpu1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double>)
;; FCVTZS: float -> signed i32, round toward zero.
define i32 @test_vcvtss(float %a) {
; CHECK-LABEL: test_vcvtss:
; CHECK: fcvtzs {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtzs.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtzs1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtzs.v1i32.v1f32(<1 x float> %vcvtzs.i)
  %0 = extractelement <1 x i32> %vcvtzs1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtzs.v1i32.v1f32(<1 x float>)
;; FCVTZS: double -> signed i64, round toward zero.
define i64 @test_vcvtd_s64_f64(double %a) {
; CHECK-LABEL: test_vcvtd_s64_f64:
; CHECK: fcvtzs {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvzs.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvzs1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> %vcvzs.i)
  %0 = extractelement <1 x i64> %vcvzs1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double>)
;; FCVTZU: float -> unsigned i32, round toward zero.
define i32 @test_vcvtus(float %a) {
; CHECK-LABEL: test_vcvtus:
; CHECK: fcvtzu {{s[0-9]}}, {{s[0-9]}}
entry:
  %vcvtzu.i = insertelement <1 x float> undef, float %a, i32 0
  %vcvtzu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtzu.v1i32.v1f32(<1 x float> %vcvtzu.i)
  %0 = extractelement <1 x i32> %vcvtzu1.i, i32 0
  ret i32 %0
}

declare <1 x i32> @llvm.aarch64.neon.fcvtzu.v1i32.v1f32(<1 x float>)
;; FCVTZU: double -> unsigned i64, round toward zero.
define i64 @test_vcvtud(double %a) {
; CHECK-LABEL: test_vcvtud:
; CHECK: fcvtzu {{d[0-9]}}, {{d[0-9]}}
entry:
  %vcvtzu.i = insertelement <1 x double> undef, double %a, i32 0
  %vcvtzu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> %vcvtzu.i)
  %0 = extractelement <1 x i64> %vcvtzu1.i, i32 0
  ret i64 %0
}

declare <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double>)