; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
; fp128 operands shared by the tests below.
@lhs = global fp128 zeroinitializer
@rhs = global fp128 zeroinitializer
; AArch64 has no fp128 hardware arithmetic, so fadd must be lowered to a
; libcall to the soft-float routine __addtf3.
define fp128 @test_add() {
; CHECK-LABEL: test_add:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]

  %val = fadd fp128 %lhs, %rhs
; CHECK: bl __addtf3
  ret fp128 %val
}
; fsub on fp128 is lowered to a libcall to __subtf3.
define fp128 @test_sub() {
; CHECK-LABEL: test_sub:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]

  %val = fsub fp128 %lhs, %rhs
; CHECK: bl __subtf3
  ret fp128 %val
}
; fmul on fp128 is lowered to a libcall to __multf3.
define fp128 @test_mul() {
; CHECK-LABEL: test_mul:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]

  %val = fmul fp128 %lhs, %rhs
; CHECK: bl __multf3
  ret fp128 %val
}
; fdiv on fp128 is lowered to a libcall to __divtf3.
define fp128 @test_div() {
; CHECK-LABEL: test_div:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]

  %val = fdiv fp128 %lhs, %rhs
; CHECK: bl __divtf3
  ret fp128 %val
}
; Integer destinations for the conversion tests (referenced here and by the
; fptoui/sitofp/uitofp tests below).
@var32 = global i32 0
@var64 = global i64 0

; fp128 -> signed integer conversions are lowered to the __fixtfsi/__fixtfdi
; libcalls.
define void @test_fptosi() {
; CHECK-LABEL: test_fptosi:
  %val = load fp128* @lhs

  %val32 = fptosi fp128 %val to i32
  store i32 %val32, i32* @var32
; CHECK: bl __fixtfsi

  %val64 = fptosi fp128 %val to i64
  store i64 %val64, i64* @var64
; CHECK: bl __fixtfdi

  ret void
}
; fp128 -> unsigned integer conversions are lowered to the
; __fixunstfsi/__fixunstfdi libcalls.
define void @test_fptoui() {
; CHECK-LABEL: test_fptoui:
  %val = load fp128* @lhs

  %val32 = fptoui fp128 %val to i32
  store i32 %val32, i32* @var32
; CHECK: bl __fixunstfsi

  %val64 = fptoui fp128 %val to i64
  store i64 %val64, i64* @var64
; CHECK: bl __fixunstfdi

  ret void
}
; Signed integer -> fp128 conversions are lowered to the
; __floatsitf/__floatditf libcalls. The stores are volatile so neither
; conversion can be eliminated.
define void @test_sitofp() {
; CHECK-LABEL: test_sitofp:

  %src32 = load i32* @var32
  %val32 = sitofp i32 %src32 to fp128
  store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatsitf

  %src64 = load i64* @var64
  %val64 = sitofp i64 %src64 to fp128
  store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatditf

  ret void
}
; Unsigned integer -> fp128 conversions are lowered to the
; __floatunsitf/__floatunditf libcalls.
define void @test_uitofp() {
; CHECK-LABEL: test_uitofp:

  %src32 = load i32* @var32
  %val32 = uitofp i32 %src32 to fp128
  store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatunsitf

  %src64 = load i64* @var64
  %val64 = uitofp i64 %src64 to fp128
  store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatunditf

  ret void
}
; An ordered fp128 comparison (ole) needs a single soft-float comparison
; libcall: __letf2 returns <= 0 iff lhs <= rhs.
define i1 @test_setcc1() {
; CHECK-LABEL: test_setcc1:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]

; Technically, everything after the call to __letf2 is redundant, but we'll let
; LLVM have its fun for now.
  %val = fcmp ole fp128 %lhs, %rhs
; CHECK: bl __letf2
; CHECK: cmp w0, #0
; CHECK: csinc w0, wzr, wzr, gt

  ret i1 %val
}
; An unordered fp128 comparison (ugt) needs two libcalls: the ordered
; greater-than part and an explicit __unordtf2 NaN check, OR-ed together.
define i1 @test_setcc2() {
; CHECK-LABEL: test_setcc2:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]

; Technically, everything after the call to __letf2 is redundant, but we'll let
; LLVM have its fun for now.
  %val = fcmp ugt fp128 %lhs, %rhs
; CHECK: bl __letf2
; CHECK: cmp w0, #0
; CHECK: csinc [[GT:w[0-9]+]], wzr, wzr, le

; CHECK: bl __unordtf2
; CHECK: cmp w0, #0
; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq

; CHECK: orr w0, [[UNORDERED]], [[GT]]

  ret i1 %val
}
; Branch on an fp128 comparison: the olt condition is implemented via the
; __getf2 and __unordtf2 libcalls (olt == !uge), then a cbnz on the combined
; result selects between the two return blocks.
define i32 @test_br_cc() {
; CHECK-LABEL: test_br_cc:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]

; olt == !uge, which LLVM unfortunately "optimizes" this to.
  %cond = fcmp olt fp128 %lhs, %rhs
; CHECK: bl __getf2
; CHECK: cmp w0, #0
; CHECK: csinc [[OGE:w[0-9]+]], wzr, wzr, lt

; CHECK: bl __unordtf2
; CHECK: cmp w0, #0
; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq

; CHECK: orr [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
  br i1 %cond, label %iftrue, label %iffalse

iftrue:
  ret i32 42
; CHECK-NEXT: BB#
; CHECK-NEXT: movz {{x0|w0}}, #42
; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]

iffalse:
  ret i32 29
; CHECK: [[RET29]]:
; CHECK-NEXT: movz {{x0|w0}}, #29
; CHECK-NEXT: [[REALRET]]:
; CHECK: ret
}
; select on fp128 values: pre-ARM64 AArch64 spills through the stack, the
; ARM64 backend moves between q registers instead, hence the split prefixes.
define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
; CHECK-LABEL: test_select:

  %val = select i1 %cond, fp128 %lhs, fp128 %rhs
  store fp128 %val, fp128* @lhs
; CHECK: cmp {{w[0-9]+}}, #0
; CHECK-AARCH64: str q1, [sp]
; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
; CHECK-AARCH64-NEXT: str q0, [sp]
; CHECK-ARM64-NEXT: orr v[[DEST:[0-9]+]].16b, v0.16b, v0.16b
; CHECK-NEXT: [[IFFALSE]]:
; CHECK-AARCH64-NEXT: ldr q[[DEST:[0-9]+]], [sp]
; CHECK: str q[[DEST]], [{{x[0-9]+}}, {{#?}}:lo12:lhs]
  ret void
}
; float/double destinations for the truncation and extension tests below.
@varfloat = global float 0.0
@vardouble = global double 0.0
; fp128 -> float/double truncations are lowered to the
; __trunctfsf2/__trunctfdf2 libcalls.
define void @test_round() {
; CHECK-LABEL: test_round:

  %val = load fp128* @lhs

  %float = fptrunc fp128 %val to float
  store float %float, float* @varfloat
; CHECK: bl __trunctfsf2
; CHECK: str s0, [{{x[0-9]+}}, {{#?}}:lo12:varfloat]

  %double = fptrunc fp128 %val to double
  store double %double, double* @vardouble
; CHECK: bl __trunctfdf2
; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble]

  ret void
}
; float/double -> fp128 extensions are lowered to the
; __extendsftf2/__extenddftf2 libcalls; volatile stores keep both alive.
define void @test_extend() {
; CHECK-LABEL: test_extend:

  %val = load fp128* @lhs

  %float = load float* @varfloat
  %fromfloat = fpext float %float to fp128
  store volatile fp128 %fromfloat, fp128* @lhs
; CHECK: bl __extendsftf2
; CHECK: str q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]

  %double = load double* @vardouble
  %fromdouble = fpext double %double to fp128
  store volatile fp128 %fromdouble, fp128* @lhs
; CHECK: bl __extenddftf2
; CHECK: str q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]

  ret void
}
264 define fp128 @test_neg(fp128 %in) {
265 ; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
266 ; Make sure the weird hex constant below *is* -0.0
267 ; CHECK-NEXT: fp128 -0
269 ; CHECK-LABEL: test_neg:
271 ; Could in principle be optimized to fneg which we can't select, this makes
272 ; sure that doesn't happen.
273 %ret = fsub fp128 0xL00000000000000008000000000000000, %in
274 ; CHECK-AARCH64: str q0, [sp, #-16]
275 ; CHECK-AARCH64-NEXT: ldr q1, [sp], #16
276 ; CHECK-ARM64: orr v1.16b, v0.16b, v0.16b
277 ; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:[[MINUS0]]]