; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
; Hexagon Programmer's Reference Manual 11.10.1 XTYPE/ALU
; Absolute value doubleword
declare i64 @llvm.hexagon.A2.absp(i64)
define i64 @A2_absp(i64 %a) {
  %z = call i64 @llvm.hexagon.A2.absp(i64 %a)
  ret i64 %z
}
; CHECK: r1:0 = abs(r1:0)

; Absolute value word
declare i32 @llvm.hexagon.A2.abs(i32)
define i32 @A2_abs(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.abs(i32 %a)
  ret i32 %z
}
; CHECK: r0 = abs(r0)

declare i32 @llvm.hexagon.A2.abssat(i32)
define i32 @A2_abssat(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.abssat(i32 %a)
  ret i32 %z
}
; CHECK: r0 = abs(r0):sat
; Add and accumulate
declare i32 @llvm.hexagon.S4.addaddi(i32, i32, i32)
define i32 @S4_addaddi(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 = add(r0, add(r1, #0))

declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32)
define i32 @S4_subaddi(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0, sub(#0, r1))

declare i32 @llvm.hexagon.M2.accii(i32, i32, i32)
define i32 @M2_accii(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 += add(r1, #0)

declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32)
define i32 @M2_naccii(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 -= add(r1, #0)

declare i32 @llvm.hexagon.M2.acci(i32, i32, i32)
define i32 @M2_acci(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 += add(r1, r2)

declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32)
define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 -= add(r1, r2)
; Add doublewords
declare i64 @llvm.hexagon.A2.addp(i64, i64)
define i64 @A2_addp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = add(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.addpsat(i64, i64)
define i64 @A2_addpsat(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = add(r1:0, r3:2):sat
; Add halfword
declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32)
define i32 @A2_addh_l16_ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.l)

declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32)
define i32 @A2_addh_l16_hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.h)

declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32)
define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.l):sat

declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32)
define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.h):sat

declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32)
define i32 @A2_addh_h16_ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.l):<<16

declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32)
define i32 @A2_addh_h16_lh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.h):<<16

declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32)
define i32 @A2_addh_h16_hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.h, r1.l):<<16

declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32)
define i32 @A2_addh_h16_hh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.h, r1.h):<<16

declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32)
define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.l):sat:<<16

declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32)
define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.l, r1.h):sat:<<16

declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32)
define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.h, r1.l):sat:<<16

declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32)
define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = add(r0.h, r1.h):sat:<<16
; Logical doublewords
declare i64 @llvm.hexagon.A2.notp(i64)
define i64 @A2_notp(i64 %a) {
  %z = call i64 @llvm.hexagon.A2.notp(i64 %a)
  ret i64 %z
}
; CHECK: r1:0 = not(r1:0)

declare i64 @llvm.hexagon.A2.andp(i64, i64)
define i64 @A2_andp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = and(r1:0, r3:2)

declare i64 @llvm.hexagon.A4.andnp(i64, i64)
define i64 @A2_andnp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = and(r1:0, ~r3:2)

declare i64 @llvm.hexagon.A2.orp(i64, i64)
define i64 @A2_orp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = or(r1:0, r3:2)

declare i64 @llvm.hexagon.A4.ornp(i64, i64)
define i64 @A2_ornp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = or(r1:0, ~r3:2)

declare i64 @llvm.hexagon.A2.xorp(i64, i64)
define i64 @A2_xorp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = xor(r1:0, r3:2)
; Logical-logical doublewords
declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64)
define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
; CHECK: r1:0 ^= xor(r3:2, r5:4)
; Logical-logical words
declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32)
define i32 @S4_or_andi(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 |= and(r1, #0)

declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32)
define i32 @S4_or_andix(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r1 = or(r0, and(r1, #0))

declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32)
define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= and(r1, ~r2)

declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32)
define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= and(r1, ~r2)

declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32)
define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 ^= and(r1, ~r2)

declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32)
define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= and(r1, r2)

declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32)
define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= or(r1, r2)

declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32)
define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= xor(r1, r2)

declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32)
define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= and(r1, r2)

declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32)
define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= or(r1, r2)

declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32)
define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= xor(r1, r2)

declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32)
define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 ^= and(r1, r2)

declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32)
define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 ^= or(r1, r2)
; Maximum words
declare i32 @llvm.hexagon.A2.max(i32, i32)
define i32 @A2_max(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = max(r0, r1)

declare i32 @llvm.hexagon.A2.maxu(i32, i32)
define i32 @A2_maxu(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = maxu(r0, r1)

; Maximum doublewords
declare i64 @llvm.hexagon.A2.maxp(i64, i64)
define i64 @A2_maxp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = max(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.maxup(i64, i64)
define i64 @A2_maxup(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = maxu(r1:0, r3:2)
; Minimum words
declare i32 @llvm.hexagon.A2.min(i32, i32)
define i32 @A2_min(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = min(r0, r1)

declare i32 @llvm.hexagon.A2.minu(i32, i32)
define i32 @A2_minu(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = minu(r0, r1)

; Minimum doublewords
declare i64 @llvm.hexagon.A2.minp(i64, i64)
define i64 @A2_minp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = min(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.minup(i64, i64)
define i64 @A2_minup(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = minu(r1:0, r3:2)
; Module wrap
declare i32 @llvm.hexagon.A4.modwrapu(i32, i32)
define i32 @A4_modwrapu(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = modwrap(r0, r1)
; Negate
declare i64 @llvm.hexagon.A2.negp(i64)
define i64 @A2_negp(i64 %a) {
  %z = call i64 @llvm.hexagon.A2.negp(i64 %a)
  ret i64 %z
}
; CHECK: r1:0 = neg(r1:0)

declare i32 @llvm.hexagon.A2.negsat(i32)
define i32 @A2_negsat(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.negsat(i32 %a)
  ret i32 %z
}
; CHECK: r0 = neg(r0):sat
; Round
declare i32 @llvm.hexagon.A2.roundsat(i64)
define i32 @A2_roundsat(i64 %a) {
  %z = call i32 @llvm.hexagon.A2.roundsat(i64 %a)
  ret i32 %z
}
; CHECK: r0 = round(r1:0):sat

declare i32 @llvm.hexagon.A4.cround.ri(i32, i32)
define i32 @A4_cround_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = cround(r0, #0)

declare i32 @llvm.hexagon.A4.round.ri(i32, i32)
define i32 @A4_round_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = round(r0, #0)

declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32)
define i32 @A4_round_ri_sat(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = round(r0, #0):sat

declare i32 @llvm.hexagon.A4.cround.rr(i32, i32)
define i32 @A4_cround_rr(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = cround(r0, r1)

declare i32 @llvm.hexagon.A4.round.rr(i32, i32)
define i32 @A4_round_rr(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = round(r0, r1)

declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32)
define i32 @A4_round_rr_sat(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = round(r0, r1):sat
; Subtract doublewords
declare i64 @llvm.hexagon.A2.subp(i64, i64)
define i64 @A2_subp(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = sub(r1:0, r3:2)

; Subtract and accumulate
declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32)
define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 += sub(r1, r2)
; Subtract halfword
declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32)
define i32 @A2_subh_l16_ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.l)

declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32)
define i32 @A2_subh_l16_hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.h)

declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32)
define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.l):sat

declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32)
define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.h):sat

declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32)
define i32 @A2_subh_h16_ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.l):<<16

declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32)
define i32 @A2_subh_h16_lh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.h):<<16

declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32)
define i32 @A2_subh_h16_hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.h, r1.l):<<16

declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32)
define i32 @A2_subh_h16_hh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.h, r1.h):<<16

declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32)
define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.l):sat:<<16

declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32)
define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.l, r1.h):sat:<<16

declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32)
define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.h, r1.l):sat:<<16

declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32)
define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = sub(r0.h, r1.h):sat:<<16
; Sign extend word to doubleword
declare i64 @llvm.hexagon.A2.sxtw(i32)
define i64 @A2_sxtw(i32 %a) {
  %z = call i64 @llvm.hexagon.A2.sxtw(i32 %a)
  ret i64 %z
}
; CHECK: r1:0 = sxtw(r0)
; Vector absolute value halfwords
declare i64 @llvm.hexagon.A2.vabsh(i64)
define i64 @A2_vabsh(i64 %a) {
  %z = call i64 @llvm.hexagon.A2.vabsh(i64 %a)
  ret i64 %z
}
; CHECK: r1:0 = vabsh(r1:0)

declare i64 @llvm.hexagon.A2.vabshsat(i64)
define i64 @A2_vabshsat(i64 %a) {
  %z = call i64 @llvm.hexagon.A2.vabshsat(i64 %a)
  ret i64 %z
}
; CHECK: r1:0 = vabsh(r1:0):sat

; Vector absolute value words
declare i64 @llvm.hexagon.A2.vabsw(i64)
define i64 @A2_vabsw(i64 %a) {
  %z = call i64 @llvm.hexagon.A2.vabsw(i64 %a)
  ret i64 %z
}
; CHECK: r1:0 = vabsw(r1:0)

declare i64 @llvm.hexagon.A2.vabswsat(i64)
define i64 @A2_vabswsat(i64 %a) {
  %z = call i64 @llvm.hexagon.A2.vabswsat(i64 %a)
  ret i64 %z
}
; CHECK: r1:0 = vabsw(r1:0):sat
; Vector absolute difference halfwords
declare i64 @llvm.hexagon.M2.vabsdiffh(i64, i64)
define i64 @M2_vabsdiffh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vabsdiffh(r1:0, r3:2)

; Vector absolute difference words
declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64)
define i64 @M2_vabsdiffw(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vabsdiffw(r1:0, r3:2)
; Vector add halfwords
declare i64 @llvm.hexagon.A2.vaddh(i64, i64)
define i64 @A2_vaddh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vaddh(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vaddhs(i64, i64)
define i64 @A2_vaddhs(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vaddh(r1:0, r3:2):sat

declare i64 @llvm.hexagon.A2.vadduhs(i64, i64)
define i64 @A2_vadduhs(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vadduh(r1:0, r3:2):sat

; Vector add halfwords with saturate and pack to unsigned bytes
declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64)
define i32 @A5_vaddhubs(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b)
  ret i32 %z
}
; CHECK: r0 = vaddhub(r1:0, r3:2):sat
; Vector reduce add unsigned bytes
declare i64 @llvm.hexagon.A2.vraddub(i64, i64)
define i64 @A2_vraddub(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vraddub(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64)
define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
; CHECK: r1:0 += vraddub(r3:2, r5:4)

; Vector reduce add halfwords
declare i32 @llvm.hexagon.M2.vradduh(i64, i64)
define i32 @M2_vradduh(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b)
  ret i32 %z
}
; CHECK: r0 = vradduh(r1:0, r3:2)

declare i32 @llvm.hexagon.M2.vraddh(i64, i64)
define i32 @M2_vraddh(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b)
  ret i32 %z
}
; CHECK: r0 = vraddh(r1:0, r3:2)
; Vector add bytes
declare i64 @llvm.hexagon.A2.vaddub(i64, i64)
define i64 @A2_vaddub(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vaddub(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vaddubs(i64, i64)
define i64 @A2_vaddubs(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vaddub(r1:0, r3:2):sat

; Vector add words
declare i64 @llvm.hexagon.A2.vaddw(i64, i64)
define i64 @A2_vaddw(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vaddw(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vaddws(i64, i64)
define i64 @A2_vaddws(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vaddw(r1:0, r3:2):sat
; Vector average halfwords
declare i64 @llvm.hexagon.A2.vavgh(i64, i64)
define i64 @A2_vavgh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgh(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vavghr(i64, i64)
define i64 @A2_vavghr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgh(r1:0, r3:2):rnd

declare i64 @llvm.hexagon.A2.vavghcr(i64, i64)
define i64 @A2_vavghcr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgh(r1:0, r3:2):crnd

declare i64 @llvm.hexagon.A2.vavguh(i64, i64)
define i64 @A2_vavguh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavguh(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vavguhr(i64, i64)
define i64 @A2_vavguhr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavguh(r1:0, r3:2):rnd

declare i64 @llvm.hexagon.A2.vnavgh(i64, i64)
define i64 @A2_vnavgh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vnavgh(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vnavghr(i64, i64)
define i64 @A2_vnavghr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vnavgh(r1:0, r3:2):rnd

declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64)
define i64 @A2_vnavghcr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vnavgh(r1:0, r3:2):crnd
; Vector average unsigned bytes
declare i64 @llvm.hexagon.A2.vavgub(i64, i64)
define i64 @A2_vavgub(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgub(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vavgubr(i64, i64)
define i64 @A2_vavgubr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgub(r1:0, r3:2):rnd
; Vector average words
declare i64 @llvm.hexagon.A2.vavgw(i64, i64)
define i64 @A2_vavgw(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgw(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vavgwr(i64, i64)
define i64 @A2_vavgwr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgw(r1:0, r3:2):rnd

declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64)
define i64 @A2_vavgwcr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavgw(r1:0, r3:2):crnd

declare i64 @llvm.hexagon.A2.vavguw(i64, i64)
define i64 @A2_vavguw(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavguw(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vavguwr(i64, i64)
define i64 @A2_vavguwr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vavguw(r1:0, r3:2):rnd

declare i64 @llvm.hexagon.A2.vnavgw(i64, i64)
define i64 @A2_vnavgw(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vnavgw(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64)
define i64 @A2_vnavgwr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vnavgw(r1:0, r3:2):rnd

declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64)
define i64 @A2_vnavgwcr(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vnavgw(r1:0, r3:2):crnd
; Vector conditional negate
declare i64 @llvm.hexagon.S2.vcnegh(i64, i32)
define i64 @S2_vcnegh(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = vcnegh(r1:0, r2)

declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32)
define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 += vrcnegh(r3:2, r4)
; Vector maximum bytes
declare i64 @llvm.hexagon.A2.vmaxub(i64, i64)
define i64 @A2_vmaxub(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vmaxub(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vmaxb(i64, i64)
define i64 @A2_vmaxb(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vmaxb(r1:0, r3:2)

; Vector maximum halfwords
declare i64 @llvm.hexagon.A2.vmaxh(i64, i64)
define i64 @A2_vmaxh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vmaxh(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64)
define i64 @A2_vmaxuh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vmaxuh(r1:0, r3:2)
; Vector reduce maximum halfwords
declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32)
define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrmaxh(r3:2, r4)

declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32)
define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrmaxuh(r3:2, r4)

; Vector reduce maximum words
declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32)
define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrmaxw(r3:2, r4)

declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32)
define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrmaxuw(r3:2, r4)
; Vector minimum bytes
declare i64 @llvm.hexagon.A2.vminub(i64, i64)
define i64 @A2_vminub(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vminub(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vminb(i64, i64)
define i64 @A2_vminb(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vminb(r1:0, r3:2)

; Vector minimum halfwords
declare i64 @llvm.hexagon.A2.vminh(i64, i64)
define i64 @A2_vminh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vminh(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vminuh(i64, i64)
define i64 @A2_vminuh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vminuh(r1:0, r3:2)
; Vector reduce minimum halfwords
declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32)
define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrminh(r3:2, r4)

declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32)
define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrminuh(r3:2, r4)

; Vector reduce minimum words
declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32)
define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrminw(r3:2, r4)

declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32)
define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 = vrminuw(r3:2, r4)
; Vector sum of absolute differences unsigned bytes
declare i64 @llvm.hexagon.A2.vrsadub(i64, i64)
define i64 @A2_vrsadub(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vrsadub(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64)
define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
; CHECK: r1:0 += vrsadub(r3:2, r5:4)
; Vector subtract halfwords
declare i64 @llvm.hexagon.A2.vsubh(i64, i64)
define i64 @A2_vsubh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vsubh(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vsubhs(i64, i64)
define i64 @A2_vsubhs(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vsubh(r1:0, r3:2):sat

declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64)
define i64 @A2_vsubuhs(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vsubuh(r1:0, r3:2):sat
; Vector subtract bytes
declare i64 @llvm.hexagon.A2.vsubub(i64, i64)
define i64 @A2_vsubub(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vsubub(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vsububs(i64, i64)
define i64 @A2_vsububs(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vsubub(r1:0, r3:2):sat
; Vector subtract words
declare i64 @llvm.hexagon.A2.vsubw(i64, i64)
define i64 @A2_vsubw(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vsubw(r1:0, r3:2)

declare i64 @llvm.hexagon.A2.vsubws(i64, i64)
define i64 @A2_vsubws(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b)
  ret i64 %z
}
; CHECK: r1:0 = vsubw(r1:0, r3:2):sat