; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
declare <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.aarch64.neon.sha1m(<4 x i32>, <1 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.aarch64.neon.sha1p(<4 x i32>, <1 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.aarch64.neon.sha1c(<4 x i32>, <1 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32>, <4 x i32>) #1
declare <1 x i32> @llvm.arm.neon.sha1h.v1i32(<1 x i32>) #1
declare <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8>) #1
declare <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8>) #1
declare <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8>, <16 x i8>) #1
declare <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8>, <16 x i8>) #1

define <16 x i8> @test_vaeseq_u8(<16 x i8> %data, <16 x i8> %key) {
; CHECK: test_vaeseq_u8:
; CHECK: aese {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
  %aese.i = tail call <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8> %data, <16 x i8> %key)
  ret <16 x i8> %aese.i
}

define <16 x i8> @test_vaesdq_u8(<16 x i8> %data, <16 x i8> %key) {
; CHECK: test_vaesdq_u8:
; CHECK: aesd {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
  %aesd.i = tail call <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8> %data, <16 x i8> %key)
  ret <16 x i8> %aesd.i
}

define <16 x i8> @test_vaesmcq_u8(<16 x i8> %data) {
; CHECK: test_vaesmcq_u8:
; CHECK: aesmc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
  %aesmc.i = tail call <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8> %data)
  ret <16 x i8> %aesmc.i
}

define <16 x i8> @test_vaesimcq_u8(<16 x i8> %data) {
; CHECK: test_vaesimcq_u8:
; CHECK: aesimc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
  %aesimc.i = tail call <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8> %data)
  ret <16 x i8> %aesimc.i
}

define i32 @test_vsha1h_u32(i32 %hash_e) {
; CHECK: test_vsha1h_u32:
; CHECK: sha1h {{s[0-9]+}}, {{s[0-9]+}}
entry:
  %sha1h.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
  %sha1h1.i = tail call <1 x i32> @llvm.arm.neon.sha1h.v1i32(<1 x i32> %sha1h.i)
  %0 = extractelement <1 x i32> %sha1h1.i, i32 0
  ret i32 %0
}

define <4 x i32> @test_vsha1su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w12_15) {
; CHECK: test_vsha1su1q_u32:
; CHECK: sha1su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
  %sha1su12.i = tail call <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32> %tw0_3, <4 x i32> %w12_15)
  ret <4 x i32> %sha1su12.i
}

define <4 x i32> @test_vsha256su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7) {
; CHECK: test_vsha256su0q_u32:
; CHECK: sha256su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
  %sha256su02.i = tail call <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32> %w0_3, <4 x i32> %w4_7)
  ret <4 x i32> %sha256su02.i
}

define <4 x i32> @test_vsha1cq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
; CHECK: test_vsha1cq_u32:
; CHECK: sha1c {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %sha1c.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
  %sha1c1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1c(<4 x i32> %hash_abcd, <1 x i32> %sha1c.i, <4 x i32> %wk)
  ret <4 x i32> %sha1c1.i
}

define <4 x i32> @test_vsha1pq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
; CHECK: test_vsha1pq_u32:
; CHECK: sha1p {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %sha1p.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
  %sha1p1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1p(<4 x i32> %hash_abcd, <1 x i32> %sha1p.i, <4 x i32> %wk)
  ret <4 x i32> %sha1p1.i
}

define <4 x i32> @test_vsha1mq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
; CHECK: test_vsha1mq_u32:
; CHECK: sha1m {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %sha1m.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
  %sha1m1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1m(<4 x i32> %hash_abcd, <1 x i32> %sha1m.i, <4 x i32> %wk)
  ret <4 x i32> %sha1m1.i
}

define <4 x i32> @test_vsha1su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11) {
; CHECK: test_vsha1su0q_u32:
; CHECK: sha1su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
  %sha1su03.i = tail call <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11)
  ret <4 x i32> %sha1su03.i
}

define <4 x i32> @test_vsha256hq_u32(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk) {
; CHECK: test_vsha256hq_u32:
; CHECK: sha256h {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %sha256h3.i = tail call <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
  ret <4 x i32> %sha256h3.i
}

define <4 x i32> @test_vsha256h2q_u32(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk) {
; CHECK: test_vsha256h2q_u32:
; CHECK: sha256h2 {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %sha256h23.i = tail call <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
  ret <4 x i32> %sha256h23.i
}

define <4 x i32> @test_vsha256su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15) {
; CHECK: test_vsha256su1q_u32:
; CHECK: sha256su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
  %sha256su13.i = tail call <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
  ret <4 x i32> %sha256su13.i
}