; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -mattr=+crypto | FileCheck %s
; RUN: not llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon 2>&1 | FileCheck --check-prefix=CHECK-NO-CRYPTO %s
; arm64 has a separate test for this, covering the same features (crypto.ll). N.b. NO-CRYPTO will need porting.
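
; The intrinsics below use the llvm.arm.neon.* names shared with the 32-bit
; ARM backend; each test checks that a call is selected to the corresponding
; AArch64 crypto instruction.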
declare <4 x i32> @llvm.arm.neon.sha256su1(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha256h2(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha256h(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32>, <4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha1m(<4 x i32>, i32, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha1p(<4 x i32>, i32, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha1c(<4 x i32>, i32, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha256su0(<4 x i32>, <4 x i32>) #1
declare <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32>, <4 x i32>) #1
declare i32 @llvm.arm.neon.sha1h(i32) #1
declare <16 x i8> @llvm.arm.neon.aesimc(<16 x i8>) #1
declare <16 x i8> @llvm.arm.neon.aesmc(<16 x i8>) #1
declare <16 x i8> @llvm.arm.neon.aesd(<16 x i8>, <16 x i8>) #1
declare <16 x i8> @llvm.arm.neon.aese(<16 x i8>, <16 x i8>) #1
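
; Only the first test carries a CHECK-NO-CRYPTO line: the second RUN invocation
; expects llc to fail with "Cannot select" on the first crypto intrinsic it
; reaches when +crypto is not given.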
define <16 x i8> @test_vaeseq_u8(<16 x i8> %data, <16 x i8> %key) {
; CHECK-LABEL: test_vaeseq_u8:
; CHECK: aese {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NO-CRYPTO: Cannot select: intrinsic %llvm.arm.neon.aese
  %aese.i = tail call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %data, <16 x i8> %key)
  ret <16 x i8> %aese.i
}

define <16 x i8> @test_vaesdq_u8(<16 x i8> %data, <16 x i8> %key) {
; CHECK-LABEL: test_vaesdq_u8:
; CHECK: aesd {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %aesd.i = tail call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %data, <16 x i8> %key)
  ret <16 x i8> %aesd.i
}

define <16 x i8> @test_vaesmcq_u8(<16 x i8> %data) {
; CHECK-LABEL: test_vaesmcq_u8:
; CHECK: aesmc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %aesmc.i = tail call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %data)
  ret <16 x i8> %aesmc.i
}

define <16 x i8> @test_vaesimcq_u8(<16 x i8> %data) {
; CHECK-LABEL: test_vaesimcq_u8:
; CHECK: aesimc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %aesimc.i = tail call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %data)
  ret <16 x i8> %aesimc.i
}
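
; SHA-1 and SHA-256 tests. SHA1H operates on a single 32-bit hash element and
; is selected to the scalar s-register form of the instruction.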
define i32 @test_vsha1h_u32(i32 %hash_e) {
; CHECK-LABEL: test_vsha1h_u32:
; CHECK: sha1h {{s[0-9]+}}, {{s[0-9]+}}
  %sha1h1.i = tail call i32 @llvm.arm.neon.sha1h(i32 %hash_e)
  ret i32 %sha1h1.i
}

define <4 x i32> @test_vsha1su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w12_15) {
; CHECK-LABEL: test_vsha1su1q_u32:
; CHECK: sha1su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %sha1su12.i = tail call <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32> %tw0_3, <4 x i32> %w12_15)
  ret <4 x i32> %sha1su12.i
}

define <4 x i32> @test_vsha256su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7) {
; CHECK-LABEL: test_vsha256su0q_u32:
; CHECK: sha256su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %sha256su02.i = tail call <4 x i32> @llvm.arm.neon.sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7)
  ret <4 x i32> %sha256su02.i
}

define <4 x i32> @test_vsha1cq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
; CHECK-LABEL: test_vsha1cq_u32:
; CHECK: sha1c {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
  %sha1c1.i = tail call <4 x i32> @llvm.arm.neon.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
  ret <4 x i32> %sha1c1.i
}

define <4 x i32> @test_vsha1pq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
; CHECK-LABEL: test_vsha1pq_u32:
; CHECK: sha1p {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
  %sha1p1.i = tail call <4 x i32> @llvm.arm.neon.sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
  ret <4 x i32> %sha1p1.i
}

define <4 x i32> @test_vsha1mq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
; CHECK-LABEL: test_vsha1mq_u32:
; CHECK: sha1m {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
  %sha1m1.i = tail call <4 x i32> @llvm.arm.neon.sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
  ret <4 x i32> %sha1m1.i
}

define <4 x i32> @test_vsha1su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11) {
; CHECK-LABEL: test_vsha1su0q_u32:
; CHECK: sha1su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %sha1su03.i = tail call <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11)
  ret <4 x i32> %sha1su03.i
}

define <4 x i32> @test_vsha256hq_u32(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk) {
; CHECK-LABEL: test_vsha256hq_u32:
; CHECK: sha256h {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
  %sha256h3.i = tail call <4 x i32> @llvm.arm.neon.sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
  ret <4 x i32> %sha256h3.i
}

define <4 x i32> @test_vsha256h2q_u32(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk) {
; CHECK-LABEL: test_vsha256h2q_u32:
; CHECK: sha256h2 {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
  %sha256h23.i = tail call <4 x i32> @llvm.arm.neon.sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
  ret <4 x i32> %sha256h23.i
}

define <4 x i32> @test_vsha256su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15) {
; CHECK-LABEL: test_vsha256su1q_u32:
; CHECK: sha256su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %sha256su13.i = tail call <4 x i32> @llvm.arm.neon.sha256su1(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
  ret <4 x i32> %sha256su13.i
}

; Attribute group referenced by the intrinsic declarations above.
attributes #1 = { nounwind readnone }