/*
 * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
 * using optimized ARM assembler and NEON instructions.
 *
 * Copyright © 2015 Google Inc.
 *
 * This file is based on sha256_ssse3_glue.c:
 *   Copyright (C) 2013 Intel Corporation
 *   Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
18 #include <crypto/internal/hash.h>
19 #include <linux/crypto.h>
20 #include <linux/init.h>
21 #include <linux/module.h>
23 #include <linux/cryptohash.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <crypto/sha.h>
27 #include <asm/byteorder.h>
30 #include "sha256_glue.h"
/*
 * Core transform, implemented in ARM assembler: compresses @num_blks
 * 64-byte blocks at @data into the eight-word @digest state.
 */
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
					unsigned int num_blks);
36 int sha256_init(struct shash_desc *desc)
38 struct sha256_state *sctx = shash_desc_ctx(desc);
40 sctx->state[0] = SHA256_H0;
41 sctx->state[1] = SHA256_H1;
42 sctx->state[2] = SHA256_H2;
43 sctx->state[3] = SHA256_H3;
44 sctx->state[4] = SHA256_H4;
45 sctx->state[5] = SHA256_H5;
46 sctx->state[6] = SHA256_H6;
47 sctx->state[7] = SHA256_H7;
53 int sha224_init(struct shash_desc *desc)
55 struct sha256_state *sctx = shash_desc_ctx(desc);
57 sctx->state[0] = SHA224_H0;
58 sctx->state[1] = SHA224_H1;
59 sctx->state[2] = SHA224_H2;
60 sctx->state[3] = SHA224_H3;
61 sctx->state[4] = SHA224_H4;
62 sctx->state[5] = SHA224_H5;
63 sctx->state[6] = SHA224_H6;
64 sctx->state[7] = SHA224_H7;
70 int __sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len,
73 struct sha256_state *sctx = shash_desc_ctx(desc);
74 unsigned int done = 0;
79 done = SHA256_BLOCK_SIZE - partial;
80 memcpy(sctx->buf + partial, data, done);
81 sha256_block_data_order(sctx->state, sctx->buf, 1);
84 if (len - done >= SHA256_BLOCK_SIZE) {
85 const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
87 sha256_block_data_order(sctx->state, data + done, rounds);
88 done += rounds * SHA256_BLOCK_SIZE;
91 memcpy(sctx->buf, data + done, len - done);
96 int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len)
98 struct sha256_state *sctx = shash_desc_ctx(desc);
99 unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
101 /* Handle the fast case right here */
102 if (partial + len < SHA256_BLOCK_SIZE) {
104 memcpy(sctx->buf + partial, data, len);
109 return __sha256_update(desc, data, len, partial);
112 /* Add padding and return the message digest. */
113 static int sha256_final(struct shash_desc *desc, u8 *out)
115 struct sha256_state *sctx = shash_desc_ctx(desc);
116 unsigned int i, index, padlen;
117 __be32 *dst = (__be32 *)out;
119 static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
121 /* save number of bits */
122 bits = cpu_to_be64(sctx->count << 3);
124 /* Pad out to 56 mod 64 and append length */
125 index = sctx->count % SHA256_BLOCK_SIZE;
126 padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
128 /* We need to fill a whole block for __sha256_update */
130 sctx->count += padlen;
131 memcpy(sctx->buf + index, padding, padlen);
133 __sha256_update(desc, padding, padlen, index);
135 __sha256_update(desc, (const u8 *)&bits, sizeof(bits), 56);
137 /* Store state in digest */
138 for (i = 0; i < 8; i++)
139 dst[i] = cpu_to_be32(sctx->state[i]);
142 memset(sctx, 0, sizeof(*sctx));
147 static int sha224_final(struct shash_desc *desc, u8 *out)
149 u8 D[SHA256_DIGEST_SIZE];
151 sha256_final(desc, D);
153 memcpy(out, D, SHA224_DIGEST_SIZE);
154 memset(D, 0, SHA256_DIGEST_SIZE);
159 int sha256_export(struct shash_desc *desc, void *out)
161 struct sha256_state *sctx = shash_desc_ctx(desc);
163 memcpy(out, sctx, sizeof(*sctx));
168 int sha256_import(struct shash_desc *desc, const void *in)
170 struct sha256_state *sctx = shash_desc_ctx(desc);
172 memcpy(sctx, in, sizeof(*sctx));
177 static struct shash_alg algs[] = { {
178 .digestsize = SHA256_DIGEST_SIZE,
180 .update = sha256_update,
181 .final = sha256_final,
182 .export = sha256_export,
183 .import = sha256_import,
184 .descsize = sizeof(struct sha256_state),
185 .statesize = sizeof(struct sha256_state),
187 .cra_name = "sha256",
188 .cra_driver_name = "sha256-asm",
190 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
191 .cra_blocksize = SHA256_BLOCK_SIZE,
192 .cra_module = THIS_MODULE,
195 .digestsize = SHA224_DIGEST_SIZE,
197 .update = sha256_update,
198 .final = sha224_final,
199 .export = sha256_export,
200 .import = sha256_import,
201 .descsize = sizeof(struct sha256_state),
202 .statesize = sizeof(struct sha256_state),
204 .cra_name = "sha224",
205 .cra_driver_name = "sha224-asm",
207 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
208 .cra_blocksize = SHA224_BLOCK_SIZE,
209 .cra_module = THIS_MODULE,
213 static int __init sha256_mod_init(void)
215 int res = crypto_register_shashes(algs, ARRAY_SIZE(algs));
220 if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
221 res = crypto_register_shashes(sha256_neon_algs,
222 ARRAY_SIZE(sha256_neon_algs));
225 crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
231 static void __exit sha256_mod_fini(void)
233 crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
235 if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
236 crypto_unregister_shashes(sha256_neon_algs,
237 ARRAY_SIZE(sha256_neon_algs));
/* Module entry/exit hooks and metadata. */
module_init(sha256_mod_init);
module_exit(sha256_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON");

MODULE_ALIAS_CRYPTO("sha256");