 * Multi buffer SHA1 algorithm Glue Code
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 * Copyright(c) 2014 Intel Corporation.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * Contact Information:
 *     Tim Chen <tim.c.chen@linux.intel.com>
 * Copyright(c) 2014 Intel Corporation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <asm/xsave.h>
#include <linux/hardirq.h>
#include <asm/fpu-internal.h>
#include "sha_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */
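/*
 * A submitted job can sit in an idle SHA1 lane waiting for enough
 * sibling jobs to fill the multi-buffer engine.  sha1_mb_add_list()
 * stamps every request with an expiry of arrival + FLUSH_INTERVAL, and
 * the per-cpu flusher (sha1_mb_flusher() below) force-flushes whatever
 * is still pending once that deadline passes.  usecs_to_jiffies()
 * rounds the interval up to at least one jiffy, so the effective
 * granularity is one timer tick.
 */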
static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
        struct mcryptd_ahash *mcryptd_tfm;
};
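/*
 * The per-request state is nested: a struct mcryptd_hash_request_ctx
 * lives in the __ctx area of an ahash_request and itself embeds a
 * struct shash_desc whose __ctx area holds the struct sha1_hash_ctx
 * handed to the context manager.  The two cast helpers below walk this
 * nesting with container_of() so a sha1_hash_ctx returned by the
 * manager can be mapped back to the request that submitted it.
 */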
static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
        struct shash_desc *desc;

        desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
        return container_of(desc, struct mcryptd_hash_request_ctx, desc);

static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
        return container_of((void *) ctx, struct ahash_request, __ctx);

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
                         struct shash_desc *desc)
        rctx->flag = HASH_UPDATE;
static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state,
                                                          struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);
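/*
 * These function pointers are bound to the AVX2 multi-buffer manager
 * routines in sha1_mb_mod_init().  The sha1_ctx_mgr_* layer below uses
 * them to run one SHA1 job per lane.  A rough usage sketch, for
 * illustration only (the real callers are the update/finup/final
 * handlers further down in this file):
 *
 *      sha1_ctx_mgr_init(mgr);
 *      done = sha1_ctx_mgr_submit(mgr, ctx, buf,  len,  HASH_FIRST);
 *      done = sha1_ctx_mgr_submit(mgr, ctx, buf2, len2, HASH_UPDATE);
 *      done = sha1_ctx_mgr_submit(mgr, ctx, buf3, len3, HASH_LAST);
 *      while ((done = sha1_ctx_mgr_flush(mgr)) != NULL)
 *              ;       drain lanes that are still in flight
 *
 * submit() and flush() return whichever context has completed, which
 * may be NULL or a different job's context, since the engine batches
 * several independent jobs per SIMD pass.
 */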
inline void sha1_init_digest(uint32_t *digest)
        static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
                        SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
        memcpy(digest, initial_digest, sizeof(initial_digest));
inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
        uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

        memset(&padblock[i], 0, SHA1_BLOCK_SIZE);

        i += ((SHA1_BLOCK_SIZE - 1) &
              (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
             + 1 + SHA1_PADLENGTHFIELD_SIZE;
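        /*
         * Worked example of the arithmetic above, assuming
         * SHA1_BLOCK_SIZE == 64 and SHA1_PADLENGTHFIELD_SIZE == 16 (see
         * the #if below): for total_len == 70, i starts at 70 & 63 == 6,
         * the masked term is 63 & -(70 + 16 + 1) == 41, and i becomes
         * 6 + 41 + 1 + 16 == 64, i.e. one extra block.  For
         * total_len == 120, i starts at 56; the pad byte plus the
         * 16-byte length field no longer fit in that block, the
         * expression yields i == 128, and two extra blocks are hashed.
         */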
#if SHA1_PADLENGTHFIELD_SIZE == 16
        *((uint64_t *) &padblock[i - 16]) = 0;
#endif

        *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

        /* Number of extra blocks to hash */
        return i >> SHA1_LOG2_BLOCK_SIZE;
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx)
        if (ctx->status & HASH_CTX_STS_COMPLETE) {
                /* Clear PROCESSING bit */
                ctx->status = HASH_CTX_STS_COMPLETE;

        /*
         * If the extra blocks are empty, begin hashing what remains
         * in the user's buffer.
         */
        if (ctx->partial_block_buffer_length == 0 &&
            ctx->incoming_buffer_length) {

                const void *buffer = ctx->incoming_buffer;
                uint32_t len = ctx->incoming_buffer_length;

                /*
                 * Only entire blocks can be hashed.
                 * Copy remainder to extra blocks buffer.
                 */
                copy_len = len & (SHA1_BLOCK_SIZE - 1);

                memcpy(ctx->partial_block_buffer,
                       ((const char *) buffer + len),
                ctx->partial_block_buffer_length = copy_len;

                ctx->incoming_buffer_length = 0;

                /* len should be a multiple of the block size now */
                assert((len % SHA1_BLOCK_SIZE) == 0);

                /* Set len to the number of blocks to be hashed */
                len >>= SHA1_LOG2_BLOCK_SIZE;

                ctx->job.buffer = (uint8_t *) buffer;
                ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr,

        /*
         * If the extra blocks are not empty, then we are
         * either on the last block(s) or we need more
         * user input before continuing.
         */
        if (ctx->status & HASH_CTX_STS_LAST) {

                uint8_t *buf = ctx->partial_block_buffer;
                uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);

                ctx->status = (HASH_CTX_STS_PROCESSING |
                               HASH_CTX_STS_COMPLETE);
                ctx->job.buffer = buf;
                ctx->job.len = (uint32_t) n_extra_blocks;
                ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);

        ctx->status = HASH_CTX_STS_IDLE;
static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
        /*
         * If get_comp_job returns NULL, there are no jobs complete.
         * If get_comp_job returns a job, verify that it is safe to return to the user.
         * If it is not ready, resubmit the job to finish processing.
         * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
         * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing.
         */
        struct sha1_hash_ctx *ctx;

        ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
        return sha1_ctx_mgr_resubmit(mgr, ctx);
static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
        sha1_job_mgr_init(&mgr->mgr);
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
                                                 struct sha1_hash_ctx *ctx,
        if (flags & (~HASH_ENTIRE)) {
                /* User should not pass anything other than FIRST, UPDATE, or LAST */
                ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;

        if (ctx->status & HASH_CTX_STS_PROCESSING) {
                /* Cannot submit to a currently processing job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;

        if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
                /* Cannot update a finished job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;

        if (flags & HASH_FIRST) {
                sha1_init_digest(ctx->job.result_digest);

                /* Reset byte counter */
                ctx->total_length = 0;

                /* Clear extra blocks */
                ctx->partial_block_buffer_length = 0;

        /* If we made it here, there were no errors during this call to submit */
        ctx->error = HASH_CTX_ERROR_NONE;

        /* Store buffer ptr info from user */
        ctx->incoming_buffer = buffer;
        ctx->incoming_buffer_length = len;

        /* Store the user's request flags and mark this ctx as currently being processed. */
        ctx->status = (flags & HASH_LAST) ?
                      (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
                      HASH_CTX_STS_PROCESSING;

        /* Advance byte counter */
        ctx->total_length += len;
        /*
         * If there is anything currently buffered in the extra blocks,
         * append to it until it contains a whole block.
         * Or if the user's buffer contains less than a whole block,
         * append as much as possible to the extra block.
         */
        if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
                /* Compute how many bytes to copy from user buffer into extra block */
                uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length;

                /* Copy and update relevant pointers and counters */
                memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],

                ctx->partial_block_buffer_length += copy_len;
                ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
                ctx->incoming_buffer_length = len - copy_len;

        /* The extra block should never contain more than 1 block here */
        assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
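        /*
         * Worked example of the append path above (illustration only,
         * not extra logic, assuming a 64-byte SHA1 block): with 20 bytes
         * already buffered and a 100-byte user buffer, copy_len is
         * 64 - 20 == 44, so 44 bytes fill the extra block,
         * incoming_buffer advances by 44 and incoming_buffer_length
         * drops to 56.  The now-full extra block is submitted just
         * below, and the remaining 56 bytes are later picked up by
         * sha1_ctx_mgr_resubmit() once this context comes back from
         * the manager.
         */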
        /* If the extra block buffer contains exactly 1 block, it can be hashed. */
        if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
                ctx->partial_block_buffer_length = 0;

                ctx->job.buffer = ctx->partial_block_buffer;
                ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);

        return sha1_ctx_mgr_resubmit(mgr, ctx);
static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
        struct sha1_hash_ctx *ctx;

        ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

        /* If flush returned 0, there are no more jobs in flight. */

        /*
         * If flush returned a job, resubmit the job to finish processing.
         */
        ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

        /*
         * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
         * Otherwise, all jobs currently being managed by the sha1_ctx_mgr
         * still need processing. Loop.
         */
static int sha1_mb_init(struct shash_desc *desc)
        struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

        sctx->job.result_digest[0] = SHA1_H0;
        sctx->job.result_digest[1] = SHA1_H1;
        sctx->job.result_digest[2] = SHA1_H2;
        sctx->job.result_digest[3] = SHA1_H3;
        sctx->job.result_digest[4] = SHA1_H4;
        sctx->total_length = 0;
        sctx->partial_block_buffer_length = 0;
        sctx->status = HASH_CTX_STS_IDLE;
static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
        struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
        __be32 *dst = (__be32 *) rctx->out;

        for (i = 0; i < 5; ++i)
                dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
                           struct mcryptd_alg_cstate *cstate, bool flush)
        int flag = HASH_UPDATE;

        struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
        struct sha1_hash_ctx *sha_ctx;

        while (!(rctx->flag & HASH_DONE)) {
                nbytes = crypto_ahash_walk_done(&rctx->walk, 0);

                /* check if the walk is done */
                if (crypto_ahash_walk_last(&rctx->walk)) {
                        rctx->flag |= HASH_DONE;
                        if (rctx->flag & HASH_FINAL)

                sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
                sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
                        sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);

        /* copy the results */
        if (rctx->flag & HASH_FINAL)
                sha1_mb_set_results(rctx);
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
                            struct mcryptd_alg_cstate *cstate,
        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        struct mcryptd_hash_request_ctx *req_ctx;

        /* remove from work list */
        spin_lock(&cstate->work_lock);
        list_del(&rctx->waiter);
        spin_unlock(&cstate->work_lock);

        rctx->complete(&req->base, err);
        rctx->complete(&req->base, err);

        /* check to see if there are other jobs that are done */
        sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
        req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&req_ctx, cstate, false);

        spin_lock(&cstate->work_lock);
        list_del(&req_ctx->waiter);
        spin_unlock(&cstate->work_lock);

        req = cast_mcryptd_ctx_to_req(req_ctx);
        rctx->complete(&req->base, ret);
        rctx->complete(&req->base, ret);

        sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
                             struct mcryptd_alg_cstate *cstate)
        unsigned long next_flush;
        unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

        rctx->tag.arrival = jiffies;    /* tag the arrival time */
        rctx->tag.seq_num = cstate->next_seq_num++;
        next_flush = rctx->tag.arrival + delay;
        rctx->tag.expire = next_flush;

        spin_lock(&cstate->work_lock);
        list_add_tail(&rctx->waiter, &cstate->work_list);
        spin_unlock(&cstate->work_lock);

        mcryptd_arm_flusher(cstate, delay);
static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
        struct mcryptd_hash_request_ctx *rctx =
                container_of(desc, struct mcryptd_hash_request_ctx, desc);
        struct mcryptd_alg_cstate *cstate =
                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;

        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");

        /* need to init context */
        req_ctx_init(rctx, desc);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);

        if (crypto_ahash_walk_last(&rctx->walk))
                rctx->flag |= HASH_DONE;

        sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
        sha1_mb_add_list(rctx, cstate);
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE);

        /* check if anything is returned */
        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);

        sha_complete_job(rctx, cstate, ret);
static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
                         unsigned int len, u8 *out)
        struct mcryptd_hash_request_ctx *rctx =
                container_of(desc, struct mcryptd_hash_request_ctx, desc);
        struct mcryptd_alg_cstate *cstate =
                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        int ret = 0, flag = HASH_UPDATE, nbytes;

        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");

        /* need to init context */
        req_ctx_init(rctx, desc);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);

        if (crypto_ahash_walk_last(&rctx->walk)) {
                rctx->flag |= HASH_DONE;

        rctx->flag |= HASH_FINAL;
        sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
        sha1_mb_add_list(rctx, cstate);

        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);

        /* check if anything is returned */
        if (sha_ctx->error) {
                ret = sha_ctx->error;

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);

        sha_complete_job(rctx, cstate, ret);
static int sha1_mb_final(struct shash_desc *desc, u8 *out)
        struct mcryptd_hash_request_ctx *rctx =
                container_of(desc, struct mcryptd_hash_request_ctx, desc);
        struct mcryptd_alg_cstate *cstate =
                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct sha1_hash_ctx *sha_ctx;

        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");

        /* need to init context */
        req_ctx_init(rctx, desc);

        rctx->flag |= HASH_DONE | HASH_FINAL;

        sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
        /* flag HASH_FINAL and 0 data size */
        sha1_mb_add_list(rctx, cstate);
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST);

        /* check if anything is returned */
        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);

        sha_complete_job(rctx, cstate, ret);
static int sha1_mb_export(struct shash_desc *desc, void *out)
        struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

static int sha1_mb_import(struct shash_desc *desc, const void *in)
        struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));
static struct shash_alg sha1_mb_shash_alg = {
        .digestsize = SHA1_DIGEST_SIZE,
        .init = sha1_mb_init,
        .update = sha1_mb_update,
        .final = sha1_mb_final,
        .finup = sha1_mb_finup,
        .export = sha1_mb_export,
        .import = sha1_mb_import,
        .descsize = sizeof(struct sha1_hash_ctx),
        .statesize = sizeof(struct sha1_hash_ctx),
        .cra_name = "__sha1-mb",
        .cra_driver_name = "__intel_sha1-mb",
        /*
         * use ASYNC flag as some buffers in multi-buffer
         * algo may not have completed before the hashing thread sleeps
         */
        .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SHA1_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
static int sha1_mb_async_init(struct ahash_request *req)
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_init(mcryptd_req);

static int sha1_mb_async_update(struct ahash_request *req)
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_update(mcryptd_req);

static int sha1_mb_async_finup(struct ahash_request *req)
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_finup(mcryptd_req);

static int sha1_mb_async_final(struct ahash_request *req)
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_final(mcryptd_req);

static int sha1_mb_async_digest(struct ahash_request *req)
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_digest(mcryptd_req);
static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
        struct mcryptd_ahash *mcryptd_tfm;
        struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
        struct mcryptd_hash_ctx *mctx;

        mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 0, 0);
        if (IS_ERR(mcryptd_tfm))
                return PTR_ERR(mcryptd_tfm);
        mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
        mctx->alg_state = &sha1_mb_alg_state;
        ctx->mcryptd_tfm = mcryptd_tfm;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct ahash_request) +
                                 crypto_ahash_reqsize(&mcryptd_tfm->base));

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
        struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

        mcryptd_free_ahash(ctx->mcryptd_tfm);
static struct ahash_alg sha1_mb_async_alg = {
        .init = sha1_mb_async_init,
        .update = sha1_mb_async_update,
        .final = sha1_mb_async_final,
        .finup = sha1_mb_async_finup,
        .digest = sha1_mb_async_digest,
        .digestsize = SHA1_DIGEST_SIZE,
        .cra_driver_name = "sha1_mb",
        .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
        .cra_blocksize = SHA1_BLOCK_SIZE,
        .cra_type = &crypto_ahash_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
        .cra_init = sha1_mb_async_init_tfm,
        .cra_exit = sha1_mb_async_exit_tfm,
        .cra_ctxsize = sizeof(struct sha1_mb_ctx),
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
        struct mcryptd_hash_request_ctx *rctx;
        unsigned long cur_time;
        unsigned long next_flush = 0;
        struct sha1_hash_ctx *sha_ctx;

        while (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                  struct mcryptd_hash_request_ctx, waiter);
                if (time_before(cur_time, rctx->tag.expire))
                        break;
                sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
                        pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                sha_finish_walk(&rctx, cstate, true);
                sha_complete_job(rctx, cstate, 0);

        if (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                  struct mcryptd_hash_request_ctx, waiter);
                /* get the hash context and then flush time */
                next_flush = rctx->tag.expire;
                mcryptd_arm_flusher(cstate, get_delay(next_flush));
static int __init sha1_mb_mod_init(void)
        struct mcryptd_alg_cstate *cpu_state;

        /* check for dependent cpu features */
        if (!boot_cpu_has(X86_FEATURE_AVX2) ||
            !boot_cpu_has(X86_FEATURE_BMI2))

        /* initialize multibuffer structures */
        sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

        sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
        sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
        sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
        sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

        if (!sha1_mb_alg_state.alg_cstate)

        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                cpu_state->next_flush = 0;
                cpu_state->next_seq_num = 0;
                cpu_state->flusher_engaged = false;
                INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
                cpu_state->cpu = cpu;
                cpu_state->alg_state = &sha1_mb_alg_state;
                cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL);
                sha1_ctx_mgr_init(cpu_state->mgr);
                INIT_LIST_HEAD(&cpu_state->work_list);
                spin_lock_init(&cpu_state->work_lock);

        sha1_mb_alg_state.flusher = &sha1_mb_flusher;

        err = crypto_register_shash(&sha1_mb_shash_alg);
        err = crypto_register_ahash(&sha1_mb_async_alg);

        crypto_unregister_shash(&sha1_mb_shash_alg);

        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);

        free_percpu(sha1_mb_alg_state.alg_cstate);
static void __exit sha1_mb_mod_fini(void)
        struct mcryptd_alg_cstate *cpu_state;

        crypto_unregister_ahash(&sha1_mb_async_alg);
        crypto_unregister_shash(&sha1_mb_shash_alg);
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);

        free_percpu(sha1_mb_alg_state.alg_cstate);
module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS("sha1");