/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior <sebastian at breakpoint dot cc>
 * License: GPLv2
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"
#define MV_CESA	"MV-CESA:"
/* Hash requests longer than this are finished by the software fallback. */
#define MAX_HW_HASH_SIZE	0xFFFF
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /.\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};
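/*
 * Editorial sketch of the bookkeeping above, with made-up numbers: if
 * max_req_size were 1600 bytes and a request carried 8000 bytes,
 * hw_nbytes would be 8000 and the engine would run five times. Each run
 * stages up to 1600 bytes in SRAM (crypt_len), and dequeue_complete_req()
 * advances hw_processed_bytes by crypt_len until it reaches hw_nbytes.
 */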
struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

static struct crypto_priv *cpg;
struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1,
};
struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};
struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
	struct scatterlist dummysg;
};
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
		       4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}
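/*
 * Editorial note: the engine decrypts with the tail end of the AES key
 * schedule rather than with the raw key, so the schedule is expanded
 * once in software (crypto_aes_expand_key() above) and the final round
 * keys are cached in aes_dec_key in the layout the hardware expects.
 */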
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/* copy only the caller's key; reading AES_KEY_LEN would over-read
	 * the source buffer for 128/192 bit keys */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;
			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
		       AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
		       AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
	       sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/* XXX: add a timeout in case the completion interrupt never arrives. */
}
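/*
 * Kick sequence, common to the crypt and hash paths: the descriptor
 * offset is written to SEC_ACCEL_DESC_P0, then SEC_CMD_EN_SEC_ACCL0
 * starts the engine; completion comes back through crypto_int() below.
 */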
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	/* CBC: hand the updated IV back to the caller */
	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		break;
	}

	op.mac_src_p =
	    MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
	    MAC_SRC_TOTAL_LEN((u32) req_ctx->count);

	setup_data_in();

	op.mac_digest =
	    MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv =
	    MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
	    MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
	    && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
	    && (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/* XXX: add a timeout in case the completion interrupt never arrives. */
}
static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 is supported here for now */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}
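/*
 * The software fallback runs in two situations: the total length exceeds
 * MAX_HW_HASH_SIZE, or a final request leaves partial-block bytes that
 * never went through the engine. In the first case the shash starts from
 * scratch (first_hash); in the second the hardware state registers are
 * imported into the shash above.
 */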
static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else
			mv_hash_final_fallback(req);
	}
}
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (sl) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	int num_sgs, hw_bytes, old_extra_bytes, rc;
	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	if (unlikely(ctx->extra_bytes)) {
		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
		       ctx->extra_bytes);
		p->crypt_len = ctx->extra_bytes;
	}

	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));

	if (unlikely(!ctx->first_hash)) {
		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
	}

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    container_of(async_req,
						 struct ablkcipher_request,
						 base);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}
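/*
 * Design note: all requests funnel through this single kthread, so the
 * engine state machine never sees concurrent submissions; the spinlock
 * only has to protect the queue and the eng_st handshake with
 * crypto_int() and mv_handle_req().
 */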
static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}
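/*
 * A minimal caller-side sketch (editorial, not part of the driver) for
 * reaching the entry points above through the async blkcipher API of
 * this kernel generation; my_sg, my_iv, my_done, key and nbytes are
 * placeholders:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_done, NULL);
 *	ablkcipher_request_set_crypt(req, my_sg, my_sg, nbytes, my_iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *
 * -EINPROGRESS from encrypt() means the request was queued; my_done()
 * fires once dequeue_complete_req() finishes it.
 */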
static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}
static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}
static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned int req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}
static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}
static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}
static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	/*
	 * final() carries no data of its own; point the request at a small
	 * dummy scatterlist so the common submission path can run.
	 */
	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}
static int mv_hash_finup(struct ahash_request *req)
{
	if (!req->nbytes)
		return mv_hash_final(req);

	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}
static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}
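/*
 * A matching caller-side sketch (editorial) for the hash entry points;
 * my_sg, my_done and nbytes are placeholders:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 out[SHA1_DIGEST_SIZE];
 *	int err;
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, NULL);
 *	ahash_request_set_crypt(req, my_sg, out, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * digest() lands in mv_hash_digest(); an update()/final() sequence lands
 * in mv_hash_update() and mv_hash_final() instead.
 */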
static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;

	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/* Can't see a way to extract the ipad/opad from the fallback tfm,
	   so this basically copies code from the hmac module */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned int i;
		char ipad[ss];
		char opad[ss];

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err = crypto_shash_digest(&desc.shash, key, keylen,
						  ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}
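/*
 * Rationale: HMAC(key, msg) = H((key ^ opad) || H((key ^ ipad) || msg)).
 * The code above builds key ^ ipad and key ^ opad, runs each through one
 * block of the base hash and exports the two intermediate states; the
 * engine is then seeded with those states (SRAM_HMAC_IV_IN/OUT in
 * mv_start_new_hash_req()), so it never needs the raw key.
 */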
static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}
static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}
static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}
static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}
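/*
 * Note the split: the hard irq handler only acknowledges the engine and
 * flips eng_st to ENGINE_W_DEQUEUE; all data movement happens in the
 * queue_manag() thread it wakes, which keeps the SRAM copies out of
 * interrupt context.
 */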
static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};
static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};
static struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		 .digestsize = SHA1_DIGEST_SIZE,
		 .base = {
			  .cra_name = "sha1",
			  .cra_driver_name = "mv-sha1",
			  .cra_priority = 300,
			  .cra_flags =
			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			  .cra_blocksize = SHA1_BLOCK_SIZE,
			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			  .cra_init = mv_cra_hash_sha1_init,
			  .cra_exit = mv_cra_hash_exit,
			  .cra_module = THIS_MODULE,
			  }
		 }
};
static struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		 .digestsize = SHA1_DIGEST_SIZE,
		 .base = {
			  .cra_name = "hmac(sha1)",
			  .cra_driver_name = "mv-hmac-sha1",
			  .cra_priority = 300,
			  .cra_flags =
			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			  .cra_blocksize = SHA1_BLOCK_SIZE,
			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			  .cra_init = mv_cra_hash_hmac_sha1_init,
			  .cra_exit = mv_cra_hash_exit,
			  .cra_module = THIS_MODULE,
			  }
		 }
};
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = -ENXIO;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			  cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}
static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	/* Wipe key material before the SRAM mapping goes away. */
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}
static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");
static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);
MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");