/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"

/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
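
/* Engine states driven by the STM above. */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};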

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current crypt process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};
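
/* Operation type carried in the per-request context below. */
enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

/*
 * The engine needs the expanded AES decryption key, i.e. the tail of the
 * encryption key schedule, so derive it once per setkey via the generic
 * software key expansion.
 */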
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/* copy only the supplied key length to avoid reading past the buffer */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}
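
/*
 * Gather up to @len bytes from the source scatterlist into the linear
 * buffer @dbuf, advancing the sg iterator state kept in @p across calls.
 */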
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;
			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}
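
/*
 * Copy as much of the remaining input as fits into the SRAM "data in"
 * window and record how much the next hardware pass will process.
 */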
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);

	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}
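
/*
 * Build the security-accelerator descriptor for the current chunk in
 * SRAM (config, key and, for CBC, the IV), then kick the engine.
 * Called with first_block=1 for a new request and via p->process(0)
 * for the remaining chunks.
 */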
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason.
	 */
}
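
/*
 * Final step of a cipher request: stop the sg iterators and, for CBC,
 * hand the last ciphertext block back as the IV for chaining.
 */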
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
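
/*
 * Runs in the queue thread once the engine has signalled completion:
 * copy the results out of SRAM, then either feed the engine the next
 * chunk or complete the request.
 */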
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}
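
/* Count how many scatterlist entries are needed to cover @total_bytes. */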
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (1) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}
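
/*
 * Set up the per-request progress state and start processing the
 * first chunk on the engine.
 */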
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}
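
/*
 * The queue thread: dequeues requests while the engine is idle, feeds
 * completed chunks back through dequeue_complete_req(), and sleeps
 * whenever there is nothing to do.
 */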
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		/* initialized: it is only assigned when the engine is idle */
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
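
/*
 * Enqueue an asynchronous request and poke the queue thread; the
 * thread picks it up once the engine goes idle.
 */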
static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);

	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);

	return IRQ_HANDLED;
}

static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mv_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= mv_cra_init,
	.cra_u			= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};

static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mv_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= mv_cra_init,
	.cra_u			= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		crypto_unregister_alg(&mv_aes_alg_ecb);
		goto err_irq;
	}
	return 0;

	/* unwind in reverse order of setup */
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	/* wipe key material left behind in SRAM */
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");