Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
index 6f29012bcc434dbcc1101782072678991d53a509..18a436cafc107f1004cb4686e7ca8946d2f8f2d2 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
 
 #include "mv_cesa.h"
+
+#define MV_CESA        "MV-CESA:"
+#define MAX_HW_HASH_SIZE       0xFFFF
+
 /*
  * STM:
  *   /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
  * @dst_sg_it:         sg iterator for dst
  * @sg_src_left:       bytes left in src to process (scatter list)
  * @src_start:         offset to add to src start position (scatter list)
- * @crypt_len:         length of current crypt process
+ * @crypt_len:         length of current hw crypt/hash process
+ * @hw_nbytes:         total bytes to process in hw for this request
+ * @copy_back:         whether to copy data back (crypt) or not (hash)
  * @sg_dst_left:       bytes left dst to process in this scatter list
  * @dst_start:         offset to add to dst start position (scatter list)
- * @total_req_bytes:   total number of bytes processed (request).
+ * @hw_processed_bytes:        number of bytes processed by hw (request).
  *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
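
A minimal sketch of the buffering pattern these fields support, in plain C with hypothetical names and no kernel dependencies: a fixed staging buffer (the SRAM analogue) is filled repeatedly from variable-sized segments, and the position within the segment list is carried across fills.

#include <stddef.h>
#include <string.h>

struct seg { const char *buf; size_t len; };

struct progress {
	size_t seg_idx;     /* which segment we are in */
	size_t src_start;   /* offset into current segment */
	size_t sg_src_left; /* bytes left in current segment */
};

/* Copy up to 'len' bytes into dbuf, resuming where the last call stopped. */
static size_t fill_staging(struct progress *p, const struct seg *sl,
			   size_t nsegs, char *dbuf, size_t len)
{
	size_t copied = 0;

	while (copied < len && p->seg_idx < nsegs) {
		if (!p->sg_src_left) {
			p->sg_src_left = sl[p->seg_idx].len;
			p->src_start = 0;
			if (!p->sg_src_left) { /* skip empty segments */
				p->seg_idx++;
				continue;
			}
		}
		size_t chunk = p->sg_src_left < len - copied ?
			       p->sg_src_left : len - copied;
		memcpy(dbuf + copied, sl[p->seg_idx].buf + p->src_start, chunk);
		copied += chunk;
		p->src_start += chunk;
		p->sg_src_left -= chunk;
		if (!p->sg_src_left)
			p->seg_idx++;
	}
	return copied;
}
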
@@ -51,15 +59,19 @@ enum engine_status {
 struct req_progress {
        struct sg_mapping_iter src_sg_it;
        struct sg_mapping_iter dst_sg_it;
+       void (*complete)(void);
+       void (*process)(int is_first);
 
        /* src mostly */
        int sg_src_left;
        int src_start;
        int crypt_len;
+       int hw_nbytes;
        /* dst mostly */
+       int copy_back;
        int sg_dst_left;
        int dst_start;
-       int total_req_bytes;
+       int hw_processed_bytes;
 };
 
 struct crypto_priv {
@@ -72,10 +84,12 @@ struct crypto_priv {
        spinlock_t lock;
        struct crypto_queue queue;
        enum engine_status eng_st;
-       struct ablkcipher_request *cur_req;
+       struct crypto_async_request *cur_req;
        struct req_progress p;
        int max_req_size;
        int sram_size;
+       int has_sha1;
+       int has_hmac_sha1;
 };
 
 static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
        int decrypt;
 };
 
+enum hash_op {
+       COP_SHA1,
+       COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+       struct crypto_shash *fallback;
+       struct crypto_shash *base_hash;
+       u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+       int count_add;
+       enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+       u64 count;
+       u32 state[SHA1_DIGEST_SIZE / 4];
+       u8 buffer[SHA1_BLOCK_SIZE];
+       int first_hash;         /* marks that we don't have previous state */
+       int last_chunk;         /* marks that this is the 'final' request */
+       int extra_bytes;        /* unprocessed bytes in buffer */
+       enum hash_op op;
+       int count_add;
+       struct scatterlist dummysg;
+};
+
 static void compute_aes_dec_key(struct mv_ctx *ctx)
 {
        struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
        return 0;
 }
 
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
        int ret;
-       void *buf;
+       void *sbuf;
+       int copied = 0;
 
-       if (!cpg->p.sg_src_left) {
-               ret = sg_miter_next(&cpg->p.src_sg_it);
-               BUG_ON(!ret);
-               cpg->p.sg_src_left = cpg->p.src_sg_it.length;
-               cpg->p.src_start = 0;
-       }
-
-       cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
-       buf = cpg->p.src_sg_it.addr;
-       buf += cpg->p.src_start;
+       while (1) {
+               if (!p->sg_src_left) {
+                       ret = sg_miter_next(&p->src_sg_it);
+                       BUG_ON(!ret);
+                       p->sg_src_left = p->src_sg_it.length;
+                       p->src_start = 0;
+               }
 
-       memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+               sbuf = p->src_sg_it.addr + p->src_start;
+
+               if (p->sg_src_left <= len - copied) {
+                       memcpy(dbuf + copied, sbuf, p->sg_src_left);
+                       copied += p->sg_src_left;
+                       p->sg_src_left = 0;
+                       if (copied >= len)
+                               break;
+               } else {
+                       int copy_len = len - copied;
+                       memcpy(dbuf + copied, sbuf, copy_len);
+                       p->src_start += copy_len;
+                       p->sg_src_left -= copy_len;
+                       break;
+               }
+       }
+}
 
-       cpg->p.sg_src_left -= cpg->p.crypt_len;
-       cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+       struct req_progress *p = &cpg->p;
+       int data_in_sram =
+           min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+       copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+                       data_in_sram - p->crypt_len);
+       p->crypt_len = data_in_sram;
 }
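
A standalone restatement of the sizing above (assumed names, plain C). With illustrative numbers: given a 100 KiB request of which 96 KiB is done, an 8 KiB SRAM window, and 1 KiB already staged, the call stages min(4 KiB, 8 KiB) - 1 KiB = 3 KiB of fresh bytes and leaves crypt_len at 4 KiB for this pass.

/* How many fresh bytes setup_data_in() copies behind what is staged. */
static int fresh_bytes_to_stage(int hw_nbytes, int hw_processed_bytes,
				int max_req_size, int already_staged)
{
	int data_in_sram = hw_nbytes - hw_processed_bytes;

	if (data_in_sram > max_req_size)
		data_in_sram = max_req_size;
	return data_in_sram - already_staged;
}
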
 
 static void mv_process_current_q(int first_block)
 {
-       struct ablkcipher_request *req = cpg->cur_req;
+       struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
                break;
        case COP_AES_CBC:
+       default:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
                op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
                        ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
                ENC_P_DST(SRAM_DATA_OUT_START);
        op.enc_key_p = SRAM_DATA_KEY_P;
 
-       setup_data_in(req);
+       setup_data_in();
        op.enc_len = cpg->p.crypt_len;
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block)
 
 static void mv_crypto_algo_completion(void)
 {
-       struct ablkcipher_request *req = cpg->cur_req;
+       struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 
+       sg_miter_stop(&cpg->p.src_sg_it);
+       sg_miter_stop(&cpg->p.dst_sg_it);
+
        if (req_ctx->op != COP_AES_CBC)
                return;
 
        memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
 }
 
+static void mv_process_hash_current(int first_block)
+{
+       struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+       struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+       struct req_progress *p = &cpg->p;
+       struct sec_accel_config op = { 0 };
+       int is_last;
+
+       switch (req_ctx->op) {
+       case COP_SHA1:
+       default:
+               op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+               break;
+       case COP_HMAC_SHA1:
+               op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+               break;
+       }
+
+       op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
+               MAC_SRC_TOTAL_LEN((u32)req_ctx->count);
+
+       setup_data_in();
+
+       op.mac_digest =
+               MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+       op.mac_iv =
+               MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+               MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+       is_last = req_ctx->last_chunk
+               && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+               && (req_ctx->count <= MAX_HW_HASH_SIZE);
+       if (req_ctx->first_hash) {
+               if (is_last)
+                       op.config |= CFG_NOT_FRAG;
+               else
+                       op.config |= CFG_FIRST_FRAG;
+
+               req_ctx->first_hash = 0;
+       } else {
+               if (is_last)
+                       op.config |= CFG_LAST_FRAG;
+               else
+                       op.config |= CFG_MID_FRAG;
+       }
+
+       memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+       /* GO */
+       writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+       /*
+        * XXX: add timer if the interrupt does not occur for some mystery
+        * reason
+        */
+}
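
The four-way fragment choice above reduces to a small truth table. A standalone sketch with stand-in constants (not the driver's CFG_* values): the first chunk of a stream opens the hash, the last one closes it, and a stream that fits in a single chunk is not fragmented at all.

enum frag_mode { NOT_FRAG, FIRST_FRAG, MID_FRAG, LAST_FRAG };

static enum frag_mode pick_frag_mode(int first_hash, int is_last)
{
	if (first_hash)
		return is_last ? NOT_FRAG : FIRST_FRAG;
	return is_last ? LAST_FRAG : MID_FRAG;
}

Note that is_last is deliberately forced off once the total length exceeds MAX_HW_HASH_SIZE, so an oversized stream ends in a MID_FRAG state and is finished by the software fallback.
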
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+                                         struct shash_desc *desc)
+{
+       int i;
+       struct sha1_state shash_state;
+
+       shash_state.count = ctx->count + ctx->count_add;
+       for (i = 0; i < 5; i++)
+               shash_state.state[i] = ctx->state[i];
+       memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+       return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+       } desc;
+       int rc;
+
+       desc.shash.tfm = tfm_ctx->fallback;
+       desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       if (unlikely(req_ctx->first_hash)) {
+               crypto_shash_init(&desc.shash);
+               crypto_shash_update(&desc.shash, req_ctx->buffer,
+                                   req_ctx->extra_bytes);
+       } else {
+               /* only SHA1 is supported for now */
+               rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+               if (rc)
+                       goto out;
+       }
+       rc = crypto_shash_final(&desc.shash, req->result);
+out:
+       return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+       struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+       struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+       if (ctx->extra_bytes)
+               copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+       sg_miter_stop(&cpg->p.src_sg_it);
+
+       ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+       ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+       ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+       ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+       ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+       if (likely(ctx->last_chunk)) {
+               if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+                       memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+                              crypto_ahash_digestsize(crypto_ahash_reqtfm
+                                                      (req)));
+               } else {
+                       mv_hash_final_fallback(req);
+               }
+       }
+}
+
 static void dequeue_complete_req(void)
 {
-       struct ablkcipher_request *req = cpg->cur_req;
+       struct crypto_async_request *req = cpg->cur_req;
        void *buf;
        int ret;
+       cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+       if (cpg->p.copy_back) {
+               int need_copy_len = cpg->p.crypt_len;
+               int sram_offset = 0;
+               do {
+                       int dst_copy;
+
+                       if (!cpg->p.sg_dst_left) {
+                               ret = sg_miter_next(&cpg->p.dst_sg_it);
+                               BUG_ON(!ret);
+                               cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+                               cpg->p.dst_start = 0;
+                       }
 
-       cpg->p.total_req_bytes += cpg->p.crypt_len;
-       do {
-               int dst_copy;
-
-               if (!cpg->p.sg_dst_left) {
-                       ret = sg_miter_next(&cpg->p.dst_sg_it);
-                       BUG_ON(!ret);
-                       cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
-                       cpg->p.dst_start = 0;
-               }
-
-               buf = cpg->p.dst_sg_it.addr;
-               buf += cpg->p.dst_start;
+                       buf = cpg->p.dst_sg_it.addr;
+                       buf += cpg->p.dst_start;
 
-               dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+                       dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
 
-               memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+                       memcpy(buf,
+                              cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+                              dst_copy);
+                       sram_offset += dst_copy;
+                       cpg->p.sg_dst_left -= dst_copy;
+                       need_copy_len -= dst_copy;
+                       cpg->p.dst_start += dst_copy;
+               } while (need_copy_len > 0);
+       }
 
-               cpg->p.sg_dst_left -= dst_copy;
-               cpg->p.crypt_len -= dst_copy;
-               cpg->p.dst_start += dst_copy;
-       } while (cpg->p.crypt_len > 0);
+       cpg->p.crypt_len = 0;
 
        BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
-       if (cpg->p.total_req_bytes < req->nbytes) {
+       if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
                /* process next scatter list entry */
                cpg->eng_st = ENGINE_BUSY;
-               mv_process_current_q(0);
+               cpg->p.process(0);
        } else {
-               sg_miter_stop(&cpg->p.src_sg_it);
-               sg_miter_stop(&cpg->p.dst_sg_it);
-               mv_crypto_algo_completion();
+               cpg->p.complete();
                cpg->eng_st = ENGINE_IDLE;
-               req->base.complete(&req->base, 0);
+               local_bh_disable();
+               req->complete(req, 0);
+               local_bh_enable();
        }
 }
 
 static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 {
        int i = 0;
-
-       do {
-               total_bytes -= sl[i].length;
-               i++;
-
-       } while (total_bytes > 0);
+       size_t cur_len;
+
+       while (1) {
+               cur_len = sl[i].length;
+               ++i;
+               if (total_bytes > cur_len)
+                       total_bytes -= cur_len;
+               else
+                       break;
+       }
 
        return i;
 }
 
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
 {
+       struct req_progress *p = &cpg->p;
        int num_sgs;
 
-       cpg->cur_req = req;
-       memset(&cpg->p, 0, sizeof(struct req_progress));
+       cpg->cur_req = &req->base;
+       memset(p, 0, sizeof(struct req_progress));
+       p->hw_nbytes = req->nbytes;
+       p->complete = mv_crypto_algo_completion;
+       p->process = mv_process_current_q;
+       p->copy_back = 1;
 
        num_sgs = count_sgs(req->src, req->nbytes);
-       sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+       sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
 
        num_sgs = count_sgs(req->dst, req->nbytes);
-       sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+       sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
        mv_process_current_q(1);
 }
 
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+       struct req_progress *p = &cpg->p;
+       struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+       int num_sgs, hw_bytes, old_extra_bytes, rc;
+       cpg->cur_req = &req->base;
+       memset(p, 0, sizeof(struct req_progress));
+       hw_bytes = req->nbytes + ctx->extra_bytes;
+       old_extra_bytes = ctx->extra_bytes;
+
+       if (unlikely(ctx->extra_bytes)) {
+               memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+                      ctx->extra_bytes);
+               p->crypt_len = ctx->extra_bytes;
+       }
+
+       memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+       if (unlikely(!ctx->first_hash)) {
+               writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+               writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+               writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+               writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+               writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+       }
+
+       ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+       if (ctx->extra_bytes != 0
+           && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+               hw_bytes -= ctx->extra_bytes;
+       else
+               ctx->extra_bytes = 0;
+
+       num_sgs = count_sgs(req->src, req->nbytes);
+       sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+       if (hw_bytes) {
+               p->hw_nbytes = hw_bytes;
+               p->complete = mv_hash_algo_completion;
+               p->process = mv_process_hash_current;
+
+               mv_process_hash_current(1);
+       } else {
+               copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+                               ctx->extra_bytes - old_extra_bytes);
+               sg_miter_stop(&p->src_sg_it);
+               if (ctx->last_chunk)
+                       rc = mv_hash_final_fallback(req);
+               else
+                       rc = 0;
+               cpg->eng_st = ENGINE_IDLE;
+               local_bh_disable();
+               req->base.complete(&req->base, rc);
+               local_bh_enable();
+       }
+}
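
A simplified sketch of the hardware/buffer split above (plain C, stand-in names). Only whole SHA1 blocks are sent to the engine on a non-final chunk; the remainder stays in ctx->buffer for the next update. The sketch omits the oversized-stream case, in which the driver also withholds the tail of a final chunk because the digest is finished by the software fallback.

#define BLOCK 64 /* stand-in for SHA1_BLOCK_SIZE */

/* Returns the byte count for the engine; *kept is buffered for later. */
static int split_for_hw(int total, int is_final, int *kept)
{
	*kept = total % BLOCK;
	if (*kept && !is_final)
		total -= *kept; /* engine sees whole blocks only */
	else
		*kept = 0;      /* final chunk: hardware takes it all */
	return total;
}

For example, split_for_hw(150, 0, &kept) sends 128 bytes to hardware and keeps 22; split_for_hw(150, 1, &kept) sends all 150.
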
+
 static int queue_manag(void *data)
 {
        cpg->eng_st = ENGINE_IDLE;
        do {
-               struct ablkcipher_request *req;
                struct crypto_async_request *async_req = NULL;
                struct crypto_async_request *backlog;
 
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
                }
 
                if (async_req) {
-                       req = container_of(async_req,
-                                       struct ablkcipher_request, base);
-                       mv_enqueue_new_req(req);
+                       if (async_req->tfm->__crt_alg->cra_type !=
+                           &crypto_ahash_type) {
+                               struct ablkcipher_request *req =
+                                   container_of(async_req,
+                                                struct ablkcipher_request,
+                                                base);
+                               mv_start_new_crypt_req(req);
+                       } else {
+                               struct ahash_request *req =
+                                   ahash_request_cast(async_req);
+                               mv_start_new_hash_req(req);
+                       }
                        async_req = NULL;
                }
 
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
        return 0;
 }
 
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
 {
        unsigned long flags;
        int ret;
 
        spin_lock_irqsave(&cpg->lock, flags);
-       ret = ablkcipher_enqueue_request(&cpg->queue, req);
+       ret = crypto_enqueue_request(&cpg->queue, req);
        spin_unlock_irqrestore(&cpg->lock, flags);
        wake_up_process(cpg->queue_th);
        return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 0;
 
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
        req_ctx->decrypt = 1;
 
        compute_aes_dec_key(ctx);
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 0;
 
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
        req_ctx->decrypt = 1;
 
        compute_aes_dec_key(ctx);
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
        return 0;
 }
 
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+                                int is_last, unsigned int req_len,
+                                int count_add)
+{
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->op = op;
+       ctx->count = req_len;
+       ctx->first_hash = 1;
+       ctx->last_chunk = is_last;
+       ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+                                  unsigned req_len)
+{
+       ctx->last_chunk = is_last;
+       ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+       mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+                            tfm_ctx->count_add);
+       return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+       if (!req->nbytes)
+               return 0;
+
+       mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+       return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+       struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+       /* dummy buffer of 4 bytes */
+       sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+       /* final() carries no new data, so point src at the dummy sg */
+       ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+       mv_update_hash_req_ctx(ctx, 1, 0);
+       return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+       if (!req->nbytes)
+               return mv_hash_final(req);
+
+       mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+       return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+       mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+                            req->nbytes, tfm_ctx->count_add);
+       return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+                            const void *ostate)
+{
+       const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+       int i;
+       for (i = 0; i < 5; i++) {
+               ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+               ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+       }
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
+                         unsigned int keylen)
+{
+       int rc;
+       struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+       int bs, ds, ss;
+
+       if (!ctx->base_hash)
+               return 0;
+
+       rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+       if (rc)
+               return rc;
+
+       /*
+        * The crypto API exposes no way to extract the ipad/opad from the
+        * fallback tfm, so the computation from the hmac module is
+        * repeated here.
+        */
+       bs = crypto_shash_blocksize(ctx->base_hash);
+       ds = crypto_shash_digestsize(ctx->base_hash);
+       ss = crypto_shash_statesize(ctx->base_hash);
+
+       {
+               struct {
+                       struct shash_desc shash;
+                       char ctx[crypto_shash_descsize(ctx->base_hash)];
+               } desc;
+               unsigned int i;
+               char ipad[ss];
+               char opad[ss];
+
+               desc.shash.tfm = ctx->base_hash;
+               desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+                   CRYPTO_TFM_REQ_MAY_SLEEP;
+
+               if (keylen > bs) {
+                       int err;
+
+                       err = crypto_shash_digest(&desc.shash, key, keylen,
+                                                 ipad);
+                       if (err)
+                               return err;
+
+                       keylen = ds;
+               } else {
+                       memcpy(ipad, key, keylen);
+               }
+
+               memset(ipad + keylen, 0, bs - keylen);
+               memcpy(opad, ipad, bs);
+
+               for (i = 0; i < bs; i++) {
+                       ipad[i] ^= 0x36;
+                       opad[i] ^= 0x5c;
+               }
+
+               rc = crypto_shash_init(&desc.shash) ? :
+                   crypto_shash_update(&desc.shash, ipad, bs) ? :
+                   crypto_shash_export(&desc.shash, ipad) ? :
+                   crypto_shash_init(&desc.shash) ? :
+                   crypto_shash_update(&desc.shash, opad, bs) ? :
+                   crypto_shash_export(&desc.shash, opad);
+
+               if (rc == 0)
+                       mv_hash_init_ivs(ctx, ipad, opad);
+
+               return rc;
+       }
+}
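
For reference, a compact userspace restatement of the pad preparation above (assumed 64-byte block size, no crypto API; keys longer than a block, which the real code first digests down, are simply truncated here). The driver then runs one compression over each pad and exports the midstates as the HMAC inner/outer IVs.

#include <string.h>

#define HMAC_BS 64

static void hmac_pads(const unsigned char *key, size_t keylen,
		      unsigned char ipad[HMAC_BS], unsigned char opad[HMAC_BS])
{
	size_t i;

	memset(ipad, 0, HMAC_BS);
	memcpy(ipad, key, keylen < HMAC_BS ? keylen : HMAC_BS);
	memcpy(opad, ipad, HMAC_BS);

	for (i = 0; i < HMAC_BS; i++) {
		ipad[i] ^= 0x36; /* HMAC inner pad constant */
		opad[i] ^= 0x5c; /* HMAC outer pad constant */
	}
}
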
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+                           enum hash_op op, int count_add)
+{
+       const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+       struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_shash *fallback_tfm = NULL;
+       struct crypto_shash *base_hash = NULL;
+       int err = -ENOMEM;
+
+       ctx->op = op;
+       ctx->count_add = count_add;
+
+       /* Allocate a fallback and abort if it failed. */
+       fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+                                         CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback_tfm)) {
+               printk(KERN_WARNING MV_CESA
+                      "Fallback driver '%s' could not be loaded!\n",
+                      fallback_driver_name);
+               err = PTR_ERR(fallback_tfm);
+               goto out;
+       }
+       ctx->fallback = fallback_tfm;
+
+       if (base_hash_name) {
+               /* Allocate a hash to compute the ipad/opad of hmac. */
+               base_hash = crypto_alloc_shash(base_hash_name, 0,
+                                              CRYPTO_ALG_NEED_FALLBACK);
+               if (IS_ERR(base_hash)) {
+                       printk(KERN_WARNING MV_CESA
+                              "Base driver '%s' could not be loaded!\n",
+                              base_hash_name);
+                       err = PTR_ERR(base_hash);
+                       goto err_bad_base;
+               }
+       }
+       ctx->base_hash = base_hash;
+
+       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+                                sizeof(struct mv_req_hash_ctx) +
+                                crypto_shash_descsize(ctx->fallback));
+       return 0;
+err_bad_base:
+       crypto_free_shash(fallback_tfm);
+out:
+       return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+       struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_shash(ctx->fallback);
+       if (ctx->base_hash)
+               crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+       return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+       return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
 irqreturn_t crypto_int(int irq, void *priv)
 {
        u32 val;
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
        },
 };
 
+struct ahash_alg mv_sha1_alg = {
+       .init = mv_hash_init,
+       .update = mv_hash_update,
+       .final = mv_hash_final,
+       .finup = mv_hash_finup,
+       .digest = mv_hash_digest,
+       .halg = {
+                .digestsize = SHA1_DIGEST_SIZE,
+                .base = {
+                         .cra_name = "sha1",
+                         .cra_driver_name = "mv-sha1",
+                         .cra_priority = 300,
+                         .cra_flags =
+                         CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+                         .cra_blocksize = SHA1_BLOCK_SIZE,
+                         .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+                         .cra_init = mv_cra_hash_sha1_init,
+                         .cra_exit = mv_cra_hash_exit,
+                         .cra_module = THIS_MODULE,
+                         }
+                }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+       .init = mv_hash_init,
+       .update = mv_hash_update,
+       .final = mv_hash_final,
+       .finup = mv_hash_finup,
+       .digest = mv_hash_digest,
+       .setkey = mv_hash_setkey,
+       .halg = {
+                .digestsize = SHA1_DIGEST_SIZE,
+                .base = {
+                         .cra_name = "hmac(sha1)",
+                         .cra_driver_name = "mv-hmac-sha1",
+                         .cra_priority = 300,
+                         .cra_flags =
+                         CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+                         .cra_blocksize = SHA1_BLOCK_SIZE,
+                         .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+                         .cra_init = mv_cra_hash_hmac_sha1_init,
+                         .cra_exit = mv_cra_hash_exit,
+                         .cra_module = THIS_MODULE,
+                         }
+                }
+};
+
 static int mv_probe(struct platform_device *pdev)
 {
        struct crypto_priv *cp;
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
        int ret;
 
        if (cpg) {
-               printk(KERN_ERR "Second crypto dev?\n");
+               printk(KERN_ERR MV_CESA "Second crypto dev?\n");
                return -EEXIST;
        }
 
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev)
        ret = crypto_register_alg(&mv_aes_alg_cbc);
        if (ret)
                goto err_unreg_ecb;
+
+       ret = crypto_register_ahash(&mv_sha1_alg);
+       if (ret == 0)
+               cpg->has_sha1 = 1;
+       else
+               printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+       ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+       if (ret == 0) {
+               cpg->has_hmac_sha1 = 1;
+       } else {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register hmac-sha1 driver\n");
+       }
+
        return 0;
 err_unreg_ecb:
        crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev)
 
        crypto_unregister_alg(&mv_aes_alg_ecb);
        crypto_unregister_alg(&mv_aes_alg_cbc);
+       if (cp->has_sha1)
+               crypto_unregister_ahash(&mv_sha1_alg);
+       if (cp->has_hmac_sha1)
+               crypto_unregister_ahash(&mv_hmac_sha1_alg);
        kthread_stop(cp->queue_th);
        free_irq(cp->irq, cp);
        memset(cp->sram, 0, cp->sram_size);