Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
author		Herbert Xu <herbert@gondor.apana.org.au>
		Mon, 3 May 2010 03:28:58 +0000 (11:28 +0800)
committer	Herbert Xu <herbert@gondor.apana.org.au>
		Mon, 3 May 2010 03:28:58 +0000 (11:28 +0800)
arch/arm/mach-omap2/clock2420_data.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/devices.c
crypto/algapi.c
crypto/tcrypt.c
drivers/crypto/mv_cesa.c
kernel/padata.c

index fc55ab4c32e3fab3adfa69ae81a5cd1525b44875,d932b142d0b66e1b81dd3329c3491476e9ce8a3e..1820a556361bf1e08d219ed43695783db4b58501
@@@ -1836,11 -1836,12 +1836,12 @@@ static struct omap_clk omap2420_clks[] 
        CLK(NULL,       "vlynq_ick",    &vlynq_ick,     CK_242X),
        CLK(NULL,       "vlynq_fck",    &vlynq_fck,     CK_242X),
        CLK(NULL,       "des_ick",      &des_ick,       CK_242X),
 -      CLK(NULL,       "sha_ick",      &sha_ick,       CK_242X),
 +      CLK("omap-sham",        "ick",  &sha_ick,       CK_242X),
        CLK("omap_rng", "ick",          &rng_ick,       CK_242X),
        CLK(NULL,       "aes_ick",      &aes_ick,       CK_242X),
        CLK(NULL,       "pka_ick",      &pka_ick,       CK_242X),
        CLK(NULL,       "usb_fck",      &usb_fck,       CK_242X),
+       CLK("musb_hdrc",        "fck",  &osc_ck,        CK_242X),
  };
  
  /*
index 5a974dcbcecc156853345643be576cba5959491b,9cba5560519b544f91c4d071bbb6cf2520e43fb1..52638df1545b00c65121a830bb31fded555c6596
@@@ -895,7 -895,7 +895,7 @@@ static struct clk dpll4_m4x2_ck = 
        .ops            = &clkops_omap2_dflt_wait,
        .parent         = &dpll4_m4_ck,
        .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_CAM_SHIFT,
+       .enable_bit     = OMAP3430_PWRDN_DSS1_SHIFT,
        .flags          = INVERT_ENABLE,
        .clkdm_name     = "dpll4_clkdm",
        .recalc         = &omap3_clkoutx2_recalc,
@@@ -3360,7 -3360,7 +3360,7 @@@ static struct omap_clk omap3xxx_clks[] 
        CLK("mmci-omap-hs.2",   "ick",  &mmchs3_ick,    CK_3430ES2 | CK_AM35XX),
        CLK(NULL,       "icr_ick",      &icr_ick,       CK_343X),
        CLK(NULL,       "aes2_ick",     &aes2_ick,      CK_343X),
 -      CLK(NULL,       "sha12_ick",    &sha12_ick,     CK_343X),
 +      CLK("omap-sham",        "ick",  &sha12_ick,     CK_343X),
        CLK(NULL,       "des2_ick",     &des2_ick,      CK_343X),
        CLK("mmci-omap-hs.1",   "ick",  &mmchs2_ick,    CK_3XXX),
        CLK("mmci-omap-hs.0",   "ick",  &mmchs1_ick,    CK_3XXX),
index 7e7acc19bed0c624ac5bc96d04c6788786c9fcbd,2271b9bd1f509fc1731aebc01f1a460561ce34eb..beac46c48c5aa1d73fe5ece2cf15a1e6e97c1231
@@@ -26,7 -26,6 +26,7 @@@
  #include <plat/mux.h>
  #include <mach/gpio.h>
  #include <plat/mmc.h>
 +#include <plat/dma.h>
  
  #include "mux.h"
  
@@@ -454,10 -453,8 +454,10 @@@ static void omap_init_mcspi(void
  static inline void omap_init_mcspi(void) {}
  #endif
  
 -#ifdef CONFIG_OMAP_SHA1_MD5
 -static struct resource sha1_md5_resources[] = {
 +#if defined(CONFIG_CRYPTO_DEV_OMAP_SHAM) || defined(CONFIG_CRYPTO_DEV_OMAP_SHAM_MODULE)
 +
 +#ifdef CONFIG_ARCH_OMAP24XX
 +static struct resource omap2_sham_resources[] = {
        {
                .start  = OMAP24XX_SEC_SHA1MD5_BASE,
                .end    = OMAP24XX_SEC_SHA1MD5_BASE + 0x64,
                .flags  = IORESOURCE_MEM,
        }
  };
 +static int omap2_sham_resources_sz = ARRAY_SIZE(omap2_sham_resources);
 +#else
 +#define omap2_sham_resources          NULL
 +#define omap2_sham_resources_sz               0
 +#endif
  
 -static struct platform_device sha1_md5_device = {
 -      .name           = "OMAP SHA1/MD5",
 +#ifdef CONFIG_ARCH_OMAP34XX
 +static struct resource omap3_sham_resources[] = {
 +      {
 +              .start  = OMAP34XX_SEC_SHA1MD5_BASE,
 +              .end    = OMAP34XX_SEC_SHA1MD5_BASE + 0x64,
 +              .flags  = IORESOURCE_MEM,
 +      },
 +      {
 +              .start  = INT_34XX_SHA1MD52_IRQ,
 +              .flags  = IORESOURCE_IRQ,
 +      },
 +      {
 +              .start  = OMAP34XX_DMA_SHA1MD5_RX,
 +              .flags  = IORESOURCE_DMA,
 +      }
 +};
 +static int omap3_sham_resources_sz = ARRAY_SIZE(omap3_sham_resources);
 +#else
 +#define omap3_sham_resources          NULL
 +#define omap3_sham_resources_sz               0
 +#endif
 +
 +static struct platform_device sham_device = {
 +      .name           = "omap-sham",
        .id             = -1,
 -      .num_resources  = ARRAY_SIZE(sha1_md5_resources),
 -      .resource       = sha1_md5_resources,
  };
  
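 +/* Register the omap-sham platform device with the resource set for the running SoC. */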
 -static void omap_init_sha1_md5(void)
 +static void omap_init_sham(void)
  {
 -      platform_device_register(&sha1_md5_device);
 +      if (cpu_is_omap24xx()) {
 +              sham_device.resource = omap2_sham_resources;
 +              sham_device.num_resources = omap2_sham_resources_sz;
 +      } else if (cpu_is_omap34xx()) {
 +              sham_device.resource = omap3_sham_resources;
 +              sham_device.num_resources = omap3_sham_resources_sz;
 +      } else {
 +              pr_err("%s: platform not supported\n", __func__);
 +              return;
 +      }
 +      platform_device_register(&sham_device);
  }
  #else
 -static inline void omap_init_sha1_md5(void) { }
 +static inline void omap_init_sham(void) { }
  #endif
  
  /*-------------------------------------------------------------------------*/
@@@ -764,7 -726,7 +764,7 @@@ void __init omap2_init_mmc(struct omap_
                        if (!cpu_is_omap44xx())
                                return;
                        base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET;
-                       irq = OMAP44XX_IRQ_MMC4;
+                       irq = OMAP44XX_IRQ_MMC5;
                        break;
                default:
                        continue;
@@@ -837,7 -799,7 +837,7 @@@ static int __init omap2_init_devices(vo
        omap_init_mcspi();
        omap_hdq_init();
        omap_init_sti();
 -      omap_init_sha1_md5();
 +      omap_init_sham();
  
        return 0;
  }
diff --combined crypto/algapi.c
index d49d7091cecfdce027c40b4ebea4f2987ff45ef1,76fae27ed01cb9834049e2b641d9e561ef0844e5..c3cf1a69a47a8dcaa57e62410c9823ee4e2d045b
@@@ -17,6 -17,7 +17,7 @@@
  #include <linux/list.h>
  #include <linux/module.h>
  #include <linux/rtnetlink.h>
+ #include <linux/slab.h>
  #include <linux/string.h>
  
  #include "internal.h"
@@@ -543,7 -544,7 +544,7 @@@ int crypto_init_spawn2(struct crypto_sp
  {
        int err = -EINVAL;
  
 -      if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
 +      if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
                goto out;
  
        spawn->frontend = frontend;
diff --combined crypto/tcrypt.c
index 0b7a8435255bd65eb0a64a6301f553df31990be0,a35159947a262f779041fe7bff86671cac00ed98..ea610ad45aa112de0d427c2ccfa9b29d23be139d
@@@ -18,8 -18,8 +18,8 @@@
  #include <crypto/hash.h>
  #include <linux/err.h>
  #include <linux/init.h>
+ #include <linux/gfp.h>
  #include <linux/module.h>
- #include <linux/slab.h>
  #include <linux/scatterlist.h>
  #include <linux/string.h>
  #include <linux/moduleparam.h>
@@@ -437,9 -437,6 +437,9 @@@ static void test_hash_speed(const char 
                        goto out;
                }
  
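 +              /* set the key when the speed template supplies one; keyed hashes like ghash need it */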
 +              if (speed[i].klen)
 +                      crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
 +
                printk(KERN_INFO "test%3u "
                       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
                       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@@ -884,10 -881,6 +884,10 @@@ static int do_test(int m
                test_hash_speed("rmd320", sec, generic_hash_speed_template);
                if (mode > 300 && mode < 400) break;
  
 +      case 318:
 +              test_hash_speed("ghash-generic", sec, hash_speed_template_16);
 +              if (mode > 300 && mode < 400) break;
 +
        case 399:
                break;
  
diff --combined drivers/crypto/mv_cesa.c
index 1cee5a937092678cf12786a277b03b2ce3fe3cbe,6f29012bcc434dbcc1101782072678991d53a509..18a436cafc107f1004cb4686e7ca8946d2f8f2d2
  #include <linux/kthread.h>
  #include <linux/platform_device.h>
  #include <linux/scatterlist.h>
+ #include <linux/slab.h>
 +#include <crypto/internal/hash.h>
 +#include <crypto/sha.h>
  
  #include "mv_cesa.h"
 +
 +#define MV_CESA       "MV-CESA:"
 +#define MAX_HW_HASH_SIZE      0xFFFF
 +
  /*
   * STM:
   *   /---------------------------------------\
@@@ -44,12 -39,10 +45,12 @@@ enum engine_status 
   * @dst_sg_it:                sg iterator for dst
   * @sg_src_left:      bytes left in src to process (scatter list)
   * @src_start:                offset to add to src start position (scatter list)
 - * @crypt_len:                length of current crypt process
 + * @crypt_len:                length of current hw crypt/hash process
 + * @hw_nbytes:                total bytes to process in hw for this request
 + * @copy_back:                whether to copy data back (crypt) or not (hash)
   * @sg_dst_left:      bytes left dst to process in this scatter list
   * @dst_start:                offset to add to dst start position (scatter list)
 - * @total_req_bytes:  total number of bytes processed (request).
 + * @hw_processed_bytes:       number of bytes processed by hw (request).
   *
  * sg helpers are used to iterate over the scatterlist. Since the size of the
  * SRAM may be less than the scatter size, this struct is used to keep
  struct req_progress {
        struct sg_mapping_iter src_sg_it;
        struct sg_mapping_iter dst_sg_it;
 +      void (*complete) (void);
 +      void (*process) (int is_first);
  
        /* src mostly */
        int sg_src_left;
        int src_start;
        int crypt_len;
 +      int hw_nbytes;
        /* dst mostly */
 +      int copy_back;
        int sg_dst_left;
        int dst_start;
 -      int total_req_bytes;
 +      int hw_processed_bytes;
  };
  
  struct crypto_priv {
        spinlock_t lock;
        struct crypto_queue queue;
        enum engine_status eng_st;
 -      struct ablkcipher_request *cur_req;
 +      struct crypto_async_request *cur_req;
        struct req_progress p;
        int max_req_size;
        int sram_size;
 +      int has_sha1;
 +      int has_hmac_sha1;
  };
  
  static struct crypto_priv *cpg;
@@@ -110,31 -97,6 +111,31 @@@ struct mv_req_ctx 
        int decrypt;
  };
  
 +enum hash_op {
 +      COP_SHA1,
 +      COP_HMAC_SHA1
 +};
 +
 +struct mv_tfm_hash_ctx {
 +      struct crypto_shash *fallback;
 +      struct crypto_shash *base_hash;
 +      u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
 +      int count_add;
 +      enum hash_op op;
 +};
 +
 +struct mv_req_hash_ctx {
 +      u64 count;
 +      u32 state[SHA1_DIGEST_SIZE / 4];
 +      u8 buffer[SHA1_BLOCK_SIZE];
 +      int first_hash;         /* marks that we don't have previous state */
 +      int last_chunk;         /* marks that this is the 'final' request */
 +      int extra_bytes;        /* unprocessed bytes in buffer */
 +      enum hash_op op;
 +      int count_add;
 +      struct scatterlist dummysg;
 +};
 +
  static void compute_aes_dec_key(struct mv_ctx *ctx)
  {
        struct crypto_aes_ctx gen_aes_key;
@@@ -182,51 -144,32 +183,51 @@@ static int mv_setkey_aes(struct crypto_
        return 0;
  }
  
 -static void setup_data_in(struct ablkcipher_request *req)
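 +/* Copy len bytes from the request's source scatterlist into dbuf, advancing
 + * the sg mapping iterator across entries as needed. */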
 +static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
  {
        int ret;
 -      void *buf;
 +      void *sbuf;
 +      int copied = 0;
  
 -      if (!cpg->p.sg_src_left) {
 -              ret = sg_miter_next(&cpg->p.src_sg_it);
 -              BUG_ON(!ret);
 -              cpg->p.sg_src_left = cpg->p.src_sg_it.length;
 -              cpg->p.src_start = 0;
 -      }
 -
 -      cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
 -
 -      buf = cpg->p.src_sg_it.addr;
 -      buf += cpg->p.src_start;
 +      while (1) {
 +              if (!p->sg_src_left) {
 +                      ret = sg_miter_next(&p->src_sg_it);
 +                      BUG_ON(!ret);
 +                      p->sg_src_left = p->src_sg_it.length;
 +                      p->src_start = 0;
 +              }
  
 -      memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
 +              sbuf = p->src_sg_it.addr + p->src_start;
 +
 +              if (p->sg_src_left <= len - copied) {
 +                      memcpy(dbuf + copied, sbuf, p->sg_src_left);
 +                      copied += p->sg_src_left;
 +                      p->sg_src_left = 0;
 +                      if (copied >= len)
 +                              break;
 +              } else {
 +                      int copy_len = len - copied;
 +                      memcpy(dbuf + copied, sbuf, copy_len);
 +                      p->src_start += copy_len;
 +                      p->sg_src_left -= copy_len;
 +                      break;
 +              }
 +      }
 +}
  
 -      cpg->p.sg_src_left -= cpg->p.crypt_len;
 -      cpg->p.src_start += cpg->p.crypt_len;
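 +/* Stage the next chunk of input in SRAM: top up to max_req_size on top of
 + * any bytes already staged (p->crypt_len). */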
 +static void setup_data_in(void)
 +{
 +      struct req_progress *p = &cpg->p;
 +      int data_in_sram =
 +          min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
 +      copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
 +                      data_in_sram - p->crypt_len);
 +      p->crypt_len = data_in_sram;
  }
  
  static void mv_process_current_q(int first_block)
  {
 -      struct ablkcipher_request *req = cpg->cur_req;
 +      struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct sec_accel_config op;
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
                break;
        case COP_AES_CBC:
 +      default:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
                op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
                        ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
                ENC_P_DST(SRAM_DATA_OUT_START);
        op.enc_key_p = SRAM_DATA_KEY_P;
  
 -      setup_data_in(req);
 +      setup_data_in();
        op.enc_len = cpg->p.crypt_len;
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));
  
  static void mv_crypto_algo_completion(void)
  {
 -      struct ablkcipher_request *req = cpg->cur_req;
 +      struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
  
 +      sg_miter_stop(&cpg->p.src_sg_it);
 +      sg_miter_stop(&cpg->p.dst_sg_it);
 +
        if (req_ctx->op != COP_AES_CBC)
                return ;
  
        memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
  }
  
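 +/* Program and start the next hash fragment: select SHA1 or HMAC-SHA1, mark
 + * the fragment's position in the stream (first/mid/last) and kick the engine. */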
 +static void mv_process_hash_current(int first_block)
 +{
 +      struct ahash_request *req = ahash_request_cast(cpg->cur_req);
 +      struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
 +      struct req_progress *p = &cpg->p;
 +      struct sec_accel_config op = { 0 };
 +      int is_last;
 +
 +      switch (req_ctx->op) {
 +      case COP_SHA1:
 +      default:
 +              op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
 +              break;
 +      case COP_HMAC_SHA1:
 +              op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
 +              break;
 +      }
 +
 +      op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
 +              MAC_SRC_TOTAL_LEN((u32)req_ctx->count);
 +
 +      setup_data_in();
 +
 +      op.mac_digest =
 +              MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
 +      op.mac_iv =
 +              MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
 +              MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
 +
 +      is_last = req_ctx->last_chunk
 +              && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
 +              && (req_ctx->count <= MAX_HW_HASH_SIZE);
 +      if (req_ctx->first_hash) {
 +              if (is_last)
 +                      op.config |= CFG_NOT_FRAG;
 +              else
 +                      op.config |= CFG_FIRST_FRAG;
 +
 +              req_ctx->first_hash = 0;
 +      } else {
 +              if (is_last)
 +                      op.config |= CFG_LAST_FRAG;
 +              else
 +                      op.config |= CFG_MID_FRAG;
 +      }
 +
 +      memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
 +
 +      writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 +      /* GO */
 +      writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 +
 +      /*
 +       * XXX: add timer if the interrupt does not occur for some mystery
 +       * reason
 +       */
 +}
 +
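 +/* Rebuild a software sha1_state from the hardware state saved in the request
 + * context so the shash fallback can continue where the engine stopped. */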
 +static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
 +                                        struct shash_desc *desc)
 +{
 +      int i;
 +      struct sha1_state shash_state;
 +
 +      shash_state.count = ctx->count + ctx->count_add;
 +      for (i = 0; i < 5; i++)
 +              shash_state.state[i] = ctx->state[i];
 +      memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
 +      return crypto_shash_import(desc, &shash_state);
 +}
 +
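 +/* Finish a digest in software: either hash the short buffered data from
 + * scratch or import the saved hardware state first. */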
 +static int mv_hash_final_fallback(struct ahash_request *req)
 +{
 +      const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 +      struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
 +      struct {
 +              struct shash_desc shash;
 +              char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
 +      } desc;
 +      int rc;
 +
 +      desc.shash.tfm = tfm_ctx->fallback;
 +      desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 +      if (unlikely(req_ctx->first_hash)) {
 +              crypto_shash_init(&desc.shash);
 +              crypto_shash_update(&desc.shash, req_ctx->buffer,
 +                                  req_ctx->extra_bytes);
 +      } else {
 +              /* only SHA1 for now.... */
 +              rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
 +              if (rc)
 +                      goto out;
 +      }
 +      rc = crypto_shash_final(&desc.shash, req->result);
 +out:
 +      return rc;
 +}
 +
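 +/* Completion for hash requests: buffer unprocessed tail bytes, save the
 + * intermediate state from the digest registers and, on the last chunk, emit
 + * the final digest (via the fallback for oversized requests). */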
 +static void mv_hash_algo_completion(void)
 +{
 +      struct ahash_request *req = ahash_request_cast(cpg->cur_req);
 +      struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
 +
 +      if (ctx->extra_bytes)
 +              copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
 +      sg_miter_stop(&cpg->p.src_sg_it);
 +
 +      ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
 +      ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
 +      ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
 +      ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
 +      ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
 +
 +      if (likely(ctx->last_chunk)) {
 +              if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
 +                      memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
 +                             crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
 +              } else
 +                      mv_hash_final_fallback(req);
 +      }
 +}
 +
  static void dequeue_complete_req(void)
  {
 -      struct ablkcipher_request *req = cpg->cur_req;
 +      struct crypto_async_request *req = cpg->cur_req;
        void *buf;
        int ret;
 +      cpg->p.hw_processed_bytes += cpg->p.crypt_len;
 +      if (cpg->p.copy_back) {
 +              int need_copy_len = cpg->p.crypt_len;
 +              int sram_offset = 0;
 +              do {
 +                      int dst_copy;
 +
 +                      if (!cpg->p.sg_dst_left) {
 +                              ret = sg_miter_next(&cpg->p.dst_sg_it);
 +                              BUG_ON(!ret);
 +                              cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
 +                              cpg->p.dst_start = 0;
 +                      }
  
 -      cpg->p.total_req_bytes += cpg->p.crypt_len;
 -      do {
 -              int dst_copy;
 -
 -              if (!cpg->p.sg_dst_left) {
 -                      ret = sg_miter_next(&cpg->p.dst_sg_it);
 -                      BUG_ON(!ret);
 -                      cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
 -                      cpg->p.dst_start = 0;
 -              }
 -
 -              buf = cpg->p.dst_sg_it.addr;
 -              buf += cpg->p.dst_start;
 +                      buf = cpg->p.dst_sg_it.addr;
 +                      buf += cpg->p.dst_start;
  
 -              dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
 +                      dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
  
 -              memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
 +                      memcpy(buf,
 +                             cpg->sram + SRAM_DATA_OUT_START + sram_offset,
 +                             dst_copy);
 +                      sram_offset += dst_copy;
 +                      cpg->p.sg_dst_left -= dst_copy;
 +                      need_copy_len -= dst_copy;
 +                      cpg->p.dst_start += dst_copy;
 +              } while (need_copy_len > 0);
 +      }
  
 -              cpg->p.sg_dst_left -= dst_copy;
 -              cpg->p.crypt_len -= dst_copy;
 -              cpg->p.dst_start += dst_copy;
 -      } while (cpg->p.crypt_len > 0);
 +      cpg->p.crypt_len = 0;
  
        BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
 -      if (cpg->p.total_req_bytes < req->nbytes) {
 +      if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
                /* process next scatter list entry */
                cpg->eng_st = ENGINE_BUSY;
 -              mv_process_current_q(0);
 +              cpg->p.process(0);
        } else {
 -              sg_miter_stop(&cpg->p.src_sg_it);
 -              sg_miter_stop(&cpg->p.dst_sg_it);
 -              mv_crypto_algo_completion();
 +              cpg->p.complete();
                cpg->eng_st = ENGINE_IDLE;
 -              req->base.complete(&req->base, 0);
 +              local_bh_disable();
 +              req->complete(req, 0);
 +              local_bh_enable();
        }
  }
  
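 +/* Return the number of scatterlist entries needed to cover total_bytes. */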
  static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
  {
        int i = 0;
 -
 -      do {
 -              total_bytes -= sl[i].length;
 -              i++;
 -
 -      } while (total_bytes > 0);
 +      size_t cur_len;
 +
 +      while (1) {
 +              cur_len = sl[i].length;
 +              ++i;
 +              if (total_bytes > cur_len)
 +                      total_bytes -= cur_len;
 +              else
 +                      break;
 +      }
  
        return i;
  }
  
 -static void mv_enqueue_new_req(struct ablkcipher_request *req)
 +static void mv_start_new_crypt_req(struct ablkcipher_request *req)
  {
 +      struct req_progress *p = &cpg->p;
        int num_sgs;
  
 -      cpg->cur_req = req;
 -      memset(&cpg->p, 0, sizeof(struct req_progress));
 +      cpg->cur_req = &req->base;
 +      memset(p, 0, sizeof(struct req_progress));
 +      p->hw_nbytes = req->nbytes;
 +      p->complete = mv_crypto_algo_completion;
 +      p->process = mv_process_current_q;
 +      p->copy_back = 1;
  
        num_sgs = count_sgs(req->src, req->nbytes);
 -      sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
 +      sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
  
        num_sgs = count_sgs(req->dst, req->nbytes);
 -      sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
 +      sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
 +
        mv_process_current_q(1);
  }
  
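 +/* Start servicing a hash request: stage previously buffered bytes, restore
 + * saved state, keep a partial trailing block in the buffer and run the rest
 + * through the engine; tiny requests are completed via the fallback. */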
 +static void mv_start_new_hash_req(struct ahash_request *req)
 +{
 +      struct req_progress *p = &cpg->p;
 +      struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
 +      const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 +      int num_sgs, hw_bytes, old_extra_bytes, rc;
 +      cpg->cur_req = &req->base;
 +      memset(p, 0, sizeof(struct req_progress));
 +      hw_bytes = req->nbytes + ctx->extra_bytes;
 +      old_extra_bytes = ctx->extra_bytes;
 +
 +      if (unlikely(ctx->extra_bytes)) {
 +              memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
 +                     ctx->extra_bytes);
 +              p->crypt_len = ctx->extra_bytes;
 +      }
 +
 +      memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
 +
 +      if (unlikely(!ctx->first_hash)) {
 +              writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
 +              writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
 +              writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
 +              writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
 +              writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
 +      }
 +
 +      ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
 +      if (ctx->extra_bytes != 0
 +          && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
 +              hw_bytes -= ctx->extra_bytes;
 +      else
 +              ctx->extra_bytes = 0;
 +
 +      num_sgs = count_sgs(req->src, req->nbytes);
 +      sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
 +
 +      if (hw_bytes) {
 +              p->hw_nbytes = hw_bytes;
 +              p->complete = mv_hash_algo_completion;
 +              p->process = mv_process_hash_current;
 +
 +              mv_process_hash_current(1);
 +      } else {
 +              copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
 +                              ctx->extra_bytes - old_extra_bytes);
 +              sg_miter_stop(&p->src_sg_it);
 +              if (ctx->last_chunk)
 +                      rc = mv_hash_final_fallback(req);
 +              else
 +                      rc = 0;
 +              cpg->eng_st = ENGINE_IDLE;
 +              local_bh_disable();
 +              req->base.complete(&req->base, rc);
 +              local_bh_enable();
 +      }
 +}
 +
  static int queue_manag(void *data)
  {
        cpg->eng_st = ENGINE_IDLE;
        do {
 -              struct ablkcipher_request *req;
                struct crypto_async_request *async_req = NULL;
                struct crypto_async_request *backlog;
  
                }
  
                if (async_req) {
 -                      req = container_of(async_req,
 -                                      struct ablkcipher_request, base);
 -                      mv_enqueue_new_req(req);
 +                      if (async_req->tfm->__crt_alg->cra_type !=
 +                          &crypto_ahash_type) {
 +                              struct ablkcipher_request *req =
 +                                  container_of(async_req,
 +                                               struct ablkcipher_request,
 +                                               base);
 +                              mv_start_new_crypt_req(req);
 +                      } else {
 +                              struct ahash_request *req =
 +                                  ahash_request_cast(async_req);
 +                              mv_start_new_hash_req(req);
 +                      }
                        async_req = NULL;
                }
  
        return 0;
  }
  
 -static int mv_handle_req(struct ablkcipher_request *req)
 +static int mv_handle_req(struct crypto_async_request *req)
  {
        unsigned long flags;
        int ret;
  
        spin_lock_irqsave(&cpg->lock, flags);
 -      ret = ablkcipher_enqueue_request(&cpg->queue, req);
 +      ret = crypto_enqueue_request(&cpg->queue, req);
        spin_unlock_irqrestore(&cpg->lock, flags);
        wake_up_process(cpg->queue_th);
        return ret;
@@@ -639,7 -369,7 +640,7 @@@ static int mv_enc_aes_ecb(struct ablkci
        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 0;
  
 -      return mv_handle_req(req);
 +      return mv_handle_req(&req->base);
  }
  
  static int mv_dec_aes_ecb(struct ablkcipher_request *req)
        req_ctx->decrypt = 1;
  
        compute_aes_dec_key(ctx);
 -      return mv_handle_req(req);
 +      return mv_handle_req(&req->base);
  }
  
  static int mv_enc_aes_cbc(struct ablkcipher_request *req)
        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 0;
  
 -      return mv_handle_req(req);
 +      return mv_handle_req(&req->base);
  }
  
  static int mv_dec_aes_cbc(struct ablkcipher_request *req)
        req_ctx->decrypt = 1;
  
        compute_aes_dec_key(ctx);
 -      return mv_handle_req(req);
 +      return mv_handle_req(&req->base);
  }
  
  static int mv_cra_init(struct crypto_tfm *tfm)
        return 0;
  }
  
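 +/* Reset the per-request hash context; used by both the init and digest paths. */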
 +static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
 +                               int is_last, unsigned int req_len,
 +                               int count_add)
 +{
 +      memset(ctx, 0, sizeof(*ctx));
 +      ctx->op = op;
 +      ctx->count = req_len;
 +      ctx->first_hash = 1;
 +      ctx->last_chunk = is_last;
 +      ctx->count_add = count_add;
 +}
 +
 +static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
 +                                 unsigned req_len)
 +{
 +      ctx->last_chunk = is_last;
 +      ctx->count += req_len;
 +}
 +
 +static int mv_hash_init(struct ahash_request *req)
 +{
 +      const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 +      mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
 +                           tfm_ctx->count_add);
 +      return 0;
 +}
 +
 +static int mv_hash_update(struct ahash_request *req)
 +{
 +      if (!req->nbytes)
 +              return 0;
 +
 +      mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
 +      return mv_handle_req(&req->base);
 +}
 +
 +static int mv_hash_final(struct ahash_request *req)
 +{
 +      struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
 +      /* dummy buffer of 4 bytes */
 +      sg_init_one(&ctx->dummysg, ctx->buffer, 4);
 +      /* I think I'm allowed to do that... */
 +      ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
 +      mv_update_hash_req_ctx(ctx, 1, 0);
 +      return mv_handle_req(&req->base);
 +}
 +
 +static int mv_hash_finup(struct ahash_request *req)
 +{
 +      if (!req->nbytes)
 +              return mv_hash_final(req);
 +
 +      mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
 +      return mv_handle_req(&req->base);
 +}
 +
 +static int mv_hash_digest(struct ahash_request *req)
 +{
 +      const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 +      mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
 +                           req->nbytes, tfm_ctx->count_add);
 +      return mv_handle_req(&req->base);
 +}
 +
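 +/* Cache the inner/outer HMAC hash states as big-endian IVs for the engine. */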
 +static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
 +                           const void *ostate)
 +{
 +      const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
 +      int i;
 +      for (i = 0; i < 5; i++) {
 +              ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
 +              ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
 +      }
 +}
 +
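 +/* HMAC setkey: hash one block of key^ipad and key^opad in software and keep
 + * the resulting states; the engine loads them as inner/outer IVs. */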
 +static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
 +                        unsigned int keylen)
 +{
 +      int rc;
 +      struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
 +      int bs, ds, ss;
 +
 +      if (!ctx->base_hash)
 +              return 0;
 +
 +      rc = crypto_shash_setkey(ctx->fallback, key, keylen);
 +      if (rc)
 +              return rc;
 +
 +      /*
 +       * Can't see a way to extract the ipad/opad from the fallback tfm
 +       * so I'm basically copying code from the hmac module.
 +       */
 +      bs = crypto_shash_blocksize(ctx->base_hash);
 +      ds = crypto_shash_digestsize(ctx->base_hash);
 +      ss = crypto_shash_statesize(ctx->base_hash);
 +
 +      {
 +              struct {
 +                      struct shash_desc shash;
 +                      char ctx[crypto_shash_descsize(ctx->base_hash)];
 +              } desc;
 +              unsigned int i;
 +              char ipad[ss];
 +              char opad[ss];
 +
 +              desc.shash.tfm = ctx->base_hash;
 +              desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
 +                  CRYPTO_TFM_REQ_MAY_SLEEP;
 +
 +              if (keylen > bs) {
 +                      int err;
 +
 +                      err = crypto_shash_digest(&desc.shash, key, keylen,
 +                                                ipad);
 +                      if (err)
 +                              return err;
 +
 +                      keylen = ds;
 +              } else
 +                      memcpy(ipad, key, keylen);
 +
 +              memset(ipad + keylen, 0, bs - keylen);
 +              memcpy(opad, ipad, bs);
 +
 +              for (i = 0; i < bs; i++) {
 +                      ipad[i] ^= 0x36;
 +                      opad[i] ^= 0x5c;
 +              }
 +
 +              rc = crypto_shash_init(&desc.shash) ? :
 +                  crypto_shash_update(&desc.shash, ipad, bs) ? :
 +                  crypto_shash_export(&desc.shash, ipad) ? :
 +                  crypto_shash_init(&desc.shash) ? :
 +                  crypto_shash_update(&desc.shash, opad, bs) ? :
 +                  crypto_shash_export(&desc.shash, opad);
 +
 +              if (rc == 0)
 +                      mv_hash_init_ivs(ctx, ipad, opad);
 +
 +              return rc;
 +      }
 +}
 +
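 +/* Common tfm init: allocate the software fallback (same algorithm name) and,
 + * for HMAC, the base hash used to compute the ipad/opad states. */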
 +static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
 +                          enum hash_op op, int count_add)
 +{
 +      const char *fallback_driver_name = tfm->__crt_alg->cra_name;
 +      struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 +      struct crypto_shash *fallback_tfm = NULL;
 +      struct crypto_shash *base_hash = NULL;
 +      int err = -ENOMEM;
 +
 +      ctx->op = op;
 +      ctx->count_add = count_add;
 +
 +      /* Allocate a fallback and abort if it failed. */
 +      fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
 +                                        CRYPTO_ALG_NEED_FALLBACK);
 +      if (IS_ERR(fallback_tfm)) {
 +              printk(KERN_WARNING MV_CESA
 +                     "Fallback driver '%s' could not be loaded!\n",
 +                     fallback_driver_name);
 +              err = PTR_ERR(fallback_tfm);
 +              goto out;
 +      }
 +      ctx->fallback = fallback_tfm;
 +
 +      if (base_hash_name) {
 +              /* Allocate a hash to compute the ipad/opad of hmac. */
 +              base_hash = crypto_alloc_shash(base_hash_name, 0,
 +                                             CRYPTO_ALG_NEED_FALLBACK);
 +              if (IS_ERR(base_hash)) {
 +                      printk(KERN_WARNING MV_CESA
 +                             "Base driver '%s' could not be loaded!\n",
 +                             base_hash_name);
 +                      err = PTR_ERR(base_hash);
 +                      goto err_bad_base;
 +              }
 +      }
 +      ctx->base_hash = base_hash;
 +
 +      crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 +                               sizeof(struct mv_req_hash_ctx) +
 +                               crypto_shash_descsize(ctx->fallback));
 +      return 0;
 +err_bad_base:
 +      crypto_free_shash(fallback_tfm);
 +out:
 +      return err;
 +}
 +
 +static void mv_cra_hash_exit(struct crypto_tfm *tfm)
 +{
 +      struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 +
 +      crypto_free_shash(ctx->fallback);
 +      if (ctx->base_hash)
 +              crypto_free_shash(ctx->base_hash);
 +}
 +
 +static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
 +{
 +      return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
 +}
 +
 +static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
 +{
 +      return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
 +}
 +
  irqreturn_t crypto_int(int irq, void *priv)
  {
        u32 val;
@@@ -953,53 -474,6 +954,53 @@@ struct crypto_alg mv_aes_alg_cbc = 
        },
  };
  
 +struct ahash_alg mv_sha1_alg = {
 +      .init = mv_hash_init,
 +      .update = mv_hash_update,
 +      .final = mv_hash_final,
 +      .finup = mv_hash_finup,
 +      .digest = mv_hash_digest,
 +      .halg = {
 +               .digestsize = SHA1_DIGEST_SIZE,
 +               .base = {
 +                        .cra_name = "sha1",
 +                        .cra_driver_name = "mv-sha1",
 +                        .cra_priority = 300,
 +                        .cra_flags =
 +                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 +                        .cra_blocksize = SHA1_BLOCK_SIZE,
 +                        .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
 +                        .cra_init = mv_cra_hash_sha1_init,
 +                        .cra_exit = mv_cra_hash_exit,
 +                        .cra_module = THIS_MODULE,
 +                        }
 +               }
 +};
 +
 +struct ahash_alg mv_hmac_sha1_alg = {
 +      .init = mv_hash_init,
 +      .update = mv_hash_update,
 +      .final = mv_hash_final,
 +      .finup = mv_hash_finup,
 +      .digest = mv_hash_digest,
 +      .setkey = mv_hash_setkey,
 +      .halg = {
 +               .digestsize = SHA1_DIGEST_SIZE,
 +               .base = {
 +                        .cra_name = "hmac(sha1)",
 +                        .cra_driver_name = "mv-hmac-sha1",
 +                        .cra_priority = 300,
 +                        .cra_flags =
 +                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 +                        .cra_blocksize = SHA1_BLOCK_SIZE,
 +                        .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
 +                        .cra_init = mv_cra_hash_hmac_sha1_init,
 +                        .cra_exit = mv_cra_hash_exit,
 +                        .cra_module = THIS_MODULE,
 +                        }
 +               }
 +};
 +
  static int mv_probe(struct platform_device *pdev)
  {
        struct crypto_priv *cp;
        int ret;
  
        if (cpg) {
 -              printk(KERN_ERR "Second crypto dev?\n");
 +              printk(KERN_ERR MV_CESA "Second crypto dev?\n");
                return -EEXIST;
        }
  
        ret = crypto_register_alg(&mv_aes_alg_cbc);
        if (ret)
                goto err_unreg_ecb;
 +
 +      ret = crypto_register_ahash(&mv_sha1_alg);
 +      if (ret == 0)
 +              cpg->has_sha1 = 1;
 +      else
 +              printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
 +
 +      ret = crypto_register_ahash(&mv_hmac_sha1_alg);
 +      if (ret == 0) {
 +              cpg->has_hmac_sha1 = 1;
 +      } else {
 +              printk(KERN_WARNING MV_CESA
 +                     "Could not register hmac-sha1 driver\n");
 +      }
 +
        return 0;
  err_unreg_ecb:
        crypto_unregister_alg(&mv_aes_alg_ecb);
@@@ -1111,10 -570,6 +1112,10 @@@ static int mv_remove(struct platform_de
  
        crypto_unregister_alg(&mv_aes_alg_ecb);
        crypto_unregister_alg(&mv_aes_alg_cbc);
 +      if (cp->has_sha1)
 +              crypto_unregister_ahash(&mv_sha1_alg);
 +      if (cp->has_hmac_sha1)
 +              crypto_unregister_ahash(&mv_hmac_sha1_alg);
        kthread_stop(cp->queue_th);
        free_irq(cp->irq, cp);
        memset(cp->sram, 0, cp->sram_size);
diff --combined kernel/padata.c
index 5085046d83fb5a5762838c91209d885e0ec950a9,fd03513c7327f548c9eb990bfa94e78196fbefe8..5b44d0fa358e355b6de6335ec9691af340e8baca
  #include <linux/padata.h>
  #include <linux/mutex.h>
  #include <linux/sched.h>
+ #include <linux/slab.h>
  #include <linux/rcupdate.h>
  
  #define MAX_SEQ_NR INT_MAX - NR_CPUS
 -#define MAX_OBJ_NUM 10000 * NR_CPUS
 +#define MAX_OBJ_NUM 1000
  
  static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
  {
@@@ -569,8 -570,8 +570,8 @@@ void padata_stop(struct padata_instanc
  }
  EXPORT_SYMBOL(padata_stop);
  
 -static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
 -                                       unsigned long action, void *hcpu)
 +static int padata_cpu_callback(struct notifier_block *nfb,
 +                             unsigned long action, void *hcpu)
  {
        int err;
        struct padata_instance *pinst;
@@@ -642,6 -643,9 +643,9 @@@ struct padata_instance *padata_alloc(co
        if (!pd)
                goto err_free_inst;
  
+       if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL))
+               goto err_free_pd;
        rcu_assign_pointer(pinst->pd, pd);
  
        pinst->wq = wq;
        pinst->cpu_notifier.priority = 0;
        err = register_hotcpu_notifier(&pinst->cpu_notifier);
        if (err)
-               goto err_free_pd;
+               goto err_free_cpumask;
  
        mutex_init(&pinst->lock);
  
        return pinst;
  
+ err_free_cpumask:
+       free_cpumask_var(pinst->cpumask);
  err_free_pd:
        padata_free_pd(pd);
  err_free_inst:
@@@ -685,6 -691,7 +691,7 @@@ void padata_free(struct padata_instanc
  
        unregister_hotcpu_notifier(&pinst->cpu_notifier);
        padata_free_pd(pinst->pd);
+       free_cpumask_var(pinst->cpumask);
        kfree(pinst);
  }
  EXPORT_SYMBOL(padata_free);