/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for use
 * with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

#define PPC4XX_SEC_VERSION_STR                  "0.5"

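/*
 * Note on the descriptor rings (summary derived from the code below): the
 * driver keeps three coherent-DMA rings in sync with the engine - a packet
 * descriptor ring (PDR), which crypto4xx_hw_init() also programs as the
 * result ring (RDR), a gather ring (GDR) for multi-element source
 * scatterlists, and a scatter ring (SDR) supplying destination buffers.
 * Submission advances the head indices; the completion tasklet advances
 * the tails.
 */
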
/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
        union ce_ring_size ring_size;
        union ce_ring_contol ring_ctrl;
        union ce_part_ring_size part_ring_size;
        union ce_io_threshold io_threshold;
        u32 rand_num;
        union ce_pe_dma_cfg pe_dma_cfg;
        u32 device_ctrl;

        writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
        /* setup pe dma: first assert reset of sg, pdr and pe, then release it */
        pe_dma_cfg.w = 0;
        pe_dma_cfg.bf.bo_sgpd_en = 1;
        pe_dma_cfg.bf.bo_data_en = 0;
        pe_dma_cfg.bf.bo_sa_en = 1;
        pe_dma_cfg.bf.bo_pd_en = 1;
        pe_dma_cfg.bf.dynamic_sa_en = 1;
        pe_dma_cfg.bf.reset_sg = 1;
        pe_dma_cfg.bf.reset_pdr = 1;
        pe_dma_cfg.bf.reset_pe = 1;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* take sg, pdr and pe out of reset */
        pe_dma_cfg.bf.pe_mode = 0;
        pe_dma_cfg.bf.reset_sg = 0;
        pe_dma_cfg.bf.reset_pdr = 0;
        pe_dma_cfg.bf.reset_pe = 0;
        pe_dma_cfg.bf.bo_td_en = 0;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
        writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
        writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
        get_random_bytes(&rand_num, sizeof(rand_num));
        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
        get_random_bytes(&rand_num, sizeof(rand_num));
        writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
        ring_size.w = 0;
        ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
        ring_size.bf.ring_size   = PPC4XX_NUM_PD;
        writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
        ring_ctrl.w = 0;
        writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
        device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
        device_ctrl |= PPC4XX_DC_3DES_EN;
        writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
        writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
        writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
        part_ring_size.w = 0;
        part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
        part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
        writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
        writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
        io_threshold.w = 0;
        io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
        io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
        writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
        writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
        writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
        /* enable the packet engine; sg, pdr and pe stay out of reset */
        pe_dma_cfg.bf.pe_mode = 1;
        pe_dma_cfg.bf.reset_sg = 0;
        pe_dma_cfg.bf.reset_pdr = 0;
        pe_dma_cfg.bf.reset_pe = 0;
        pe_dma_cfg.bf.bo_td_en = 0;
        writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* clear all pending interrupts */
        writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
        writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
        writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
        writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}

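/*
 * SA (security association) buffers are allocated in pairs, one for each
 * direction; @size is in 32-bit words, hence the "size * 4" byte counts.
 */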
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
        ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
                                        &ctx->sa_in_dma_addr, GFP_ATOMIC);
        if (ctx->sa_in == NULL)
                return -ENOMEM;

        ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
                                         &ctx->sa_out_dma_addr, GFP_ATOMIC);
        if (ctx->sa_out == NULL) {
                dma_free_coherent(ctx->dev->core_dev->device,
                                  size * 4,
                                  ctx->sa_in, ctx->sa_in_dma_addr);
                return -ENOMEM;
        }

        memset(ctx->sa_in, 0, size * 4);
        memset(ctx->sa_out, 0, size * 4);
        ctx->sa_len = size;

        return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
        if (ctx->sa_in != NULL)
                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
                                  ctx->sa_in, ctx->sa_in_dma_addr);
        if (ctx->sa_out != NULL)
                dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
                                  ctx->sa_out, ctx->sa_out_dma_addr);

        ctx->sa_in_dma_addr = 0;
        ctx->sa_out_dma_addr = 0;
        ctx->sa_len = 0;
}

u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
        ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
                                sizeof(struct sa_state_record),
                                &ctx->state_record_dma_addr, GFP_ATOMIC);
        if (!ctx->state_record)
                return -ENOMEM;
        memset(ctx->state_record, 0, sizeof(struct sa_state_record));

        return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
        if (ctx->state_record != NULL)
                dma_free_coherent(ctx->dev->core_dev->device,
                                  sizeof(struct sa_state_record),
                                  ctx->state_record,
                                  ctx->state_record_dma_addr);
        ctx->state_record_dma_addr = 0;
}

/**
 * alloc memory for the packet descriptor ring, its per-descriptor
 * bookkeeping (pd_uinfo) and the shadow SA and state record pools
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
        int i;
        struct pd_uinfo *pd_uinfo;

        dev->pdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                                      &dev->pdr_pa, GFP_ATOMIC);
        if (!dev->pdr)
                return -ENOMEM;

        dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
                                GFP_KERNEL);
        if (!dev->pdr_uinfo)
                goto err_free_pdr;

        memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
        dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
                                   256 * PPC4XX_NUM_PD,
                                   &dev->shadow_sa_pool_pa,
                                   GFP_ATOMIC);
        if (!dev->shadow_sa_pool)
                goto err_free_uinfo;

        dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
                         sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
                         &dev->shadow_sr_pool_pa, GFP_ATOMIC);
        if (!dev->shadow_sr_pool)
                goto err_free_sa_pool;

        for (i = 0; i < PPC4XX_NUM_PD; i++) {
                pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
                                                sizeof(struct pd_uinfo) * i);

                /* alloc 256 bytes which is enough for any kind of dynamic sa */
                pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
                pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

                /* alloc state record */
                pd_uinfo->sr_va = dev->shadow_sr_pool +
                    sizeof(struct sa_state_record) * i;
                pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
                    sizeof(struct sa_state_record) * i;
        }

        return 0;

err_free_sa_pool:
        dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
                          dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
err_free_uinfo:
        kfree(dev->pdr_uinfo);
err_free_pdr:
        dma_free_coherent(dev->core_dev->device,
                          sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                          dev->pdr, dev->pdr_pa);
        return -ENOMEM;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
        if (dev->pdr != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
                                  dev->pdr, dev->pdr_pa);
        if (dev->shadow_sa_pool)
                dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
                                  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
        if (dev->shadow_sr_pool)
                dma_free_coherent(dev->core_dev->device,
                        sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
                        dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

        kfree(dev->pdr_uinfo);
}

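/*
 * Claim the next free packet descriptor slot. One slot is deliberately
 * left unused so that head == tail always means "empty", never "full".
 * Caller must hold core_dev->lock (see crypto4xx_build_pd()).
 */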
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
        u32 retval;
        u32 tmp;

        retval = dev->pdr_head;
        tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

        if (tmp == dev->pdr_tail)
                return ERING_WAS_FULL;

        dev->pdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
        struct pd_uinfo *pd_uinfo;
        unsigned long flags;

        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
                                       sizeof(struct pd_uinfo) * idx);
        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->pdr_tail != PPC4XX_LAST_PD)
                dev->pdr_tail++;
        else
                dev->pdr_tail = 0;
        pd_uinfo->state = PD_ENTRY_FREE;
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
                                       dma_addr_t *pd_dma, u32 idx)
{
        *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

        return dev->pdr + sizeof(struct ce_pd) * idx;
}

/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail and gdr_head start at zero (the device structure is zeroed)
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
        dev->gdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
                                      &dev->gdr_pa, GFP_ATOMIC);
        if (!dev->gdr)
                return -ENOMEM;

        memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

        return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
        dma_free_coherent(dev->core_dev->device,
                          sizeof(struct ce_gd) * PPC4XX_NUM_GD,
                          dev->gdr, dev->gdr_pa);
}

/*
 * Must be called with core_dev->lock held (preemption and interrupts
 * disabled).
 */
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
        u32 retval;
        u32 tmp;

        if (n >= PPC4XX_NUM_GD)
                return ERING_WAS_FULL;

        retval = dev->gdr_head;
        tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
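        /*
         * Refuse the request if advancing the head by n slots would cross
         * the tail. For example, assuming PPC4XX_NUM_GD is 256: with
         * head = 250, tail = 10 and n = 20, tmp = (250 + 20) % 256 = 14,
         * which lies in [tail, head), so the ring cannot hold 20 more
         * descriptors and ERING_WAS_FULL is returned.
         */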
        if (dev->gdr_head > dev->gdr_tail) {
                if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
                        return ERING_WAS_FULL;
        } else if (dev->gdr_head < dev->gdr_tail) {
                if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
                        return ERING_WAS_FULL;
        }
        dev->gdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->gdr_tail == dev->gdr_head) {
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return 0;
        }

        if (dev->gdr_tail != PPC4XX_LAST_GD)
                dev->gdr_tail++;
        else
                dev->gdr_tail = 0;

        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
                                              dma_addr_t *gd_dma, u32 idx)
{
        *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

        return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}

/**
 * alloc memory for the scatter ring and the buffers its entries point
 * at (sd->ptr is pre-programmed here, one fixed buffer per entry)
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
        int i;
        struct ce_sd *sd_array;

        /* alloc memory for scatter descriptor ring */
        dev->sdr = dma_alloc_coherent(dev->core_dev->device,
                                      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                      &dev->sdr_pa, GFP_ATOMIC);
        if (!dev->sdr)
                return -ENOMEM;

        dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
        dev->scatter_buffer_va =
                dma_alloc_coherent(dev->core_dev->device,
                        dev->scatter_buffer_size * PPC4XX_NUM_SD,
                        &dev->scatter_buffer_pa, GFP_ATOMIC);
        if (!dev->scatter_buffer_va) {
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                  dev->sdr, dev->sdr_pa);
                return -ENOMEM;
        }

        sd_array = dev->sdr;

        for (i = 0; i < PPC4XX_NUM_SD; i++) {
                sd_array[i].ptr = dev->scatter_buffer_pa +
                                  dev->scatter_buffer_size * i;
        }

        return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
        if (dev->sdr != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
                                  dev->sdr, dev->sdr_pa);

        if (dev->scatter_buffer_va != NULL)
                dma_free_coherent(dev->core_dev->device,
                                  dev->scatter_buffer_size * PPC4XX_NUM_SD,
                                  dev->scatter_buffer_va,
                                  dev->scatter_buffer_pa);
}

/*
 * Must be called with core_dev->lock held (preemption and interrupts
 * disabled). Same ring-full test as crypto4xx_get_n_gd().
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
        u32 retval;
        u32 tmp;

        if (n >= PPC4XX_NUM_SD)
                return ERING_WAS_FULL;

        retval = dev->sdr_head;
        tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
        if (dev->sdr_head > dev->sdr_tail) {
                if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
                        return ERING_WAS_FULL;
        } else if (dev->sdr_head < dev->sdr_tail) {
                if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
                        return ERING_WAS_FULL;
        } /* the head == tail (empty ring) case needs no further check */
        dev->sdr_head = tmp;

        return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (dev->sdr_tail == dev->sdr_head) {
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return 0;
        }
        if (dev->sdr_tail != PPC4XX_LAST_SD)
                dev->sdr_tail++;
        else
                dev->sdr_tail = 0;
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
                                              dma_addr_t *sd_dma, u32 idx)
{
        *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

        return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}

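/*
 * Copy one scatter buffer's worth of engine output into the destination
 * page at *addr. Returns 1 while the current chunk still spans further
 * scatter buffers (the caller keeps looping), 0 once this chunk is done.
 */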
static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
                                   dma_addr_t *addr, u32 *length,
                                   u32 *idx, u32 *offset, u32 *nbytes)
{
        u32 len;

        if (*length > dev->scatter_buffer_size) {
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset,
                        dev->scatter_buffer_size);
                *offset = 0;
                *length -= dev->scatter_buffer_size;
                *nbytes -= dev->scatter_buffer_size;
                if (*idx == PPC4XX_LAST_SD)
                        *idx = 0;
                else
                        (*idx)++;
                *addr = *addr + dev->scatter_buffer_size;
                return 1;
        } else if (*length < dev->scatter_buffer_size) {
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset, *length);
                if ((*offset + *length) == dev->scatter_buffer_size) {
                        if (*idx == PPC4XX_LAST_SD)
                                *idx = 0;
                        else
                                (*idx)++;
                        *nbytes -= *length;
                        *offset = 0;
                } else {
                        *nbytes -= *length;
                        *offset += *length;
                }

                return 0;
        } else {
                len = (*nbytes <= dev->scatter_buffer_size) ?
                                (*nbytes) : dev->scatter_buffer_size;
                memcpy(phys_to_virt(*addr),
                        dev->scatter_buffer_va +
                        *idx * dev->scatter_buffer_size + *offset,
                        len);
                *offset = 0;
                *nbytes -= len;

                if (*idx == PPC4XX_LAST_SD)
                        *idx = 0;
                else
                        (*idx)++;

                return 0;
        }
}

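/*
 * Drain the scatter ring buffers back into the destination scatterlist.
 * dma_map_page() is used here to obtain the page's bus address; on the
 * non-IOMMU 4xx parts this driver targets it matches the physical address
 * that crypto4xx_fill_one_page() hands to phys_to_virt().
 */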
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
                                      struct ce_pd *pd,
                                      struct pd_uinfo *pd_uinfo,
                                      u32 nbytes,
                                      struct scatterlist *dst)
{
        dma_addr_t addr;
        u32 this_sd;
        u32 offset;
        u32 len;
        u32 i;
        u32 sg_len;
        struct scatterlist *sg;

        this_sd = pd_uinfo->first_sd;
        offset = 0;
        i = 0;

        while (nbytes) {
                sg = &dst[i];
                sg_len = sg->length;
                addr = dma_map_page(dev->core_dev->device, sg_page(sg),
                                sg->offset, sg->length, DMA_TO_DEVICE);

                if (offset == 0) {
                        len = (nbytes <= sg->length) ? nbytes : sg->length;
                        while (crypto4xx_fill_one_page(dev, &addr, &len,
                                &this_sd, &offset, &nbytes))
                                ;
                        if (!nbytes)
                                return;
                        i++;
                } else {
                        len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
                                nbytes : (dev->scatter_buffer_size - offset);
                        len = (sg->length < len) ? sg->length : len;
                        while (crypto4xx_fill_one_page(dev, &addr, &len,
                                               &this_sd, &offset, &nbytes))
                                ;
                        if (!nbytes)
                                return;
                        sg_len -= len;
                        if (sg_len) {
                                addr += len;
                                while (crypto4xx_fill_one_page(dev, &addr,
                                        &sg_len, &this_sd, &offset, &nbytes))
                                        ;
                        }
                        i++;
                }
        }
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
                                        struct crypto4xx_ctx *ctx)
{
        struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
        struct sa_state_record *state_record =
                                (struct sa_state_record *) pd_uinfo->sr_va;

        if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
                memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
                       SA_HASH_ALG_SHA1_DIGEST_SIZE);
        }

        return 0;
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
                                  struct pd_uinfo *pd_uinfo)
{
        int i;

        if (pd_uinfo->num_gd) {
                for (i = 0; i < pd_uinfo->num_gd; i++)
                        crypto4xx_put_gd_to_gdr(dev);
                pd_uinfo->first_gd = 0xffffffff;
                pd_uinfo->num_gd = 0;
        }
        if (pd_uinfo->num_sd) {
                for (i = 0; i < pd_uinfo->num_sd; i++)
                        crypto4xx_put_sd_to_sdr(dev);

                pd_uinfo->first_sd = 0xffffffff;
                pd_uinfo->num_sd = 0;
        }
}

static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
                                     struct pd_uinfo *pd_uinfo,
                                     struct ce_pd *pd)
{
        struct crypto4xx_ctx *ctx;
        struct ablkcipher_request *ablk_req;
        struct scatterlist *dst;
        dma_addr_t addr;

        ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
        ctx = crypto_tfm_ctx(ablk_req->base.tfm);

        if (pd_uinfo->using_sd) {
                crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
                                          ablk_req->dst);
        } else {
                dst = pd_uinfo->dest_va;
                addr = dma_map_page(dev->core_dev->device, sg_page(dst),
                                    dst->offset, dst->length, DMA_FROM_DEVICE);
        }
        crypto4xx_ret_sg_desc(dev, pd_uinfo);
        if (ablk_req->base.complete != NULL)
                ablk_req->base.complete(&ablk_req->base, 0);

        return 0;
}

static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
                                struct pd_uinfo *pd_uinfo)
{
        struct crypto4xx_ctx *ctx;
        struct ahash_request *ahash_req;

        ahash_req = ahash_request_cast(pd_uinfo->async_req);
        ctx = crypto_tfm_ctx(ahash_req->base.tfm);

        crypto4xx_copy_digest_to_dst(pd_uinfo, ctx);
        crypto4xx_ret_sg_desc(dev, pd_uinfo);
        /* call the user-provided completion callback */
        if (ahash_req->base.complete != NULL)
                ahash_req->base.complete(&ahash_req->base, 0);

        return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
        struct ce_pd *pd;
        struct pd_uinfo *pd_uinfo;

        pd = dev->pdr + sizeof(struct ce_pd) * idx;
        pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
        if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
                        CRYPTO_ALG_TYPE_ABLKCIPHER)
                return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
        else
                return crypto4xx_ahash_done(dev, pd_uinfo);
}

/**
 * Note: Only use this function to copy items that are word aligned.
 * Whole words are stored little-endian via cpu_to_le32(); a trailing
 * 1-3 bytes are stored byte-reversed in a zero-padded final word.
 */
void crypto4xx_memcpy_le(unsigned int *dst,
                         const unsigned char *buf,
                         int len)
{
        u8 *tmp;

        for (; len >= 4; buf += 4, len -= 4)
                *dst++ = cpu_to_le32(*(unsigned int *) buf);

        tmp = (u8 *)dst;
        switch (len) {
        case 3:
                *tmp++ = 0;
                *tmp++ = *(buf+2);
                *tmp++ = *(buf+1);
                *tmp++ = *buf;
                break;
        case 2:
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = *(buf+1);
                *tmp++ = *buf;
                break;
        case 1:
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = 0;
                *tmp++ = *buf;
                break;
        default:
                break;
        }
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
        crypto4xx_destroy_pdr(core_dev->dev);
        crypto4xx_destroy_gdr(core_dev->dev);
        crypto4xx_destroy_sdr(core_dev->dev);
        dev_set_drvdata(core_dev->device, NULL);
        iounmap(core_dev->dev->ce_base);
        kfree(core_dev->dev);
        kfree(core_dev);
}

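/*
 * Undo a crypto4xx_get_pd_from_pdr_nolock() claim when a submission is
 * aborted: rewind the head to the claimed entry and mark it free again.
 */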
void crypto4xx_return_pd(struct crypto4xx_device *dev,
                         u32 pd_entry, struct ce_pd *pd,
                         struct pd_uinfo *pd_uinfo)
{
        /* irq should be already disabled */
        dev->pdr_head = pd_entry;
        pd->pd_ctl.w = 0;
        pd->pd_ctl_len.w = 0;
        pd_uinfo->state = PD_ENTRY_FREE;
}

/*
 * derive number of elements in scatterlist
 * Shamelessly copied from talitos.c
 */
static int get_sg_count(struct scatterlist *sg_list, int nbytes)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 0;

        while (nbytes) {
                sg_nents++;
                if (sg->length > nbytes)
                        break;
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return sg_nents;
}

static u32 get_next_gd(u32 cur)
{
        if (cur != PPC4XX_LAST_GD)
                return cur + 1;
        else
                return 0;
}

static u32 get_next_sd(u32 cur)
{
        if (cur != PPC4XX_LAST_SD)
                return cur + 1;
        else
                return 0;
}

u32 crypto4xx_build_pd(struct crypto_async_request *req,
                       struct crypto4xx_ctx *ctx,
                       struct scatterlist *src,
                       struct scatterlist *dst,
                       unsigned int datalen,
                       void *iv, u32 iv_len)
{
        struct crypto4xx_device *dev = ctx->dev;
        dma_addr_t addr, pd_dma, sd_dma, gd_dma;
        struct dynamic_sa_ctl *sa;
        struct scatterlist *sg;
        struct ce_gd *gd;
        struct ce_pd *pd;
        u32 num_gd, num_sd;
        u32 fst_gd = 0xffffffff;
        u32 fst_sd = 0xffffffff;
        u32 pd_entry;
        unsigned long flags;
        struct pd_uinfo *pd_uinfo = NULL;
        unsigned int nbytes = datalen, idx;
        unsigned int ivlen = 0;
        u32 gd_idx = 0;

        /* figure out how many gd are needed */
        num_gd = get_sg_count(src, datalen);
        if (num_gd == 1)
                num_gd = 0;
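
        /*
         * A single contiguous source buffer is fed to the engine directly
         * through pd->src below, so the gather ring is only needed when
         * the source scatterlist has more than one element.
         */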

        /* figure out how many sd are needed */
        if (sg_is_last(dst) || ctx->is_hash) {
                num_sd = 0;
        } else {
                if (datalen > PPC4XX_SD_BUFFER_SIZE) {
                        num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
                        if (datalen % PPC4XX_SD_BUFFER_SIZE)
                                num_sd++;
                } else {
                        num_sd = 1;
                }
        }

        /*
         * The following section of code needs to be protected: the gather
         * and scatter descriptors must be allocated as consecutive ring
         * entries. If we run out of any kind of descriptor, the ones
         * already claimed must be returned to their original place.
         */
        spin_lock_irqsave(&dev->core_dev->lock, flags);
        if (num_gd) {
                fst_gd = crypto4xx_get_n_gd(dev, num_gd);
                if (fst_gd == ERING_WAS_FULL) {
                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                        return -EAGAIN;
                }
        }
        if (num_sd) {
                fst_sd = crypto4xx_get_n_sd(dev, num_sd);
                if (fst_sd == ERING_WAS_FULL) {
                        if (num_gd)
                                dev->gdr_head = fst_gd;
                        spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                        return -EAGAIN;
                }
        }
        pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
        if (pd_entry == ERING_WAS_FULL) {
                if (num_gd)
                        dev->gdr_head = fst_gd;
                if (num_sd)
                        dev->sdr_head = fst_sd;
                spin_unlock_irqrestore(&dev->core_dev->lock, flags);
                return -EAGAIN;
        }
        spin_unlock_irqrestore(&dev->core_dev->lock, flags);

        pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
                                       sizeof(struct pd_uinfo) * pd_entry);
        pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
        pd_uinfo->async_req = req;
        pd_uinfo->num_gd = num_gd;
        pd_uinfo->num_sd = num_sd;

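        /*
         * When an IV or hash state is involved, the SA is copied into this
         * descriptor's private shadow SA so the per-descriptor state record
         * address (and the IV) can be patched into the copy.
         */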
        if (iv_len || ctx->is_hash) {
                ivlen = iv_len;
                pd->sa = pd_uinfo->sa_pa;
                sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
                if (ctx->direction == DIR_INBOUND)
                        memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
                else
                        memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

                memcpy((void *) sa + ctx->offset_to_sr_ptr,
                        &pd_uinfo->sr_pa, 4);

                if (iv_len)
                        crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
        } else {
                if (ctx->direction == DIR_INBOUND) {
                        pd->sa = ctx->sa_in_dma_addr;
                        sa = (struct dynamic_sa_ctl *) ctx->sa_in;
                } else {
                        pd->sa = ctx->sa_out_dma_addr;
                        sa = (struct dynamic_sa_ctl *) ctx->sa_out;
                }
        }
        pd->sa_len = ctx->sa_len;
        if (num_gd) {
                /* get first gd we are going to use */
                gd_idx = fst_gd;
                pd_uinfo->first_gd = fst_gd;
                pd_uinfo->num_gd = num_gd;
                gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
                pd->src = gd_dma;
                /* enable gather */
                sa->sa_command_0.bf.gather = 1;
                idx = 0;
                /* walk the sg, and setup gather array */
                while (nbytes) {
                        sg = &src[idx];
                        addr = dma_map_page(dev->core_dev->device, sg_page(sg),
                                    sg->offset, sg->length, DMA_TO_DEVICE);
                        gd->ptr = addr;
                        gd->ctl_len.len = sg->length;
                        gd->ctl_len.done = 0;
                        gd->ctl_len.ready = 1;
                        if (sg->length >= nbytes)
                                break;
                        nbytes -= sg->length;
                        gd_idx = get_next_gd(gd_idx);
                        gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
                        idx++;
                }
        } else {
                pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
                                src->offset, src->length, DMA_TO_DEVICE);
                /* disable gather in sa command */
                sa->sa_command_0.bf.gather = 0;
                /* indicate gather array is not used */
                pd_uinfo->first_gd = 0xffffffff;
                pd_uinfo->num_gd = 0;
        }
        if (ctx->is_hash || sg_is_last(dst)) {
                /*
                 * The application gave us a single contiguous destination,
                 * so there is no need to use the scatter ring. For hashes,
                 * the ICV is always at the end of the source data.
                 */
                pd_uinfo->using_sd = 0;
                pd_uinfo->first_sd = 0xffffffff;
                pd_uinfo->num_sd = 0;
                pd_uinfo->dest_va = dst;
                sa->sa_command_0.bf.scatter = 0;
                if (ctx->is_hash)
                        pd->dest = virt_to_phys((void *)dst);
                else
                        pd->dest = (u32)dma_map_page(dev->core_dev->device,
                                        sg_page(dst), dst->offset,
                                        dst->length, DMA_TO_DEVICE);
        } else {
                struct ce_sd *sd = NULL;
                u32 sd_idx = fst_sd;

                nbytes = datalen;
                sa->sa_command_0.bf.scatter = 1;
                pd_uinfo->using_sd = 1;
                pd_uinfo->dest_va = dst;
                pd_uinfo->first_sd = fst_sd;
                pd_uinfo->num_sd = num_sd;
                sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
                pd->dest = sd_dma;
                /* setup scatter descriptor */
                sd->ctl.done = 0;
                sd->ctl.rdy = 1;
                /* sd->ptr was set up when the ring was built */
                idx = 0;
                if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
                        nbytes -= PPC4XX_SD_BUFFER_SIZE;
                else
                        nbytes = 0;
                while (nbytes) {
                        sd_idx = get_next_sd(sd_idx);
                        sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
                        /* setup scatter descriptor */
                        sd->ctl.done = 0;
                        sd->ctl.rdy = 1;
                        if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
                                nbytes -= PPC4XX_SD_BUFFER_SIZE;
                        else
                                /*
                                 * An SD entry can hold PPC4XX_SD_BUFFER_SIZE,
                                 * which is more than nbytes, so we are done.
                                 */
                                nbytes = 0;
                }
        }

        sa->sa_command_1.bf.hash_crypto_offset = 0;
        pd->pd_ctl.w = ctx->pd_ctl;
        pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
        pd_uinfo->state = PD_ENTRY_INUSE;
        wmb();
        /* write any value to push engine to read a pd */
        writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
        return -EINPROGRESS;
}

/**
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->dev = amcc_alg->dev;
        ctx->sa_in = NULL;
        ctx->sa_out = NULL;
        ctx->sa_in_dma_addr = 0;
        ctx->sa_out_dma_addr = 0;
        ctx->sa_len = 0;

        switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        default:
                tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
                break;
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                         sizeof(struct crypto4xx_ctx));
                break;
        }

        return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
        struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto4xx_free_sa(ctx);
        crypto4xx_free_state_record(ctx);
}

int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
                           struct crypto4xx_alg_common *crypto_alg,
                           int array_size)
{
        struct crypto4xx_alg *alg;
        int i;
        int rc = 0;

        for (i = 0; i < array_size; i++) {
                alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
                if (!alg)
                        return -ENOMEM;

                alg->alg = crypto_alg[i];
                alg->dev = sec_dev;

                switch (alg->alg.type) {
                case CRYPTO_ALG_TYPE_AHASH:
                        rc = crypto_register_ahash(&alg->alg.u.hash);
                        break;

                default:
                        rc = crypto_register_alg(&alg->alg.u.cipher);
                        break;
                }

                if (rc)
                        /* never added to alg_list, so just free it */
                        kfree(alg);
                else
                        list_add_tail(&alg->entry, &sec_dev->alg_list);
        }

        return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
        struct crypto4xx_alg *alg, *tmp;

        list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
                list_del(&alg->entry);
                switch (alg->alg.type) {
                case CRYPTO_ALG_TYPE_AHASH:
                        crypto_unregister_ahash(&alg->alg.u.hash);
                        break;

                default:
                        crypto_unregister_alg(&alg->alg.u.cipher);
                }
                kfree(alg);
        }
}

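/*
 * Bottom half: walk the PD ring from the tail and complete every
 * descriptor the engine has marked done, stopping at the first one still
 * owned by the hardware so completions stay in submission order.
 */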
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
        struct device *dev = (struct device *)data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
        struct pd_uinfo *pd_uinfo;
        struct ce_pd *pd;
        u32 tail;

        while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
                tail = core_dev->dev->pdr_tail;
                pd_uinfo = core_dev->dev->pdr_uinfo +
                        sizeof(struct pd_uinfo) * tail;
                pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
                if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
                                   pd->pd_ctl.bf.pe_done &&
                                   !pd->pd_ctl.bf.host_ready) {
                        pd->pd_ctl.bf.pe_done = 0;
                        crypto4xx_pd_done(core_dev->dev, tail);
                        crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
                        pd_uinfo->state = PD_ENTRY_FREE;
                } else {
                        /* if tail not done, break */
                        break;
                }
        }
}

/**
 * Top half of the ISR.
 */
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
        struct device *dev = (struct device *)data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

        if (!core_dev->dev->ce_base)
                return IRQ_NONE;

        writel(PPC4XX_INTERRUPT_CLR,
               core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
        tasklet_schedule(&core_dev->tasklet);

        return IRQ_HANDLED;
}

/**
 * Supported Crypto Algorithms
 */
struct crypto4xx_alg_common crypto4xx_alg[] = {
        /* Crypto AES modes */
        { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
                .cra_name       = "cbc(aes)",
                .cra_driver_name = "cbc-aes-ppc4xx",
                .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
                .cra_type       = &crypto_ablkcipher_type,
                .cra_init       = crypto4xx_alg_init,
                .cra_exit       = crypto4xx_alg_exit,
                .cra_module     = THIS_MODULE,
                .cra_u          = {
                        .ablkcipher = {
                                .min_keysize    = AES_MIN_KEY_SIZE,
                                .max_keysize    = AES_MAX_KEY_SIZE,
                                .ivsize         = AES_IV_SIZE,
                                .setkey         = crypto4xx_setkey_aes_cbc,
                                .encrypt        = crypto4xx_encrypt,
                                .decrypt        = crypto4xx_decrypt,
                        }
                }
        }},
};

/**
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
        int rc;
        struct resource res;
        struct device *dev = &ofdev->dev;
        struct crypto4xx_core_device *core_dev;

        rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
        if (rc)
                return -ENODEV;

        if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
                mtdcri(SDR0, PPC460EX_SDR0_SRST,
                       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
                mtdcri(SDR0, PPC460EX_SDR0_SRST,
                       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
        } else if (of_find_compatible_node(NULL, NULL,
                        "amcc,ppc405ex-crypto")) {
                mtdcri(SDR0, PPC405EX_SDR0_SRST,
                       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
                mtdcri(SDR0, PPC405EX_SDR0_SRST,
                       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
        } else if (of_find_compatible_node(NULL, NULL,
                        "amcc,ppc460sx-crypto")) {
                mtdcri(SDR0, PPC460SX_SDR0_SRST,
                       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
                mtdcri(SDR0, PPC460SX_SDR0_SRST,
                       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
        } else {
                printk(KERN_ERR "Crypto function not supported!\n");
                return -EINVAL;
        }

        core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
        if (!core_dev)
                return -ENOMEM;

        dev_set_drvdata(dev, core_dev);
        core_dev->ofdev = ofdev;
        core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
        if (!core_dev->dev) {
                rc = -ENOMEM;
                goto err_alloc_dev;
        }

        core_dev->dev->core_dev = core_dev;
        core_dev->device = dev;
        spin_lock_init(&core_dev->lock);
        INIT_LIST_HEAD(&core_dev->dev->alg_list);
        rc = crypto4xx_build_pdr(core_dev->dev);
        if (rc)
                goto err_build_pdr;

        rc = crypto4xx_build_gdr(core_dev->dev);
        if (rc)
                goto err_build_gdr;

        rc = crypto4xx_build_sdr(core_dev->dev);
        if (rc)
                goto err_build_sdr;

        /* Init tasklet for bottom half processing */
        tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
                     (unsigned long) dev);

        /* Register for Crypto isr, Crypto Engine IRQ */
        core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
                         core_dev->dev->name, dev);
        if (rc)
                goto err_request_irq;

        core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
        if (!core_dev->dev->ce_base) {
                dev_err(dev, "failed to of_iomap\n");
                rc = -ENOMEM;
                goto err_iomap;
        }

        /* need to setup pdr, rdr, gdr and sdr before this */
        crypto4xx_hw_init(core_dev->dev);

        /* Register security algorithms with Linux CryptoAPI */
        rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
                               ARRAY_SIZE(crypto4xx_alg));
        if (rc)
                goto err_start_dev;

        return 0;

err_start_dev:
        iounmap(core_dev->dev->ce_base);
err_iomap:
        free_irq(core_dev->irq, dev);
err_request_irq:
        irq_dispose_mapping(core_dev->irq);
        tasklet_kill(&core_dev->tasklet);
        crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
        crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
        crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
        kfree(core_dev->dev);
err_alloc_dev:
        kfree(core_dev);

        return rc;
}

static int crypto4xx_remove(struct platform_device *ofdev)
{
        struct device *dev = &ofdev->dev;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

        free_irq(core_dev->irq, dev);
        irq_dispose_mapping(core_dev->irq);

        tasklet_kill(&core_dev->tasklet);
        /* Un-register with Linux CryptoAPI */
        crypto4xx_unregister_alg(core_dev->dev);
        /* Free all allocated memory */
        crypto4xx_stop_all(core_dev);

        return 0;
}

static const struct of_device_id crypto4xx_match[] = {
        { .compatible      = "amcc,ppc4xx-crypto",},
        { },
};

static struct platform_driver crypto4xx_driver = {
        .driver = {
                .name = "crypto4xx",
                .owner = THIS_MODULE,
                .of_match_table = crypto4xx_match,
        },
        .probe          = crypto4xx_probe,
        .remove         = crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");