/*
 * MALI: rockchip: add utgard(mali400) src dir
 * [firefly-linux-kernel-4.4.55.git] / drivers / crypto / talitos.c
 */
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59                            bool is_sec1)
60 {
61         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62         if (!is_sec1)
63                 ptr->eptr = upper_32_bits(dma_addr);
64 }
65
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67                              struct talitos_ptr *src_ptr, bool is_sec1)
68 {
69         dst_ptr->ptr = src_ptr->ptr;
70         if (!is_sec1)
71                 dst_ptr->eptr = src_ptr->eptr;
72 }
73
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
75                                bool is_sec1)
76 {
77         if (is_sec1) {
78                 ptr->res = 0;
79                 ptr->len1 = cpu_to_be16(len);
80         } else {
81                 ptr->len = cpu_to_be16(len);
82         }
83 }
84
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86                                            bool is_sec1)
87 {
88         if (is_sec1)
89                 return be16_to_cpu(ptr->len1);
90         else
91                 return be16_to_cpu(ptr->len);
92 }
93
94 static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
95 {
96         if (!is_sec1)
97                 ptr->j_extent = 0;
98 }
99
100 /*
101  * map virtual single (contiguous) pointer to h/w descriptor pointer
102  */
103 static void map_single_talitos_ptr(struct device *dev,
104                                    struct talitos_ptr *ptr,
105                                    unsigned int len, void *data,
106                                    enum dma_data_direction dir)
107 {
108         dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
109         struct talitos_private *priv = dev_get_drvdata(dev);
110         bool is_sec1 = has_ftr_sec1(priv);
111
112         to_talitos_ptr_len(ptr, len, is_sec1);
113         to_talitos_ptr(ptr, dma_addr, is_sec1);
114         to_talitos_ptr_extent_clear(ptr, is_sec1);
115 }
116
117 /*
118  * unmap bus single (contiguous) h/w descriptor pointer
119  */
120 static void unmap_single_talitos_ptr(struct device *dev,
121                                      struct talitos_ptr *ptr,
122                                      enum dma_data_direction dir)
123 {
124         struct talitos_private *priv = dev_get_drvdata(dev);
125         bool is_sec1 = has_ftr_sec1(priv);
126
127         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
128                          from_talitos_ptr_len(ptr, is_sec1), dir);
129 }
130
/*
 * Reset one SEC channel and re-arm its configuration.
 *
 * Sets the channel-reset bit (located in CCCR_LO on SEC1, CCCR on SEC2+),
 * busy-waits up to TALITOS_TIMEOUT iterations for the hardware to clear
 * it, then re-enables 36-bit addressing, done writeback and the done IRQ.
 * Returns 0 on success, -EIO if the reset bit never clears.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		/* SEC1 exposes the reset bit in the low half of CCCR */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		/* hardware clears the bit once the reset completes */
		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
169
/*
 * Software-reset the whole SEC engine.
 *
 * Writes the software-reset bit into MCR (bit position differs between
 * SEC1 and SEC2+) and busy-waits, bounded by TALITOS_TIMEOUT, for the
 * hardware to clear it.  Returns 0 on success, -EIO on timeout.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	/* dual-IRQ devices: set MCR[RCA1|RCA3] — presumably routes
	 * channel 1/3 interrupts to the secondary IRQ line; confirm
	 * against the RCA bit definitions in talitos.h */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
195
196 /*
197  * Reset and initialize the device
198  */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	/* second reset, per the erratum above */
	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts.  Note the inverted
	 * convention on SEC1: interrupts are enabled by CLEARING the
	 * corresponding IMR bits, whereas SEC2+ enables by setting them. */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
244
245 /**
246  * talitos_submit - submits a descriptor to the device for processing
247  * @dev:        the SEC device to be used
248  * @ch:         the SEC device channel to be used
249  * @desc:       the descriptor to be processed by the device
250  * @callback:   whom to call when processing is complete
251  * @context:    a handle for use by caller (optional)
252  *
253  * desc must contain valid dma-mapped (bus physical) address pointers.
254  * callback must check err and feedback in descriptor header
255  * for device processing status.
256  */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count acts as a fifo-space gate: inc fails once the
	 * counter reaches zero (flush_channel decrements it on completion;
	 * initial value presumably set at probe — see channel init code) */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/* SEC1 lacks the SEC2-style 64-bit header: duplicate hdr
		 * into hdr1 and map starting at hdr1, skipping the first
		 * word of the descriptor */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish the fully-populated entry: flush_channel treats a
	 * non-NULL ->desc as "slot occupied", so it must be written last */
	smp_wmb();
	request->desc = desc;

	/* GO! — ensure descriptor memory is globally visible before the
	 * fetch-fifo write triggers the hardware */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
313
314 /*
315  * process what was done, notify callback of error if not
316  */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	/* drain occupied fifo slots from the tail; a non-NULL ->desc marks
	 * an in-flight request (see talitos_submit) */
	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			/* not done: stop unless an error is being flushed */
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		/* drop the lock around the callback; it may resubmit */
		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
376
377 /*
378  * process completed requests for channels that have done status
379  */
/*
 * Tasklet body for SEC1 done processing: flush each channel whose done
 * bit is set, then re-enable (unmask) done interrupts.  SEC1 enables
 * interrupts by CLEARING IMR bits (cf. init_device).  The literal masks
 * below appear to be the SEC1 per-channel done bits (ch0=0x10000000,
 * ch1=0x40000000, ch2=0x00010000, ch3=0x00040000) — verify against the
 * TALITOS1_ISR_* definitions in talitos.h.
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
408
/*
 * Tasklet body for SEC2+ done processing: flush each channel whose done
 * bit (even ISR bits 0/2/4/6) is set, then unmask done interrupts by
 * SETTING the IMR bits (the opposite convention to SEC1 above).
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
439
440 /*
441  * locate current (offending) descriptor
442  */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	/* CDPR/CDPR_LO hold the bus address of the channel's current
	 * descriptor pointer */
	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	/* scan the s/w fifo from tail for the matching mapped address;
	 * wrapping back around to tail means no entry matched */
	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	/* NOTE(review): fifo[iter].desc is dereferenced without a NULL
	 * check; a concurrently completing flush_channel() clears ->desc
	 * — verify callers' locking rules out that race. */
	return priv->chan[ch].fifo[iter].desc->hdr;
}
470
471 /*
472  * user diagnostics; report root cause of error based on execution unit status
473  */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	/* no header from the fifo scan: fall back to the channel's
	 * descriptor buffer copy */
	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	/* dump the interrupt status of the primary execution unit */
	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/* NOTE(review): the KEU case reads reg_pkeu — if a
		 * separate reg_keu mapping exists in talitos.h this looks
		 * like a copy-paste from the PKEU case; verify. */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	/* secondary EU (hash/CRC pipeline), if one was selected */
	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump all eight 64-bit words of the channel descriptor buffer */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
545
546 /*
547  * recover from error interrupts
548  */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			/* SEC2+: odd ISR bits are the per-channel errors */
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		/* channel pointer status: identifies the root cause */
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		/* several CCPSR bits mean different things on SEC1 vs SEC2+ */
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointeur not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		/* complete/flush queued requests with the error status */
		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2-only continuation: resume after the bad
			 * descriptor instead of resetting the channel */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			/* NOTE(review): timeout is declared once for the
			 * whole function and only ever decremented — a
			 * second channel taking this path resumes from the
			 * leftover count; consider resetting per channel. */
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	/* non-channel errors (or a failed restart) require a full reset */
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
644
/*
 * SEC1 hard-IRQ handler: read and acknowledge ISR, dispatch errors to
 * talitos_error() (outside the reg_lock), and defer done processing to
 * the tasklet after masking further done IRQs (SEC1 masks by SETTING
 * IMR bits — the done tasklet clears them again on exit).
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
679
/*
 * Template for the SEC2+ top-half interrupt handler.
 *
 * Under reg_lock: latch ISR/ISR_LO, ack the channel bits we own, then either
 * report errors (talitos_error, called after dropping the lock) or mask
 * further done interrupts and kick done_task[tlet]; the tasklet re-enables
 * done interrupts when it finishes.  Returns IRQ_HANDLED only if one of this
 * handler's done/error bits (or any low-word bit) was pending, so a shared
 * line is not falsely claimed.
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
{                                                                              \
	struct device *dev = data;                                             \
	struct talitos_private *priv = dev_get_drvdata(dev);                   \
	u32 isr, isr_lo;                                                       \
	unsigned long flags;                                                   \
                                                                               \
	spin_lock_irqsave(&priv->reg_lock, flags);                             \
	isr = in_be32(priv->reg + TALITOS_ISR);                                \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
	/* Acknowledge interrupt */                                            \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
                                                                               \
	if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
		spin_unlock_irqrestore(&priv->reg_lock, flags);                \
		talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
	}                                                                      \
	else {                                                                 \
		if (likely(isr & ch_done_mask)) {                              \
			/* mask further done interrupts. */                    \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);              \
		}                                                              \
		spin_unlock_irqrestore(&priv->reg_lock, flags);                \
	}                                                                      \
                                                                               \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}
712
/* single-IRQ SEC2 parts: one handler for all four channels */
DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
/*
 * Split handlers — presumably for parts with two IRQ lines (channels 0/2 vs
 * 1/3); tlet selects the matching done_task.  TODO confirm against the
 * request_irq sites elsewhere in this file.
 */
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
718
719 /*
720  * hwrng
721  */
722 static int talitos_rng_data_present(struct hwrng *rng, int wait)
723 {
724         struct device *dev = (struct device *)rng->priv;
725         struct talitos_private *priv = dev_get_drvdata(dev);
726         u32 ofl;
727         int i;
728
729         for (i = 0; i < 20; i++) {
730                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
731                       TALITOS_RNGUSR_LO_OFL;
732                 if (ofl || !wait)
733                         break;
734                 udelay(10);
735         }
736
737         return !!ofl;
738 }
739
/* hwrng ->data_read: fetch one 32-bit word of entropy from the RNGU FIFO. */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	/*
	 * Both halves of the 64-bit FIFO entry must be read; the first store
	 * to *data is intentionally overwritten, so only the low word is
	 * returned and one u32 of entropy is handed out per call.
	 */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
751
/*
 * hwrng ->init: reset the RNGU execution unit and start number generation.
 * Returns 0 on success, -ENODEV if the unit never signals reset-done.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	/* software reset, then busy-wait for the reset-done status bit */
	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	/*
	 * NOTE(review): setbits32(..., 0) ORs in zero, i.e. writes back the
	 * value just read.  Presumably the access to EUDSR_LO itself kicks
	 * off generation — verify against the RNGU programming model.
	 */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
773
774 static int talitos_register_rng(struct device *dev)
775 {
776         struct talitos_private *priv = dev_get_drvdata(dev);
777         int err;
778
779         priv->rng.name          = dev_driver_string(dev),
780         priv->rng.init          = talitos_rng_init,
781         priv->rng.data_present  = talitos_rng_data_present,
782         priv->rng.data_read     = talitos_rng_data_read,
783         priv->rng.priv          = (unsigned long)dev;
784
785         err = hwrng_register(&priv->rng);
786         if (!err)
787                 priv->rng_registered = true;
788
789         return err;
790 }
791
792 static void talitos_unregister_rng(struct device *dev)
793 {
794         struct talitos_private *priv = dev_get_drvdata(dev);
795
796         if (!priv->rng_registered)
797                 return;
798
799         hwrng_unregister(&priv->rng);
800         priv->rng_registered = false;
801 }
802
803 /*
804  * crypto alg
805  */
806 #define TALITOS_CRA_PRIORITY            3000
807 #define TALITOS_MAX_KEY_SIZE            96
808 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
809
810 struct talitos_ctx {
811         struct device *dev;
812         int ch;
813         __be32 desc_hdr_template;
814         u8 key[TALITOS_MAX_KEY_SIZE];
815         u8 iv[TALITOS_MAX_IV_LENGTH];
816         unsigned int keylen;
817         unsigned int enckeylen;
818         unsigned int authkeylen;
819 };
820
#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

/*
 * Per-request state for ahash operations.  Field semantics below are
 * inferred from names; the hash path is outside this view — confirm there.
 */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;	/* bytes of hw_context in use */
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* partial-block accumulation */
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;	/* presumably: s/w supplies initial context */
	unsigned int first;	/* first descriptor of this hash */
	unsigned int last;	/* final descriptor of this hash */
	unsigned int to_hash_later;
	unsigned int nbuf;	/* bytes currently held in buf[] */
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
837
838 static int aead_setkey(struct crypto_aead *authenc,
839                        const u8 *key, unsigned int keylen)
840 {
841         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
842         struct crypto_authenc_keys keys;
843
844         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
845                 goto badkey;
846
847         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
848                 goto badkey;
849
850         memcpy(ctx->key, keys.authkey, keys.authkeylen);
851         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
852
853         ctx->keylen = keys.authkeylen + keys.enckeylen;
854         ctx->enckeylen = keys.enckeylen;
855         ctx->authkeylen = keys.authkeylen;
856
857         return 0;
858
859 badkey:
860         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
861         return -EINVAL;
862 }
863
864 /*
865  * talitos_edesc - s/w-extended descriptor
866  * @src_nents: number of segments in input scatterlist
867  * @dst_nents: number of segments in output scatterlist
868  * @icv_ool: whether ICV is out-of-line
869  * @iv_dma: dma address of iv for checking continuity and link table
870  * @dma_len: length of dma mapped link_tbl space
871  * @dma_link_tbl: bus physical address of link_tbl/buf
872  * @desc: h/w descriptor
873  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
874  * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
875  *
876  * if decrypting (with authcheck), or either one of src_nents or dst_nents
877  * is greater than 1, an integrity check value is concatenated to the end
878  * of link_tbl data
879  */
880 struct talitos_edesc {
881         int src_nents;
882         int dst_nents;
883         bool icv_ool;
884         dma_addr_t iv_dma;
885         int dma_len;
886         dma_addr_t dma_link_tbl;
887         struct talitos_desc desc;
888         union {
889                 struct talitos_ptr link_tbl[0];
890                 u8 buf[0];
891         };
892 };
893
894 static void talitos_sg_unmap(struct device *dev,
895                              struct talitos_edesc *edesc,
896                              struct scatterlist *src,
897                              struct scatterlist *dst)
898 {
899         unsigned int src_nents = edesc->src_nents ? : 1;
900         unsigned int dst_nents = edesc->dst_nents ? : 1;
901
902         if (src != dst) {
903                 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
904
905                 if (dst) {
906                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
907                 }
908         } else
909                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
910 }
911
/*
 * Tear down all DMA state set up by ipsec_esp(): the single-buffer pointers
 * (ptr[6] iv out, ptr[3] cipher key, ptr[2] iv in, ptr[0] hmac key), the
 * src/dst scatterlists, and the link-table area if one was allocated.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
927
928 /*
929  * ipsec_esp descriptor callbacks
930  */
/*
 * Completion callback for AEAD encryption.  Unmaps DMA state; if the
 * generated ICV was placed out-of-line (after the link tables, see
 * ipsec_esp()), copies it to the tail of the destination scatterlist.
 * Frees the edesc and completes the request.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the h/w descriptor is embedded in the edesc */
	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		/* ICV sits just past the src+dst+2 link-table entries */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
959
/*
 * Completion callback for AEAD decryption when the ICV is checked in
 * software: compare the ICV generated by the h/w (oicv, stashed in the
 * edesc by aead_decrypt()) against the received ICV (icv, at the tail of
 * dst) in constant time, and fail with -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			/* stashed ICV lives past the src+dst+2 table entries */
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			/*
			 * out-of-line case: the received ICV was stashed
			 * directly after the h/w-generated one
			 */
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
995
996 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
997                                           struct talitos_desc *desc,
998                                           void *context, int err)
999 {
1000         struct aead_request *req = context;
1001         struct talitos_edesc *edesc;
1002
1003         edesc = container_of(desc, struct talitos_edesc, desc);
1004
1005         ipsec_esp_unmap(dev, edesc, req);
1006
1007         /* check ICV auth status */
1008         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1009                      DESC_HDR_LO_ICCR1_PASS))
1010                 err = -EBADMSG;
1011
1012         kfree(edesc);
1013
1014         aead_request_complete(req, err);
1015 }
1016
1017 /*
1018  * convert scatterlist to SEC h/w link table format
1019  * stop at cryptlen bytes
1020  */
/*
 * Convert a (DMA-mapped) scatterlist into SEC h/w link-table entries,
 * skipping the first @offset bytes and covering at most @cryptlen bytes.
 * Returns the number of entries written; the last entry is tagged
 * DESC_PTR_LNKTBL_RETURN so the engine stops there.
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		/* segment lies entirely inside the skipped prefix */
		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;	/* offset only applies to the first segment used */

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}
1059
/* Convenience wrapper: build a link table for the whole list, no offset. */
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen, link_tbl_ptr);
}
1067
1068 /*
1069  * fill in and submit ipsec_esp descriptor
1070  */
/*
 * Fill in and submit an IPsec-ESP style AEAD descriptor.
 *
 * Descriptor pointer layout (grounded in the assignments below):
 *   ptr[0] hmac key, ptr[1] hmac (assoc) data, ptr[2] cipher iv,
 *   ptr[3] cipher key, ptr[4] cipher in, ptr[5] cipher out, ptr[6] iv out.
 * Multi-segment src/dst go through link tables built in edesc->link_tbl;
 * tbl_off tracks the next free entry.  Returns talitos_submit()'s result;
 * on anything other than -EINPROGRESS the edesc is unmapped and freed here.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		/* assoc data is contiguous: point straight at it */
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	/* decrypt-with-hw-ICV-check also reads the trailing ICV from src */
	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		/* table collapsed to a single entry: use it directly */
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
			sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;	/* last data entry no longer ends the table */
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
					(edesc->src_nents + edesc->dst_nents +
					 2) * sizeof(struct talitos_ptr) +
					authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		/* completion callback must copy the ICV back to dst */
		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1209
1210 /*
1211  * allocate and map the extended descriptor
1212  */
/*
 * Allocate and DMA-map a s/w-extended descriptor sized for this request.
 *
 * src_nents/dst_nents are recorded as 0 when the list is a single segment
 * (no link table needed).  The trailing storage holds either SEC1 bounce
 * buffers or SEC2 link tables plus two ICV slots; iv (if any) is mapped
 * here and recorded in iv_dma.  Returns ERR_PTR on failure, with the iv
 * mapping undone.
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		/* src carries the ICV only when decrypting */
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						 (encrypt ? 0 : authsize));
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						 (encrypt ? authsize : 0));
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* GFP_DMA: the engine addresses this buffer directly */
	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
1295
1296 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1297                                               int icv_stashing, bool encrypt)
1298 {
1299         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1300         unsigned int authsize = crypto_aead_authsize(authenc);
1301         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1302         unsigned int ivsize = crypto_aead_ivsize(authenc);
1303
1304         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1305                                    iv, areq->assoclen, areq->cryptlen,
1306                                    authsize, ivsize, icv_stashing,
1307                                    areq->base.flags, encrypt);
1308 }
1309
1310 static int aead_encrypt(struct aead_request *req)
1311 {
1312         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1313         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1314         struct talitos_edesc *edesc;
1315
1316         /* allocate extended descriptor */
1317         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1318         if (IS_ERR(edesc))
1319                 return PTR_ERR(edesc);
1320
1321         /* set encrypt */
1322         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1323
1324         return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1325 }
1326
/*
 * AEAD decrypt entry point.  When the hardware can verify the ICV itself
 * (TALITOS_FTR_HW_AUTH_CHECK, with layout constraints), submit with CICV
 * mode and let ipsec_esp_decrypt_hwauth_done() read the verdict.  Otherwise
 * stash the received ICV in the edesc and compare in software via
 * ipsec_esp_decrypt_swauth_done().
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* cryptlen covers ciphertext only; the trailing ICV is separate */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
1375
1376 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1377                              const u8 *key, unsigned int keylen)
1378 {
1379         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1380
1381         memcpy(&ctx->key, key, keylen);
1382         ctx->keylen = keylen;
1383
1384         return 0;
1385 }
1386
/*
 * Undo map_sg_in/out_talitos_ptr().  On SEC1 the data may have gone through
 * the edesc bounce buffer: sync the output half (at offset len within the
 * mapped area) back and copy it into dst; single-segment lists were mapped
 * directly and are unmapped here.  On SEC2+ defer to talitos_sg_unmap().
 */
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			/* output was bounced: pull it out of edesc->buf */
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}
1413
/*
 * Tear down DMA state of an ablkcipher descriptor: ptr[5] (iv out),
 * the src/dst scatterlists, ptr[2] (key) and ptr[1] (iv in), plus the
 * link-table area if one was allocated.
 */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1428
1429 static void ablkcipher_done(struct device *dev,
1430                             struct talitos_desc *desc, void *context,
1431                             int err)
1432 {
1433         struct ablkcipher_request *areq = context;
1434         struct talitos_edesc *edesc;
1435
1436         edesc = container_of(desc, struct talitos_edesc, desc);
1437
1438         common_nonsnoop_unmap(dev, edesc, areq);
1439
1440         kfree(edesc);
1441
1442         areq->base.complete(&areq->base, err);
1443 }
1444
/*
 * Map the input scatterlist into a descriptor pointer.  On SEC1,
 * multi-segment input is linearized into the edesc bounce buffer; on
 * SEC2+ a link table is built when more than one segment remains after
 * coalescing.  Returns the (possibly coalesced) segment count.
 *
 * NOTE(review): not static, unlike the other helpers here — presumably an
 * oversight unless it is referenced from another file; verify.
 */
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			/* bounce: linearize src into edesc->buf */
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed*/
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}
1493
/*
 * Map the destination scatterlist @dst (@len bytes) into the SEC
 * descriptor pointer @ptr.
 *
 * @dir == DMA_NONE means src == dst: the scatterlist was already
 * mapped bidirectionally by map_sg_in_talitos_ptr() and @sg_count from
 * that call is reused.  On SEC2+ a link table is placed after the
 * source entries in edesc->link_tbl; on SEC1 the second half of the
 * bounce buffer (dma_link_tbl + len) is used instead.
 */
void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			/* NOTE(review): dst appears to be dma_map_sg()ed a
			 * second time here after the mapping above — confirm
			 * this is intended on SEC1 */
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			/* Output lands in the upper half of the bounce buf */
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			/* Link table for dst lives after the src entries */
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					     sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
1538
/*
 * Fill in and submit a SEC "common nonsnoop" descriptor performing an
 * ablkcipher (symmetric cipher) operation.
 *
 * Descriptor pointer layout:
 *   ptr[0] empty, ptr[1] IV in, ptr[2] key, ptr[3] data in,
 *   ptr[4] data out, ptr[5] IV out, ptr[6] empty.
 *
 * Returns -EINPROGRESS when the descriptor was queued; on any other
 * return the mappings have been undone and the edesc freed.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in: bidirectional mapping when operating in place
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out: DMA_NONE reuses the in-place mapping above */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* submit failed: clean up here, callback will not run */
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1595
1596 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1597                                                     areq, bool encrypt)
1598 {
1599         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1600         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1601         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1602
1603         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1604                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1605                                    areq->base.flags, encrypt);
1606 }
1607
1608 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1609 {
1610         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1611         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1612         struct talitos_edesc *edesc;
1613
1614         /* allocate extended descriptor */
1615         edesc = ablkcipher_edesc_alloc(areq, true);
1616         if (IS_ERR(edesc))
1617                 return PTR_ERR(edesc);
1618
1619         /* set encrypt */
1620         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1621
1622         return common_nonsnoop(edesc, areq, ablkcipher_done);
1623 }
1624
1625 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1626 {
1627         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1628         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1629         struct talitos_edesc *edesc;
1630
1631         /* allocate extended descriptor */
1632         edesc = ablkcipher_edesc_alloc(areq, false);
1633         if (IS_ERR(edesc))
1634                 return PTR_ERR(edesc);
1635
1636         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1637
1638         return common_nonsnoop(edesc, areq, ablkcipher_done);
1639 }
1640
/*
 * Undo the DMA mappings made by common_nonsnoop_hash(): digest/context
 * out (ptr[5]), data in (ptr[3] via the sg unmap), and — only when
 * their descriptor lengths are nonzero, i.e. they were actually
 * mapped — the hash context in (ptr[1]) and HMAC key (ptr[2]), plus
 * the link table if one was allocated.
 */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1667
1668 static void ahash_done(struct device *dev,
1669                        struct talitos_desc *desc, void *context,
1670                        int err)
1671 {
1672         struct ahash_request *areq = context;
1673         struct talitos_edesc *edesc =
1674                  container_of(desc, struct talitos_edesc, desc);
1675         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1676
1677         if (!req_ctx->last && req_ctx->to_hash_later) {
1678                 /* Position any partial block for next update/final/finup */
1679                 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1680                 req_ctx->nbuf = req_ctx->to_hash_later;
1681         }
1682         common_nonsnoop_hash_unmap(dev, edesc, areq);
1683
1684         kfree(edesc);
1685
1686         areq->base.complete(&areq->base, err);
1687 }
1688
1689 /*
1690  * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1691  * ourself and submit a padded block
1692  */
1693 void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1694                                struct talitos_edesc *edesc,
1695                                struct talitos_ptr *ptr)
1696 {
1697         static u8 padded_hash[64] = {
1698                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1699                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1700                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1701                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1702         };
1703
1704         pr_err_once("Bug in SEC1, padding ourself\n");
1705         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1706         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1707                                (char *)padded_hash, DMA_TO_DEVICE);
1708 }
1709
/*
 * Build and submit a hash/HMAC descriptor.
 *
 * Descriptor pointer layout:
 *   ptr[1]: hash context in (zero on the very first op when the engine
 *           inits its own state), ptr[2]: HMAC key (if any),
 *   ptr[3]: data in, ptr[5]: digest out (last op) or context out
 *           (intermediate op); ptr[0], ptr[4], ptr[6] unused.
 *
 * Returns -EINPROGRESS when the descriptor was queued; otherwise the
 * mappings are undone and the edesc freed before returning the error.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* SEC1 cannot hash a zero-length message; substitute a padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* submit failed: clean up here, callback will not run */
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1780
1781 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1782                                                unsigned int nbytes)
1783 {
1784         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1785         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1786         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1787
1788         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1789                                    nbytes, 0, 0, 0, areq->base.flags, false);
1790 }
1791
1792 static int ahash_init(struct ahash_request *areq)
1793 {
1794         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1795         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1796
1797         /* Initialize the context */
1798         req_ctx->nbuf = 0;
1799         req_ctx->first = 1; /* first indicates h/w must init its context */
1800         req_ctx->swinit = 0; /* assume h/w init of context */
1801         req_ctx->hw_context_size =
1802                 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1803                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1804                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1805
1806         return 0;
1807 }
1808
1809 /*
1810  * on h/w without explicit sha224 support, we initialize h/w context
1811  * manually with sha224 constants, and tell it to run sha256.
1812  */
1813 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1814 {
1815         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1816
1817         ahash_init(areq);
1818         req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1819
1820         req_ctx->hw_context[0] = SHA224_H0;
1821         req_ctx->hw_context[1] = SHA224_H1;
1822         req_ctx->hw_context[2] = SHA224_H2;
1823         req_ctx->hw_context[3] = SHA224_H3;
1824         req_ctx->hw_context[4] = SHA224_H4;
1825         req_ctx->hw_context[5] = SHA224_H5;
1826         req_ctx->hw_context[6] = SHA224_H6;
1827         req_ctx->hw_context[7] = SHA224_H7;
1828
1829         /* init 64-bit count */
1830         req_ctx->hw_context[8] = 0;
1831         req_ctx->hw_context[9] = 0;
1832
1833         return 0;
1834 }
1835
1836 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1837 {
1838         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1839         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1840         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1841         struct talitos_edesc *edesc;
1842         unsigned int blocksize =
1843                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1844         unsigned int nbytes_to_hash;
1845         unsigned int to_hash_later;
1846         unsigned int nsg;
1847
1848         if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1849                 /* Buffer up to one whole block */
1850                 sg_copy_to_buffer(areq->src,
1851                                   sg_nents_for_len(areq->src, nbytes),
1852                                   req_ctx->buf + req_ctx->nbuf, nbytes);
1853                 req_ctx->nbuf += nbytes;
1854                 return 0;
1855         }
1856
1857         /* At least (blocksize + 1) bytes are available to hash */
1858         nbytes_to_hash = nbytes + req_ctx->nbuf;
1859         to_hash_later = nbytes_to_hash & (blocksize - 1);
1860
1861         if (req_ctx->last)
1862                 to_hash_later = 0;
1863         else if (to_hash_later)
1864                 /* There is a partial block. Hash the full block(s) now */
1865                 nbytes_to_hash -= to_hash_later;
1866         else {
1867                 /* Keep one block buffered */
1868                 nbytes_to_hash -= blocksize;
1869                 to_hash_later = blocksize;
1870         }
1871
1872         /* Chain in any previously buffered data */
1873         if (req_ctx->nbuf) {
1874                 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1875                 sg_init_table(req_ctx->bufsl, nsg);
1876                 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1877                 if (nsg > 1)
1878                         sg_chain(req_ctx->bufsl, 2, areq->src);
1879                 req_ctx->psrc = req_ctx->bufsl;
1880         } else
1881                 req_ctx->psrc = areq->src;
1882
1883         if (to_hash_later) {
1884                 int nents = sg_nents_for_len(areq->src, nbytes);
1885                 sg_pcopy_to_buffer(areq->src, nents,
1886                                       req_ctx->bufnext,
1887                                       to_hash_later,
1888                                       nbytes - to_hash_later);
1889         }
1890         req_ctx->to_hash_later = to_hash_later;
1891
1892         /* Allocate extended descriptor */
1893         edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1894         if (IS_ERR(edesc))
1895                 return PTR_ERR(edesc);
1896
1897         edesc->desc.hdr = ctx->desc_hdr_template;
1898
1899         /* On last one, request SEC to pad; otherwise continue */
1900         if (req_ctx->last)
1901                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1902         else
1903                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1904
1905         /* request SEC to INIT hash. */
1906         if (req_ctx->first && !req_ctx->swinit)
1907                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1908
1909         /* When the tfm context has a keylen, it's an HMAC.
1910          * A first or last (ie. not middle) descriptor must request HMAC.
1911          */
1912         if (ctx->keylen && (req_ctx->first || req_ctx->last))
1913                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1914
1915         return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1916                                     ahash_done);
1917 }
1918
1919 static int ahash_update(struct ahash_request *areq)
1920 {
1921         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1922
1923         req_ctx->last = 0;
1924
1925         return ahash_process_req(areq, areq->nbytes);
1926 }
1927
1928 static int ahash_final(struct ahash_request *areq)
1929 {
1930         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1931
1932         req_ctx->last = 1;
1933
1934         return ahash_process_req(areq, 0);
1935 }
1936
1937 static int ahash_finup(struct ahash_request *areq)
1938 {
1939         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1940
1941         req_ctx->last = 1;
1942
1943         return ahash_process_req(areq, areq->nbytes);
1944 }
1945
1946 static int ahash_digest(struct ahash_request *areq)
1947 {
1948         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1949         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1950
1951         ahash->init(areq);
1952         req_ctx->last = 1;
1953
1954         return ahash_process_req(areq, areq->nbytes);
1955 }
1956
/* Completion tracking for the synchronous digest of an over-long HMAC key. */
struct keyhash_result {
	struct completion completion; /* signalled by keyhash_complete() */
	int err;                      /* final status of the digest request */
};
1961
1962 static void keyhash_complete(struct crypto_async_request *req, int err)
1963 {
1964         struct keyhash_result *res = req->data;
1965
1966         if (err == -EINPROGRESS)
1967                 return;
1968
1969         res->err = err;
1970         complete(&res->completion);
1971 }
1972
1973 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1974                    u8 *hash)
1975 {
1976         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1977
1978         struct scatterlist sg[1];
1979         struct ahash_request *req;
1980         struct keyhash_result hresult;
1981         int ret;
1982
1983         init_completion(&hresult.completion);
1984
1985         req = ahash_request_alloc(tfm, GFP_KERNEL);
1986         if (!req)
1987                 return -ENOMEM;
1988
1989         /* Keep tfm keylen == 0 during hash of the long key */
1990         ctx->keylen = 0;
1991         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1992                                    keyhash_complete, &hresult);
1993
1994         sg_init_one(&sg[0], key, keylen);
1995
1996         ahash_request_set_crypt(req, sg, hash, keylen);
1997         ret = crypto_ahash_digest(req);
1998         switch (ret) {
1999         case 0:
2000                 break;
2001         case -EINPROGRESS:
2002         case -EBUSY:
2003                 ret = wait_for_completion_interruptible(
2004                         &hresult.completion);
2005                 if (!ret)
2006                         ret = hresult.err;
2007                 break;
2008         default:
2009                 break;
2010         }
2011         ahash_request_free(req);
2012
2013         return ret;
2014 }
2015
2016 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2017                         unsigned int keylen)
2018 {
2019         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2020         unsigned int blocksize =
2021                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2022         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2023         unsigned int keysize = keylen;
2024         u8 hash[SHA512_DIGEST_SIZE];
2025         int ret;
2026
2027         if (keylen <= blocksize)
2028                 memcpy(ctx->key, key, keysize);
2029         else {
2030                 /* Must get the hash of the long key */
2031                 ret = keyhash(tfm, key, keylen, hash);
2032
2033                 if (ret) {
2034                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2035                         return -EINVAL;
2036                 }
2037
2038                 keysize = digestsize;
2039                 memcpy(ctx->key, hash, digestsize);
2040         }
2041
2042         ctx->keylen = keysize;
2043
2044         return 0;
2045 }
2046
2047
/*
 * Template used to build driver_algs[]: the crypto API algorithm
 * definition (cipher, hash, or AEAD flavor, selected by @type) paired
 * with the SEC descriptor header template that implements it.
 */
struct talitos_alg_template {
	u32 type;                  /* CRYPTO_ALG_TYPE_* discriminator for alg */
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;  /* SEC execution-unit/mode selection bits */
};
2057
2058 static struct talitos_alg_template driver_algs[] = {
2059         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2060         {       .type = CRYPTO_ALG_TYPE_AEAD,
2061                 .alg.aead = {
2062                         .base = {
2063                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2064                                 .cra_driver_name = "authenc-hmac-sha1-"
2065                                                    "cbc-aes-talitos",
2066                                 .cra_blocksize = AES_BLOCK_SIZE,
2067                                 .cra_flags = CRYPTO_ALG_ASYNC,
2068                         },
2069                         .ivsize = AES_BLOCK_SIZE,
2070                         .maxauthsize = SHA1_DIGEST_SIZE,
2071                 },
2072                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2073                                      DESC_HDR_SEL0_AESU |
2074                                      DESC_HDR_MODE0_AESU_CBC |
2075                                      DESC_HDR_SEL1_MDEUA |
2076                                      DESC_HDR_MODE1_MDEU_INIT |
2077                                      DESC_HDR_MODE1_MDEU_PAD |
2078                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2079         },
2080         {       .type = CRYPTO_ALG_TYPE_AEAD,
2081                 .alg.aead = {
2082                         .base = {
2083                                 .cra_name = "authenc(hmac(sha1),"
2084                                             "cbc(des3_ede))",
2085                                 .cra_driver_name = "authenc-hmac-sha1-"
2086                                                    "cbc-3des-talitos",
2087                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2088                                 .cra_flags = CRYPTO_ALG_ASYNC,
2089                         },
2090                         .ivsize = DES3_EDE_BLOCK_SIZE,
2091                         .maxauthsize = SHA1_DIGEST_SIZE,
2092                 },
2093                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2094                                      DESC_HDR_SEL0_DEU |
2095                                      DESC_HDR_MODE0_DEU_CBC |
2096                                      DESC_HDR_MODE0_DEU_3DES |
2097                                      DESC_HDR_SEL1_MDEUA |
2098                                      DESC_HDR_MODE1_MDEU_INIT |
2099                                      DESC_HDR_MODE1_MDEU_PAD |
2100                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2101         },
2102         {       .type = CRYPTO_ALG_TYPE_AEAD,
2103                 .alg.aead = {
2104                         .base = {
2105                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2106                                 .cra_driver_name = "authenc-hmac-sha224-"
2107                                                    "cbc-aes-talitos",
2108                                 .cra_blocksize = AES_BLOCK_SIZE,
2109                                 .cra_flags = CRYPTO_ALG_ASYNC,
2110                         },
2111                         .ivsize = AES_BLOCK_SIZE,
2112                         .maxauthsize = SHA224_DIGEST_SIZE,
2113                 },
2114                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2115                                      DESC_HDR_SEL0_AESU |
2116                                      DESC_HDR_MODE0_AESU_CBC |
2117                                      DESC_HDR_SEL1_MDEUA |
2118                                      DESC_HDR_MODE1_MDEU_INIT |
2119                                      DESC_HDR_MODE1_MDEU_PAD |
2120                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2121         },
2122         {       .type = CRYPTO_ALG_TYPE_AEAD,
2123                 .alg.aead = {
2124                         .base = {
2125                                 .cra_name = "authenc(hmac(sha224),"
2126                                             "cbc(des3_ede))",
2127                                 .cra_driver_name = "authenc-hmac-sha224-"
2128                                                    "cbc-3des-talitos",
2129                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2130                                 .cra_flags = CRYPTO_ALG_ASYNC,
2131                         },
2132                         .ivsize = DES3_EDE_BLOCK_SIZE,
2133                         .maxauthsize = SHA224_DIGEST_SIZE,
2134                 },
2135                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2136                                      DESC_HDR_SEL0_DEU |
2137                                      DESC_HDR_MODE0_DEU_CBC |
2138                                      DESC_HDR_MODE0_DEU_3DES |
2139                                      DESC_HDR_SEL1_MDEUA |
2140                                      DESC_HDR_MODE1_MDEU_INIT |
2141                                      DESC_HDR_MODE1_MDEU_PAD |
2142                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2143         },
2144         {       .type = CRYPTO_ALG_TYPE_AEAD,
2145                 .alg.aead = {
2146                         .base = {
2147                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2148                                 .cra_driver_name = "authenc-hmac-sha256-"
2149                                                    "cbc-aes-talitos",
2150                                 .cra_blocksize = AES_BLOCK_SIZE,
2151                                 .cra_flags = CRYPTO_ALG_ASYNC,
2152                         },
2153                         .ivsize = AES_BLOCK_SIZE,
2154                         .maxauthsize = SHA256_DIGEST_SIZE,
2155                 },
2156                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2157                                      DESC_HDR_SEL0_AESU |
2158                                      DESC_HDR_MODE0_AESU_CBC |
2159                                      DESC_HDR_SEL1_MDEUA |
2160                                      DESC_HDR_MODE1_MDEU_INIT |
2161                                      DESC_HDR_MODE1_MDEU_PAD |
2162                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2163         },
2164         {       .type = CRYPTO_ALG_TYPE_AEAD,
2165                 .alg.aead = {
2166                         .base = {
2167                                 .cra_name = "authenc(hmac(sha256),"
2168                                             "cbc(des3_ede))",
2169                                 .cra_driver_name = "authenc-hmac-sha256-"
2170                                                    "cbc-3des-talitos",
2171                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2172                                 .cra_flags = CRYPTO_ALG_ASYNC,
2173                         },
2174                         .ivsize = DES3_EDE_BLOCK_SIZE,
2175                         .maxauthsize = SHA256_DIGEST_SIZE,
2176                 },
2177                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2178                                      DESC_HDR_SEL0_DEU |
2179                                      DESC_HDR_MODE0_DEU_CBC |
2180                                      DESC_HDR_MODE0_DEU_3DES |
2181                                      DESC_HDR_SEL1_MDEUA |
2182                                      DESC_HDR_MODE1_MDEU_INIT |
2183                                      DESC_HDR_MODE1_MDEU_PAD |
2184                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2185         },
2186         {       .type = CRYPTO_ALG_TYPE_AEAD,
2187                 .alg.aead = {
2188                         .base = {
2189                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2190                                 .cra_driver_name = "authenc-hmac-sha384-"
2191                                                    "cbc-aes-talitos",
2192                                 .cra_blocksize = AES_BLOCK_SIZE,
2193                                 .cra_flags = CRYPTO_ALG_ASYNC,
2194                         },
2195                         .ivsize = AES_BLOCK_SIZE,
2196                         .maxauthsize = SHA384_DIGEST_SIZE,
2197                 },
2198                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2199                                      DESC_HDR_SEL0_AESU |
2200                                      DESC_HDR_MODE0_AESU_CBC |
2201                                      DESC_HDR_SEL1_MDEUB |
2202                                      DESC_HDR_MODE1_MDEU_INIT |
2203                                      DESC_HDR_MODE1_MDEU_PAD |
2204                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2205         },
2206         {       .type = CRYPTO_ALG_TYPE_AEAD,
2207                 .alg.aead = {
2208                         .base = {
2209                                 .cra_name = "authenc(hmac(sha384),"
2210                                             "cbc(des3_ede))",
2211                                 .cra_driver_name = "authenc-hmac-sha384-"
2212                                                    "cbc-3des-talitos",
2213                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2214                                 .cra_flags = CRYPTO_ALG_ASYNC,
2215                         },
2216                         .ivsize = DES3_EDE_BLOCK_SIZE,
2217                         .maxauthsize = SHA384_DIGEST_SIZE,
2218                 },
2219                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2220                                      DESC_HDR_SEL0_DEU |
2221                                      DESC_HDR_MODE0_DEU_CBC |
2222                                      DESC_HDR_MODE0_DEU_3DES |
2223                                      DESC_HDR_SEL1_MDEUB |
2224                                      DESC_HDR_MODE1_MDEU_INIT |
2225                                      DESC_HDR_MODE1_MDEU_PAD |
2226                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2227         },
2228         {       .type = CRYPTO_ALG_TYPE_AEAD,
2229                 .alg.aead = {
2230                         .base = {
2231                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2232                                 .cra_driver_name = "authenc-hmac-sha512-"
2233                                                    "cbc-aes-talitos",
2234                                 .cra_blocksize = AES_BLOCK_SIZE,
2235                                 .cra_flags = CRYPTO_ALG_ASYNC,
2236                         },
2237                         .ivsize = AES_BLOCK_SIZE,
2238                         .maxauthsize = SHA512_DIGEST_SIZE,
2239                 },
2240                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2241                                      DESC_HDR_SEL0_AESU |
2242                                      DESC_HDR_MODE0_AESU_CBC |
2243                                      DESC_HDR_SEL1_MDEUB |
2244                                      DESC_HDR_MODE1_MDEU_INIT |
2245                                      DESC_HDR_MODE1_MDEU_PAD |
2246                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2247         },
2248         {       .type = CRYPTO_ALG_TYPE_AEAD,
2249                 .alg.aead = {
2250                         .base = {
2251                                 .cra_name = "authenc(hmac(sha512),"
2252                                             "cbc(des3_ede))",
2253                                 .cra_driver_name = "authenc-hmac-sha512-"
2254                                                    "cbc-3des-talitos",
2255                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2256                                 .cra_flags = CRYPTO_ALG_ASYNC,
2257                         },
2258                         .ivsize = DES3_EDE_BLOCK_SIZE,
2259                         .maxauthsize = SHA512_DIGEST_SIZE,
2260                 },
2261                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2262                                      DESC_HDR_SEL0_DEU |
2263                                      DESC_HDR_MODE0_DEU_CBC |
2264                                      DESC_HDR_MODE0_DEU_3DES |
2265                                      DESC_HDR_SEL1_MDEUB |
2266                                      DESC_HDR_MODE1_MDEU_INIT |
2267                                      DESC_HDR_MODE1_MDEU_PAD |
2268                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2269         },
2270         {       .type = CRYPTO_ALG_TYPE_AEAD,
2271                 .alg.aead = {
2272                         .base = {
2273                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2274                                 .cra_driver_name = "authenc-hmac-md5-"
2275                                                    "cbc-aes-talitos",
2276                                 .cra_blocksize = AES_BLOCK_SIZE,
2277                                 .cra_flags = CRYPTO_ALG_ASYNC,
2278                         },
2279                         .ivsize = AES_BLOCK_SIZE,
2280                         .maxauthsize = MD5_DIGEST_SIZE,
2281                 },
2282                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2283                                      DESC_HDR_SEL0_AESU |
2284                                      DESC_HDR_MODE0_AESU_CBC |
2285                                      DESC_HDR_SEL1_MDEUA |
2286                                      DESC_HDR_MODE1_MDEU_INIT |
2287                                      DESC_HDR_MODE1_MDEU_PAD |
2288                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2289         },
2290         {       .type = CRYPTO_ALG_TYPE_AEAD,
2291                 .alg.aead = {
2292                         .base = {
2293                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2294                                 .cra_driver_name = "authenc-hmac-md5-"
2295                                                    "cbc-3des-talitos",
2296                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2297                                 .cra_flags = CRYPTO_ALG_ASYNC,
2298                         },
2299                         .ivsize = DES3_EDE_BLOCK_SIZE,
2300                         .maxauthsize = MD5_DIGEST_SIZE,
2301                 },
2302                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2303                                      DESC_HDR_SEL0_DEU |
2304                                      DESC_HDR_MODE0_DEU_CBC |
2305                                      DESC_HDR_MODE0_DEU_3DES |
2306                                      DESC_HDR_SEL1_MDEUA |
2307                                      DESC_HDR_MODE1_MDEU_INIT |
2308                                      DESC_HDR_MODE1_MDEU_PAD |
2309                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2310         },
2311         /* ABLKCIPHER algorithms. */
2312         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2313                 .alg.crypto = {
2314                         .cra_name = "cbc(aes)",
2315                         .cra_driver_name = "cbc-aes-talitos",
2316                         .cra_blocksize = AES_BLOCK_SIZE,
2317                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2318                                      CRYPTO_ALG_ASYNC,
2319                         .cra_ablkcipher = {
2320                                 .min_keysize = AES_MIN_KEY_SIZE,
2321                                 .max_keysize = AES_MAX_KEY_SIZE,
2322                                 .ivsize = AES_BLOCK_SIZE,
2323                         }
2324                 },
2325                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2326                                      DESC_HDR_SEL0_AESU |
2327                                      DESC_HDR_MODE0_AESU_CBC,
2328         },
2329         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2330                 .alg.crypto = {
2331                         .cra_name = "cbc(des3_ede)",
2332                         .cra_driver_name = "cbc-3des-talitos",
2333                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2334                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2335                                      CRYPTO_ALG_ASYNC,
2336                         .cra_ablkcipher = {
2337                                 .min_keysize = DES3_EDE_KEY_SIZE,
2338                                 .max_keysize = DES3_EDE_KEY_SIZE,
2339                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2340                         }
2341                 },
2342                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2343                                      DESC_HDR_SEL0_DEU |
2344                                      DESC_HDR_MODE0_DEU_CBC |
2345                                      DESC_HDR_MODE0_DEU_3DES,
2346         },
2347         /* AHASH algorithms. */
2348         {       .type = CRYPTO_ALG_TYPE_AHASH,
2349                 .alg.hash = {
2350                         .halg.digestsize = MD5_DIGEST_SIZE,
2351                         .halg.base = {
2352                                 .cra_name = "md5",
2353                                 .cra_driver_name = "md5-talitos",
2354                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2355                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2356                                              CRYPTO_ALG_ASYNC,
2357                         }
2358                 },
2359                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2360                                      DESC_HDR_SEL0_MDEUA |
2361                                      DESC_HDR_MODE0_MDEU_MD5,
2362         },
2363         {       .type = CRYPTO_ALG_TYPE_AHASH,
2364                 .alg.hash = {
2365                         .halg.digestsize = SHA1_DIGEST_SIZE,
2366                         .halg.base = {
2367                                 .cra_name = "sha1",
2368                                 .cra_driver_name = "sha1-talitos",
2369                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2370                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2371                                              CRYPTO_ALG_ASYNC,
2372                         }
2373                 },
2374                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2375                                      DESC_HDR_SEL0_MDEUA |
2376                                      DESC_HDR_MODE0_MDEU_SHA1,
2377         },
2378         {       .type = CRYPTO_ALG_TYPE_AHASH,
2379                 .alg.hash = {
2380                         .halg.digestsize = SHA224_DIGEST_SIZE,
2381                         .halg.base = {
2382                                 .cra_name = "sha224",
2383                                 .cra_driver_name = "sha224-talitos",
2384                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2385                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2386                                              CRYPTO_ALG_ASYNC,
2387                         }
2388                 },
2389                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2390                                      DESC_HDR_SEL0_MDEUA |
2391                                      DESC_HDR_MODE0_MDEU_SHA224,
2392         },
2393         {       .type = CRYPTO_ALG_TYPE_AHASH,
2394                 .alg.hash = {
2395                         .halg.digestsize = SHA256_DIGEST_SIZE,
2396                         .halg.base = {
2397                                 .cra_name = "sha256",
2398                                 .cra_driver_name = "sha256-talitos",
2399                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2400                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2401                                              CRYPTO_ALG_ASYNC,
2402                         }
2403                 },
2404                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2405                                      DESC_HDR_SEL0_MDEUA |
2406                                      DESC_HDR_MODE0_MDEU_SHA256,
2407         },
2408         {       .type = CRYPTO_ALG_TYPE_AHASH,
2409                 .alg.hash = {
2410                         .halg.digestsize = SHA384_DIGEST_SIZE,
2411                         .halg.base = {
2412                                 .cra_name = "sha384",
2413                                 .cra_driver_name = "sha384-talitos",
2414                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2415                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2416                                              CRYPTO_ALG_ASYNC,
2417                         }
2418                 },
2419                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2420                                      DESC_HDR_SEL0_MDEUB |
2421                                      DESC_HDR_MODE0_MDEUB_SHA384,
2422         },
2423         {       .type = CRYPTO_ALG_TYPE_AHASH,
2424                 .alg.hash = {
2425                         .halg.digestsize = SHA512_DIGEST_SIZE,
2426                         .halg.base = {
2427                                 .cra_name = "sha512",
2428                                 .cra_driver_name = "sha512-talitos",
2429                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2430                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2431                                              CRYPTO_ALG_ASYNC,
2432                         }
2433                 },
2434                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2435                                      DESC_HDR_SEL0_MDEUB |
2436                                      DESC_HDR_MODE0_MDEUB_SHA512,
2437         },
2438         {       .type = CRYPTO_ALG_TYPE_AHASH,
2439                 .alg.hash = {
2440                         .halg.digestsize = MD5_DIGEST_SIZE,
2441                         .halg.base = {
2442                                 .cra_name = "hmac(md5)",
2443                                 .cra_driver_name = "hmac-md5-talitos",
2444                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2445                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2446                                              CRYPTO_ALG_ASYNC,
2447                         }
2448                 },
2449                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2450                                      DESC_HDR_SEL0_MDEUA |
2451                                      DESC_HDR_MODE0_MDEU_MD5,
2452         },
2453         {       .type = CRYPTO_ALG_TYPE_AHASH,
2454                 .alg.hash = {
2455                         .halg.digestsize = SHA1_DIGEST_SIZE,
2456                         .halg.base = {
2457                                 .cra_name = "hmac(sha1)",
2458                                 .cra_driver_name = "hmac-sha1-talitos",
2459                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2460                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2461                                              CRYPTO_ALG_ASYNC,
2462                         }
2463                 },
2464                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2465                                      DESC_HDR_SEL0_MDEUA |
2466                                      DESC_HDR_MODE0_MDEU_SHA1,
2467         },
2468         {       .type = CRYPTO_ALG_TYPE_AHASH,
2469                 .alg.hash = {
2470                         .halg.digestsize = SHA224_DIGEST_SIZE,
2471                         .halg.base = {
2472                                 .cra_name = "hmac(sha224)",
2473                                 .cra_driver_name = "hmac-sha224-talitos",
2474                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2475                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2476                                              CRYPTO_ALG_ASYNC,
2477                         }
2478                 },
2479                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2480                                      DESC_HDR_SEL0_MDEUA |
2481                                      DESC_HDR_MODE0_MDEU_SHA224,
2482         },
2483         {       .type = CRYPTO_ALG_TYPE_AHASH,
2484                 .alg.hash = {
2485                         .halg.digestsize = SHA256_DIGEST_SIZE,
2486                         .halg.base = {
2487                                 .cra_name = "hmac(sha256)",
2488                                 .cra_driver_name = "hmac-sha256-talitos",
2489                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2490                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2491                                              CRYPTO_ALG_ASYNC,
2492                         }
2493                 },
2494                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2495                                      DESC_HDR_SEL0_MDEUA |
2496                                      DESC_HDR_MODE0_MDEU_SHA256,
2497         },
2498         {       .type = CRYPTO_ALG_TYPE_AHASH,
2499                 .alg.hash = {
2500                         .halg.digestsize = SHA384_DIGEST_SIZE,
2501                         .halg.base = {
2502                                 .cra_name = "hmac(sha384)",
2503                                 .cra_driver_name = "hmac-sha384-talitos",
2504                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2505                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2506                                              CRYPTO_ALG_ASYNC,
2507                         }
2508                 },
2509                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2510                                      DESC_HDR_SEL0_MDEUB |
2511                                      DESC_HDR_MODE0_MDEUB_SHA384,
2512         },
2513         {       .type = CRYPTO_ALG_TYPE_AHASH,
2514                 .alg.hash = {
2515                         .halg.digestsize = SHA512_DIGEST_SIZE,
2516                         .halg.base = {
2517                                 .cra_name = "hmac(sha512)",
2518                                 .cra_driver_name = "hmac-sha512-talitos",
2519                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2520                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2521                                              CRYPTO_ALG_ASYNC,
2522                         }
2523                 },
2524                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2525                                      DESC_HDR_SEL0_MDEUB |
2526                                      DESC_HDR_MODE0_MDEUB_SHA512,
2527         }
2528 };
2529
/*
 * Per-algorithm registration record: binds one registered crypto algorithm
 * (a private copy of its driver template) to the talitos device serving it.
 */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in talitos_private->alg_list */
	struct device *dev;		/* talitos device backing this alg */
	struct talitos_alg_template algt;	/* template copy; patched at alloc time */
};
2535
2536 static int talitos_init_common(struct talitos_ctx *ctx,
2537                                struct talitos_crypto_alg *talitos_alg)
2538 {
2539         struct talitos_private *priv;
2540
2541         /* update context with ptr to dev */
2542         ctx->dev = talitos_alg->dev;
2543
2544         /* assign SEC channel to tfm in round-robin fashion */
2545         priv = dev_get_drvdata(ctx->dev);
2546         ctx->ch = atomic_inc_return(&priv->last_chan) &
2547                   (priv->num_channels - 1);
2548
2549         /* copy descriptor header template value */
2550         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2551
2552         /* select done notification */
2553         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2554
2555         return 0;
2556 }
2557
2558 static int talitos_cra_init(struct crypto_tfm *tfm)
2559 {
2560         struct crypto_alg *alg = tfm->__crt_alg;
2561         struct talitos_crypto_alg *talitos_alg;
2562         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2563
2564         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2565                 talitos_alg = container_of(__crypto_ahash_alg(alg),
2566                                            struct talitos_crypto_alg,
2567                                            algt.alg.hash);
2568         else
2569                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2570                                            algt.alg.crypto);
2571
2572         return talitos_init_common(ctx, talitos_alg);
2573 }
2574
2575 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2576 {
2577         struct aead_alg *alg = crypto_aead_alg(tfm);
2578         struct talitos_crypto_alg *talitos_alg;
2579         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2580
2581         talitos_alg = container_of(alg, struct talitos_crypto_alg,
2582                                    algt.alg.aead);
2583
2584         return talitos_init_common(ctx, talitos_alg);
2585 }
2586
2587 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2588 {
2589         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2590
2591         talitos_cra_init(tfm);
2592
2593         ctx->keylen = 0;
2594         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2595                                  sizeof(struct talitos_ahash_req_ctx));
2596
2597         return 0;
2598 }
2599
2600 /*
2601  * given the alg's descriptor header template, determine whether descriptor
2602  * type and primary/secondary execution units required match the hw
2603  * capabilities description provided in the device tree node.
2604  */
2605 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2606 {
2607         struct talitos_private *priv = dev_get_drvdata(dev);
2608         int ret;
2609
2610         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2611               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2612
2613         if (SECONDARY_EU(desc_hdr_template))
2614                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2615                               & priv->exec_units);
2616
2617         return ret;
2618 }
2619
2620 static int talitos_remove(struct platform_device *ofdev)
2621 {
2622         struct device *dev = &ofdev->dev;
2623         struct talitos_private *priv = dev_get_drvdata(dev);
2624         struct talitos_crypto_alg *t_alg, *n;
2625         int i;
2626
2627         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2628                 switch (t_alg->algt.type) {
2629                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2630                         break;
2631                 case CRYPTO_ALG_TYPE_AEAD:
2632                         crypto_unregister_aead(&t_alg->algt.alg.aead);
2633                 case CRYPTO_ALG_TYPE_AHASH:
2634                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
2635                         break;
2636                 }
2637                 list_del(&t_alg->entry);
2638                 kfree(t_alg);
2639         }
2640
2641         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2642                 talitos_unregister_rng(dev);
2643
2644         for (i = 0; priv->chan && i < priv->num_channels; i++)
2645                 kfree(priv->chan[i].fifo);
2646
2647         kfree(priv->chan);
2648
2649         for (i = 0; i < 2; i++)
2650                 if (priv->irq[i]) {
2651                         free_irq(priv->irq[i], dev);
2652                         irq_dispose_mapping(priv->irq[i]);
2653                 }
2654
2655         tasklet_kill(&priv->done_task[0]);
2656         if (priv->irq[1])
2657                 tasklet_kill(&priv->done_task[1]);
2658
2659         iounmap(priv->reg);
2660
2661         kfree(priv);
2662
2663         return 0;
2664 }
2665
/*
 * Allocate a talitos_crypto_alg registration record from a driver template,
 * wiring in the type-specific crypto API entry points.
 *
 * Returns the new record (caller owns it and must kfree on unregister), or
 * ERR_PTR(-ENOMEM/-ENOTSUPP/-EINVAL) when allocation fails, the hardware
 * lacks a required feature, or the template type is unknown.
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	/* private copy: the template may be patched below (sha224 case) */
	t_alg->algt = *template;

	/* hook up type-specific ops; alg points at the embedded crypto_alg */
	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		/* without the HMAC feature, reject all hmac(...) templates */
		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/*
		 * No hardware sha224 init: substitute a software init
		 * (ahash_init_sha224_swinit) and run the MDEU in SHA-256
		 * mode instead.
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	/* fields common to every algorithm type */
	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
2739
/*
 * Map and request the device's interrupt line(s).
 *
 * SEC1 parts use a single irq for all four channels.  SEC2+ parts may
 * provide a second line, in which case irq[0] serves channels 0/2 and
 * irq[1] serves channels 1/3; with only one line mapped, it serves all
 * four channels.  Returns 0 on success or a negative errno; on failure
 * the corresponding irq mapping is disposed and priv->irq[] cleared.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	/* SEC1: one interrupt line covers all four channels */
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		/* no secondary line: the primary irq serves all channels */
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	/* two lines: primary irq handles channels 0 and 2 */
	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line (channels 1 and 3) */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		/* NOTE(review): err is still returned here even though the
		 * primary irq was acquired - confirm the caller's error path
		 * releases it (e.g. via talitos_remove). */
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
2793
/*
 * talitos_probe - bind the driver to a SEC device node
 *
 * Maps the register block, reads the SEC capabilities from the device
 * tree, requests IRQs, allocates per-channel state, resets the h/w and
 * registers the hwrng and every crypto algorithm the engine supports.
 *
 * Returns 0 on success or a negative errno; on any failure the error
 * path funnels through talitos_remove(), which tears down whatever was
 * set up so far (priv is zero-initialized, so partial init is safe).
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/*
	 * All four properties are mandatory; num_channels must be a power
	 * of two because fifo_len is rounded up to one below.
	 */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	/* per-generation execution-unit register offsets and channel stride */
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	/*
	 * SEC1 has a single done tasklet; SEC2+ uses one 4-channel tasklet
	 * when only the primary IRQ is wired, or two 2-channel tasklets
	 * when the secondary IRQ is available.
	 */
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	/* kcalloc checks the n * size multiplication for overflow */
	priv->chan = kcalloc(priv->num_channels,
			     sizeof(struct talitos_channel), GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kcalloc(priv->fifo_len,
					     sizeof(struct talitos_request),
					     GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		/*
		 * Start the submit counter below zero so it hits zero when
		 * the channel fifo is (almost) full.
		 */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	/*
	 * SEC descriptors use 36-bit DMA addresses; bail out if the
	 * platform cannot honor that mask rather than DMA-mapping with
	 * a wrong one later.
	 */
	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err) {
		dev_err(dev, "failed to set dma mask\n");
		goto err_out;
	}

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP: alg unusable on this SEC rev */
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			/* one failed alg is not fatal; free it and go on */
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	/* talitos_remove() copes with partially-initialized state */
	talitos_remove(ofdev);

	return err;
}
3002
/*
 * Device-tree match table.  Each SEC generation's entry is compiled in
 * only when the corresponding Kconfig option is enabled, so a kernel
 * built for one generation does not bind to the other's nodes.
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, talitos_match);
3017
3018 static struct platform_driver talitos_driver = {
3019         .driver = {
3020                 .name = "talitos",
3021                 .of_match_table = talitos_match,
3022         },
3023         .probe = talitos_probe,
3024         .remove = talitos_remove,
3025 };
3026
/* Register on module load, unregister on unload (boilerplate init/exit). */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");