2 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
63 ptr->eptr = upper_32_bits(dma_addr);
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
69 dst_ptr->ptr = src_ptr->ptr;
71 dst_ptr->eptr = src_ptr->eptr;
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
79 ptr->len1 = cpu_to_be16(len);
81 ptr->len = cpu_to_be16(len);
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
89 return be16_to_cpu(ptr->len1);
91 return be16_to_cpu(ptr->len);
94 static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
101 * map virtual single (contiguous) pointer to h/w descriptor pointer
103 static void map_single_talitos_ptr(struct device *dev,
104 struct talitos_ptr *ptr,
105 unsigned int len, void *data,
106 enum dma_data_direction dir)
108 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
109 struct talitos_private *priv = dev_get_drvdata(dev);
110 bool is_sec1 = has_ftr_sec1(priv);
112 to_talitos_ptr_len(ptr, len, is_sec1);
113 to_talitos_ptr(ptr, dma_addr, is_sec1);
114 to_talitos_ptr_extent_clear(ptr, is_sec1);
118 * unmap bus single (contiguous) h/w descriptor pointer
120 static void unmap_single_talitos_ptr(struct device *dev,
121 struct talitos_ptr *ptr,
122 enum dma_data_direction dir)
124 struct talitos_private *priv = dev_get_drvdata(dev);
125 bool is_sec1 = has_ftr_sec1(priv);
127 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
128 from_talitos_ptr_len(ptr, is_sec1), dir);
131 static int reset_channel(struct device *dev, int ch)
133 struct talitos_private *priv = dev_get_drvdata(dev);
134 unsigned int timeout = TALITOS_TIMEOUT;
135 bool is_sec1 = has_ftr_sec1(priv);
138 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
139 TALITOS1_CCCR_LO_RESET);
141 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
142 TALITOS1_CCCR_LO_RESET) && --timeout)
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
146 TALITOS2_CCCR_RESET);
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
149 TALITOS2_CCCR_RESET) && --timeout)
154 dev_err(dev, "failed to reset channel %d\n", ch);
158 /* set 36-bit addressing, done writeback enable and done IRQ enable */
159 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
160 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
162 /* and ICCR writeback, if available */
163 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
164 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
165 TALITOS_CCCR_LO_IWSE);
170 static int reset_device(struct device *dev)
172 struct talitos_private *priv = dev_get_drvdata(dev);
173 unsigned int timeout = TALITOS_TIMEOUT;
174 bool is_sec1 = has_ftr_sec1(priv);
175 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
177 setbits32(priv->reg + TALITOS_MCR, mcr);
179 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
184 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
185 setbits32(priv->reg + TALITOS_MCR, mcr);
189 dev_err(dev, "failed to reset device\n");
197 * Reset and initialize the device
199 static int init_device(struct device *dev)
201 struct talitos_private *priv = dev_get_drvdata(dev);
203 bool is_sec1 = has_ftr_sec1(priv);
207 * errata documentation: warning: certain SEC interrupts
208 * are not fully cleared by writing the MCR:SWR bit,
209 * set bit twice to completely reset
211 err = reset_device(dev);
215 err = reset_device(dev);
220 for (ch = 0; ch < priv->num_channels; ch++) {
221 err = reset_channel(dev, ch);
226 /* enable channel done and error interrupts */
228 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
229 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
230 /* disable parity error check in DEU (erroneous? test vect.) */
231 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
233 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
234 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
237 /* disable integrity check error interrupts (use writeback instead) */
238 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
239 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
240 TALITOS_MDEUICR_LO_ICE);
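/*
 * Note (added for clarity): with TALITOS_FTR_HW_AUTH_CHECK the ICV
 * comparison result is delivered via descriptor header writeback
 * (DESC_HDR_LO_ICCR1) and checked in ipsec_esp_decrypt_hwauth_done()
 * below, rather than being signalled as an integrity-check error
 * interrupt.
 */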
246 * talitos_submit - submits a descriptor to the device for processing
247 * @dev: the SEC device to be used
248 * @ch: the SEC device channel to be used
249 * @desc: the descriptor to be processed by the device
250 * @callback: whom to call when processing is complete
251 * @context: a handle for use by caller (optional)
253 * desc must contain valid dma-mapped (bus physical) address pointers.
254 * callback must check err and feedback in descriptor header
255 * for device processing status.
257 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
258 void (*callback)(struct device *dev,
259 struct talitos_desc *desc,
260 void *context, int error),
263 struct talitos_private *priv = dev_get_drvdata(dev);
264 struct talitos_request *request;
267 bool is_sec1 = has_ftr_sec1(priv);
269 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
271 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
272 /* h/w fifo is full */
273 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
277 head = priv->chan[ch].head;
278 request = &priv->chan[ch].fifo[head];
280 /* map descriptor and save caller data */
282 desc->hdr1 = desc->hdr;
284 request->dma_desc = dma_map_single(dev, &desc->hdr1,
288 request->dma_desc = dma_map_single(dev, desc,
292 request->callback = callback;
293 request->context = context;
295 /* increment fifo head */
296 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
299 request->desc = desc;
303 out_be32(priv->chan[ch].reg + TALITOS_FF,
304 upper_32_bits(request->dma_desc));
305 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
306 lower_32_bits(request->dma_desc));
308 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
312 EXPORT_SYMBOL(talitos_submit);
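/*
 * Usage note (illustrative, not driver code): talitos_submit() returns
 * -EINPROGRESS when the descriptor has been queued; completion is then
 * reported asynchronously through the supplied callback from the channel
 * done tasklet.  Any other return value (e.g. -EAGAIN when the channel
 * fifo is full) means the descriptor was never queued, so the caller must
 * unmap and free its own state -- see the "if (ret != -EINPROGRESS)"
 * cleanup paths in ipsec_esp(), common_nonsnoop() and
 * common_nonsnoop_hash() below.
 */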
315 * process what was done, notify callback of error if not
317 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
319 struct talitos_private *priv = dev_get_drvdata(dev);
320 struct talitos_request *request, saved_req;
323 bool is_sec1 = has_ftr_sec1(priv);
325 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
327 tail = priv->chan[ch].tail;
328 while (priv->chan[ch].fifo[tail].desc) {
331 request = &priv->chan[ch].fifo[tail];
333 /* descriptors with their done bits set don't get the error */
335 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
337 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
345 dma_unmap_single(dev, request->dma_desc,
349 /* copy entries so we can call callback outside lock */
350 saved_req.desc = request->desc;
351 saved_req.callback = request->callback;
352 saved_req.context = request->context;
354 /* release request entry in fifo */
356 request->desc = NULL;
358 /* increment fifo tail */
359 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
361 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
363 atomic_dec(&priv->chan[ch].submit_count);
365 saved_req.callback(dev, saved_req.desc, saved_req.context,
367 /* channel may resume processing in single desc error case */
368 if (error && !reset_ch && status == error)
370 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
371 tail = priv->chan[ch].tail;
374 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
378 * process completed requests for channels that have done status
380 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
381 static void talitos1_done_##name(unsigned long data) \
383 struct device *dev = (struct device *)data; \
384 struct talitos_private *priv = dev_get_drvdata(dev); \
385 unsigned long flags; \
387 if (ch_done_mask & 0x10000000) \
388 flush_channel(dev, 0, 0, 0); \
389 if (priv->num_channels == 1) \
391 if (ch_done_mask & 0x40000000) \
392 flush_channel(dev, 1, 0, 0); \
393 if (ch_done_mask & 0x00010000) \
394 flush_channel(dev, 2, 0, 0); \
395 if (ch_done_mask & 0x00040000) \
396 flush_channel(dev, 3, 0, 0); \
399 /* At this point, all completed channels have been processed */ \
400 /* Unmask done interrupts for channels completed later on. */ \
401 spin_lock_irqsave(&priv->reg_lock, flags); \
402 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
403 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
404 spin_unlock_irqrestore(&priv->reg_lock, flags); \
407 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
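/*
 * Note the SEC1/SEC2 differences visible above and below: SEC1 reports
 * channel-done status on different ISR bit positions than SEC2, and its
 * IMR bits are mask bits (set = masked, cleared = enabled) whereas SEC2
 * IMR bits are enable bits -- hence clrbits32()/setbits32() are swapped
 * between the DEF_TALITOS1_*/DEF_TALITOS2_* done and interrupt variants.
 */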
409 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
410 static void talitos2_done_##name(unsigned long data) \
412 struct device *dev = (struct device *)data; \
413 struct talitos_private *priv = dev_get_drvdata(dev); \
414 unsigned long flags; \
416 if (ch_done_mask & 1) \
417 flush_channel(dev, 0, 0, 0); \
418 if (priv->num_channels == 1) \
420 if (ch_done_mask & (1 << 2)) \
421 flush_channel(dev, 1, 0, 0); \
422 if (ch_done_mask & (1 << 4)) \
423 flush_channel(dev, 2, 0, 0); \
424 if (ch_done_mask & (1 << 6)) \
425 flush_channel(dev, 3, 0, 0); \
428 /* At this point, all completed channels have been processed */ \
429 /* Unmask done interrupts for channels completed later on. */ \
430 spin_lock_irqsave(&priv->reg_lock, flags); \
431 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
432 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
433 spin_unlock_irqrestore(&priv->reg_lock, flags); \
436 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
437 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
438 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
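/*
 * The ch0_2/ch1_3 variants back SEC2 devices wired with two interrupt
 * lines, one per channel pair; each pair gets its own done tasklet.
 */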
441 * locate current (offending) descriptor
443 static u32 current_desc_hdr(struct device *dev, int ch)
445 struct talitos_private *priv = dev_get_drvdata(dev);
449 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
450 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
453 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
457 tail = priv->chan[ch].tail;
460 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
461 iter = (iter + 1) & (priv->fifo_len - 1);
463 dev_err(dev, "couldn't locate current descriptor\n");
468 return priv->chan[ch].fifo[iter].desc->hdr;
472 * user diagnostics; report root cause of error based on execution unit status
474 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
476 struct talitos_private *priv = dev_get_drvdata(dev);
480 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
482 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
483 case DESC_HDR_SEL0_AFEU:
484 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
485 in_be32(priv->reg_afeu + TALITOS_EUISR),
486 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
488 case DESC_HDR_SEL0_DEU:
489 dev_err(dev, "DEUISR 0x%08x_%08x\n",
490 in_be32(priv->reg_deu + TALITOS_EUISR),
491 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
493 case DESC_HDR_SEL0_MDEUA:
494 case DESC_HDR_SEL0_MDEUB:
495 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
496 in_be32(priv->reg_mdeu + TALITOS_EUISR),
497 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
499 case DESC_HDR_SEL0_RNG:
500 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
501 in_be32(priv->reg_rngu + TALITOS_ISR),
502 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
504 case DESC_HDR_SEL0_PKEU:
505 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
506 in_be32(priv->reg_pkeu + TALITOS_EUISR),
507 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
509 case DESC_HDR_SEL0_AESU:
510 dev_err(dev, "AESUISR 0x%08x_%08x\n",
511 in_be32(priv->reg_aesu + TALITOS_EUISR),
512 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
514 case DESC_HDR_SEL0_CRCU:
515 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
516 in_be32(priv->reg_crcu + TALITOS_EUISR),
517 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
519 case DESC_HDR_SEL0_KEU:
520 dev_err(dev, "KEUISR 0x%08x_%08x\n",
521 in_be32(priv->reg_pkeu + TALITOS_EUISR),
522 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
526 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
527 case DESC_HDR_SEL1_MDEUA:
528 case DESC_HDR_SEL1_MDEUB:
529 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
530 in_be32(priv->reg_mdeu + TALITOS_EUISR),
531 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
533 case DESC_HDR_SEL1_CRCU:
534 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
535 in_be32(priv->reg_crcu + TALITOS_EUISR),
536 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
540 for (i = 0; i < 8; i++)
541 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
542 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
543 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
547 * recover from error interrupts
549 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
551 struct talitos_private *priv = dev_get_drvdata(dev);
552 unsigned int timeout = TALITOS_TIMEOUT;
553 int ch, error, reset_dev = 0;
555 bool is_sec1 = has_ftr_sec1(priv);
556 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
558 for (ch = 0; ch < priv->num_channels; ch++) {
559 /* skip channels without errors */
561 /* bits 29, 31, 17, 19 */
562 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
565 if (!(isr & (1 << (ch * 2 + 1))))
571 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
573 if (v_lo & TALITOS_CCPSR_LO_DOF) {
574 dev_err(dev, "double fetch fifo overflow error\n");
578 if (v_lo & TALITOS_CCPSR_LO_SOF) {
579 /* h/w dropped descriptor */
580 dev_err(dev, "single fetch fifo overflow error\n");
583 if (v_lo & TALITOS_CCPSR_LO_MDTE)
584 dev_err(dev, "master data transfer error\n");
585 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
586 dev_err(dev, is_sec1 ? "pointer not complete error\n"
587 : "s/g data length zero error\n");
588 if (v_lo & TALITOS_CCPSR_LO_FPZ)
589 dev_err(dev, is_sec1 ? "parity error\n"
590 : "fetch pointer zero error\n");
591 if (v_lo & TALITOS_CCPSR_LO_IDH)
592 dev_err(dev, "illegal descriptor header error\n");
593 if (v_lo & TALITOS_CCPSR_LO_IEU)
594 dev_err(dev, is_sec1 ? "static assignment error\n"
595 : "invalid exec unit error\n");
596 if (v_lo & TALITOS_CCPSR_LO_EU)
597 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
599 if (v_lo & TALITOS_CCPSR_LO_GB)
600 dev_err(dev, "gather boundary error\n");
601 if (v_lo & TALITOS_CCPSR_LO_GRL)
602 dev_err(dev, "gather return/length error\n");
603 if (v_lo & TALITOS_CCPSR_LO_SB)
604 dev_err(dev, "scatter boundary error\n");
605 if (v_lo & TALITOS_CCPSR_LO_SRL)
606 dev_err(dev, "scatter return/length error\n");
609 flush_channel(dev, ch, error, reset_ch);
612 reset_channel(dev, ch);
614 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
616 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
617 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
618 TALITOS2_CCCR_CONT) && --timeout)
621 dev_err(dev, "failed to restart channel %d\n",
627 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
628 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
629 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
630 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
633 dev_err(dev, "done overflow, internal time out, or "
634 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
636 /* purge request queues */
637 for (ch = 0; ch < priv->num_channels; ch++)
638 flush_channel(dev, ch, -EIO, 1);
640 /* reset and reinitialize the device */
645 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
646 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
648 struct device *dev = data; \
649 struct talitos_private *priv = dev_get_drvdata(dev); \
651 unsigned long flags; \
653 spin_lock_irqsave(&priv->reg_lock, flags); \
654 isr = in_be32(priv->reg + TALITOS_ISR); \
655 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
656 /* Acknowledge interrupt */ \
657 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
658 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
660 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
661 spin_unlock_irqrestore(&priv->reg_lock, flags); \
662 talitos_error(dev, isr & ch_err_mask, isr_lo); \
665 if (likely(isr & ch_done_mask)) { \
666 /* mask further done interrupts. */ \
667 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
668 /* done_task will unmask done interrupts at exit */ \
669 tasklet_schedule(&priv->done_task[tlet]); \
671 spin_unlock_irqrestore(&priv->reg_lock, flags); \
674 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
678 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
680 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
681 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
683 struct device *dev = data; \
684 struct talitos_private *priv = dev_get_drvdata(dev); \
686 unsigned long flags; \
688 spin_lock_irqsave(&priv->reg_lock, flags); \
689 isr = in_be32(priv->reg + TALITOS_ISR); \
690 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
691 /* Acknowledge interrupt */ \
692 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
693 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
695 if (unlikely(isr & ch_err_mask || isr_lo)) { \
696 spin_unlock_irqrestore(&priv->reg_lock, flags); \
697 talitos_error(dev, isr & ch_err_mask, isr_lo); \
700 if (likely(isr & ch_done_mask)) { \
701 /* mask further done interrupts. */ \
702 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
703 /* done_task will unmask done interrupts at exit */ \
704 tasklet_schedule(&priv->done_task[tlet]); \
706 spin_unlock_irqrestore(&priv->reg_lock, flags); \
709 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
713 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
714 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
716 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
722 static int talitos_rng_data_present(struct hwrng *rng, int wait)
724 struct device *dev = (struct device *)rng->priv;
725 struct talitos_private *priv = dev_get_drvdata(dev);
729 for (i = 0; i < 20; i++) {
730 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
731 TALITOS_RNGUSR_LO_OFL;
740 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
742 struct device *dev = (struct device *)rng->priv;
743 struct talitos_private *priv = dev_get_drvdata(dev);
745 /* rng fifo requires 64-bit accesses */
746 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
747 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
752 static int talitos_rng_init(struct hwrng *rng)
754 struct device *dev = (struct device *)rng->priv;
755 struct talitos_private *priv = dev_get_drvdata(dev);
756 unsigned int timeout = TALITOS_TIMEOUT;
758 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
759 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
760 & TALITOS_RNGUSR_LO_RD)
764 dev_err(dev, "failed to reset rng hw\n");
768 /* start generating */
769 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
774 static int talitos_register_rng(struct device *dev)
776 struct talitos_private *priv = dev_get_drvdata(dev);
779 priv->rng.name = dev_driver_string(dev);
780 priv->rng.init = talitos_rng_init;
781 priv->rng.data_present = talitos_rng_data_present;
782 priv->rng.data_read = talitos_rng_data_read;
783 priv->rng.priv = (unsigned long)dev;
785 err = hwrng_register(&priv->rng);
787 priv->rng_registered = true;
792 static void talitos_unregister_rng(struct device *dev)
794 struct talitos_private *priv = dev_get_drvdata(dev);
796 if (!priv->rng_registered)
799 hwrng_unregister(&priv->rng);
800 priv->rng_registered = false;
806 #define TALITOS_CRA_PRIORITY 3000
807 #define TALITOS_MAX_KEY_SIZE 96
808 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
813 __be32 desc_hdr_template;
814 u8 key[TALITOS_MAX_KEY_SIZE];
815 u8 iv[TALITOS_MAX_IV_LENGTH];
817 unsigned int enckeylen;
818 unsigned int authkeylen;
821 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
822 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
824 struct talitos_ahash_req_ctx {
825 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
826 unsigned int hw_context_size;
827 u8 buf[HASH_MAX_BLOCK_SIZE];
828 u8 bufnext[HASH_MAX_BLOCK_SIZE];
832 unsigned int to_hash_later;
834 struct scatterlist bufsl[2];
835 struct scatterlist *psrc;
838 static int aead_setkey(struct crypto_aead *authenc,
839 const u8 *key, unsigned int keylen)
841 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
842 struct crypto_authenc_keys keys;
844 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
847 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
850 memcpy(ctx->key, keys.authkey, keys.authkeylen);
851 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
853 ctx->keylen = keys.authkeylen + keys.enckeylen;
854 ctx->enckeylen = keys.enckeylen;
855 ctx->authkeylen = keys.authkeylen;
860 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
865 * talitos_edesc - s/w-extended descriptor
866 * @src_nents: number of segments in input scatterlist
867 * @dst_nents: number of segments in output scatterlist
868 * @icv_ool: whether ICV is out-of-line
869 * @iv_dma: dma address of iv for checking continuity and link table
870 * @dma_len: length of dma mapped link_tbl space
871 * @dma_link_tbl: bus physical address of link_tbl/buf
872 * @desc: h/w descriptor
873 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
874 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
876 * if decrypting (with authcheck), or either one of src_nents or dst_nents
877 * is greater than 1, an integrity check value is concatenated to the end of the link_tbl data.
880 struct talitos_edesc {
886 dma_addr_t dma_link_tbl;
887 struct talitos_desc desc;
889 struct talitos_ptr link_tbl[0];
894 static void talitos_sg_unmap(struct device *dev,
895 struct talitos_edesc *edesc,
896 struct scatterlist *src,
897 struct scatterlist *dst)
899 unsigned int src_nents = edesc->src_nents ? : 1;
900 unsigned int dst_nents = edesc->dst_nents ? : 1;
903 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
906 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
909 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
912 static void ipsec_esp_unmap(struct device *dev,
913 struct talitos_edesc *edesc,
914 struct aead_request *areq)
916 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
917 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
918 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
919 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
921 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
924 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
929 * ipsec_esp descriptor callbacks
931 static void ipsec_esp_encrypt_done(struct device *dev,
932 struct talitos_desc *desc, void *context,
935 struct aead_request *areq = context;
936 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
937 unsigned int authsize = crypto_aead_authsize(authenc);
938 struct talitos_edesc *edesc;
939 struct scatterlist *sg;
942 edesc = container_of(desc, struct talitos_edesc, desc);
944 ipsec_esp_unmap(dev, edesc, areq);
946 /* copy the generated ICV to dst */
947 if (edesc->icv_ool) {
948 icvdata = &edesc->link_tbl[edesc->src_nents +
949 edesc->dst_nents + 2];
950 sg = sg_last(areq->dst, edesc->dst_nents);
951 memcpy((char *)sg_virt(sg) + sg->length - authsize,
957 aead_request_complete(areq, err);
960 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
961 struct talitos_desc *desc,
962 void *context, int err)
964 struct aead_request *req = context;
965 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
966 unsigned int authsize = crypto_aead_authsize(authenc);
967 struct talitos_edesc *edesc;
968 struct scatterlist *sg;
971 edesc = container_of(desc, struct talitos_edesc, desc);
973 ipsec_esp_unmap(dev, edesc, req);
977 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
978 icv = (char *)sg_virt(sg) + sg->length - authsize;
980 if (edesc->dma_len) {
981 oicv = (char *)&edesc->link_tbl[edesc->src_nents +
982 edesc->dst_nents + 2];
984 icv = oicv + authsize;
986 oicv = (char *)&edesc->link_tbl[0];
988 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
993 aead_request_complete(req, err);
996 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
997 struct talitos_desc *desc,
998 void *context, int err)
1000 struct aead_request *req = context;
1001 struct talitos_edesc *edesc;
1003 edesc = container_of(desc, struct talitos_edesc, desc);
1005 ipsec_esp_unmap(dev, edesc, req);
1007 /* check ICV auth status */
1008 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1009 DESC_HDR_LO_ICCR1_PASS))
1014 aead_request_complete(req, err);
1018 * convert scatterlist to SEC h/w link table format
1019 * stop at cryptlen bytes
1021 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1022 unsigned int offset, int cryptlen,
1023 struct talitos_ptr *link_tbl_ptr)
1025 int n_sg = sg_count;
1028 while (cryptlen && sg && n_sg--) {
1029 unsigned int len = sg_dma_len(sg);
1031 if (offset >= len) {
1041 to_talitos_ptr(link_tbl_ptr + count,
1042 sg_dma_address(sg) + offset, 0);
1043 link_tbl_ptr[count].len = cpu_to_be16(len);
1044 link_tbl_ptr[count].j_extent = 0;
1053 /* tag end of link table */
1055 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
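/*
 * Note (added for clarity): link table entries are themselves struct
 * talitos_ptr entries (SEC2), each carrying a DMA address, a length and a
 * j_extent field.  The last entry is tagged DESC_PTR_LNKTBL_RETURN
 * (above), and the descriptor pointer that refers to the table gets
 * DESC_PTR_LNKTBL_JUMP set in its own j_extent so the SEC walks the table
 * instead of reading data directly.
 */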
1060 static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
1062 struct talitos_ptr *link_tbl_ptr)
1064 return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
1069 * fill in and submit ipsec_esp descriptor
1071 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1072 void (*callback)(struct device *dev,
1073 struct talitos_desc *desc,
1074 void *context, int error))
1076 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1077 unsigned int authsize = crypto_aead_authsize(aead);
1078 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1079 struct device *dev = ctx->dev;
1080 struct talitos_desc *desc = &edesc->desc;
1081 unsigned int cryptlen = areq->cryptlen;
1082 unsigned int ivsize = crypto_aead_ivsize(aead);
1085 int sg_link_tbl_len;
1088 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1091 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1092 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1095 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1097 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1099 &edesc->link_tbl[tbl_off])) > 1) {
1100 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1101 sizeof(struct talitos_ptr), 0);
1102 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1104 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1105 edesc->dma_len, DMA_BIDIRECTIONAL);
1109 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1110 desc->ptr[1].j_extent = 0;
1114 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
1115 desc->ptr[2].len = cpu_to_be16(ivsize);
1116 desc->ptr[2].j_extent = 0;
1119 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1120 (char *)&ctx->key + ctx->authkeylen,
1125 * map and adjust cipher len to aead request cryptlen.
1126 * extent is the number of HMAC bytes appended to the ciphertext,
1127 * typically 12 for ipsec
1129 desc->ptr[4].len = cpu_to_be16(cryptlen);
1130 desc->ptr[4].j_extent = authsize;
1132 sg_link_tbl_len = cryptlen;
1133 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1134 sg_link_tbl_len += authsize;
1136 if (sg_count == 1) {
1137 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
1139 } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
1140 areq->assoclen, sg_link_tbl_len,
1141 &edesc->link_tbl[tbl_off])) >
1143 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1144 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1146 sizeof(struct talitos_ptr), 0);
1147 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1152 copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
1156 desc->ptr[5].len = cpu_to_be16(cryptlen);
1157 desc->ptr[5].j_extent = authsize;
1159 if (areq->src != areq->dst)
1160 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
1163 edesc->icv_ool = false;
1165 if (sg_count == 1) {
1166 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
1168 } else if ((sg_count =
1169 sg_to_link_tbl_offset(areq->dst, sg_count,
1170 areq->assoclen, cryptlen,
1171 &edesc->link_tbl[tbl_off])) > 1) {
1172 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1174 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1175 tbl_off * sizeof(struct talitos_ptr), 0);
1177 /* Add an entry to the link table for ICV data */
1178 tbl_ptr += sg_count - 1;
1179 tbl_ptr->j_extent = 0;
1181 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1182 tbl_ptr->len = cpu_to_be16(authsize);
1184 /* icv data follows link tables */
1185 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1186 (edesc->src_nents + edesc->dst_nents +
1187 2) * sizeof(struct talitos_ptr) +
1189 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1190 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1191 edesc->dma_len, DMA_BIDIRECTIONAL);
1193 edesc->icv_ool = true;
1195 copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
1199 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1202 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1203 if (ret != -EINPROGRESS) {
1204 ipsec_esp_unmap(dev, edesc, areq);
1211 * allocate and map the extended descriptor
1213 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1214 struct scatterlist *src,
1215 struct scatterlist *dst,
1217 unsigned int assoclen,
1218 unsigned int cryptlen,
1219 unsigned int authsize,
1220 unsigned int ivsize,
1225 struct talitos_edesc *edesc;
1226 int src_nents, dst_nents, alloc_len, dma_len;
1227 dma_addr_t iv_dma = 0;
1228 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1230 struct talitos_private *priv = dev_get_drvdata(dev);
1231 bool is_sec1 = has_ftr_sec1(priv);
1232 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1234 if (cryptlen + authsize > max_len) {
1235 dev_err(dev, "length exceeds h/w max limit\n");
1236 return ERR_PTR(-EINVAL);
1240 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1242 if (!dst || dst == src) {
1243 src_nents = sg_nents_for_len(src,
1244 assoclen + cryptlen + authsize);
1245 src_nents = (src_nents == 1) ? 0 : src_nents;
1246 dst_nents = dst ? src_nents : 0;
1247 } else { /* dst && dst != src*/
1248 src_nents = sg_nents_for_len(src, assoclen + cryptlen +
1249 (encrypt ? 0 : authsize));
1250 src_nents = (src_nents == 1) ? 0 : src_nents;
1251 dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
1252 (encrypt ? authsize : 0));
1253 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1257 * allocate space for base edesc plus the link tables,
1258 * allowing for two separate entries for AD and generated ICV (+ 2),
1259 * and space for two sets of ICVs (stashed and generated)
1261 alloc_len = sizeof(struct talitos_edesc);
1262 if (src_nents || dst_nents) {
1264 dma_len = (src_nents ? cryptlen : 0) +
1265 (dst_nents ? cryptlen : 0);
1267 dma_len = (src_nents + dst_nents + 2) *
1268 sizeof(struct talitos_ptr) + authsize * 2;
1269 alloc_len += dma_len;
1272 alloc_len += icv_stashing ? authsize : 0;
1275 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1278 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1280 dev_err(dev, "could not allocate edescriptor\n");
1281 return ERR_PTR(-ENOMEM);
1284 edesc->src_nents = src_nents;
1285 edesc->dst_nents = dst_nents;
1286 edesc->iv_dma = iv_dma;
1287 edesc->dma_len = dma_len;
1289 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1296 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1297 int icv_stashing, bool encrypt)
1299 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1300 unsigned int authsize = crypto_aead_authsize(authenc);
1301 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1302 unsigned int ivsize = crypto_aead_ivsize(authenc);
1304 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1305 iv, areq->assoclen, areq->cryptlen,
1306 authsize, ivsize, icv_stashing,
1307 areq->base.flags, encrypt);
1310 static int aead_encrypt(struct aead_request *req)
1312 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1313 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1314 struct talitos_edesc *edesc;
1316 /* allocate extended descriptor */
1317 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1319 return PTR_ERR(edesc);
1322 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1324 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1327 static int aead_decrypt(struct aead_request *req)
1329 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1330 unsigned int authsize = crypto_aead_authsize(authenc);
1331 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1332 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1333 struct talitos_edesc *edesc;
1334 struct scatterlist *sg;
1337 req->cryptlen -= authsize;
1339 /* allocate extended descriptor */
1340 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1342 return PTR_ERR(edesc);
1344 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1345 ((!edesc->src_nents && !edesc->dst_nents) ||
1346 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1348 /* decrypt and check the ICV */
1349 edesc->desc.hdr = ctx->desc_hdr_template |
1350 DESC_HDR_DIR_INBOUND |
1351 DESC_HDR_MODE1_MDEU_CICV;
1353 /* reset integrity check result bits */
1354 edesc->desc.hdr_lo = 0;
1356 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1359 /* Have to check the ICV with software */
1360 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1362 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1364 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1365 edesc->dst_nents + 2];
1367 icvdata = &edesc->link_tbl[0];
1369 sg = sg_last(req->src, edesc->src_nents ? : 1);
1371 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1373 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1376 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1377 const u8 *key, unsigned int keylen)
1379 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1381 memcpy(&ctx->key, key, keylen);
1382 ctx->keylen = keylen;
1387 static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1388 struct scatterlist *dst, unsigned int len,
1389 struct talitos_edesc *edesc)
1391 struct talitos_private *priv = dev_get_drvdata(dev);
1392 bool is_sec1 = has_ftr_sec1(priv);
1395 if (!edesc->src_nents) {
1396 dma_unmap_sg(dev, src, 1,
1397 dst != src ? DMA_TO_DEVICE
1398 : DMA_BIDIRECTIONAL);
1400 if (dst && edesc->dst_nents) {
1401 dma_sync_single_for_device(dev,
1402 edesc->dma_link_tbl + len,
1403 len, DMA_FROM_DEVICE);
1404 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1405 edesc->buf + len, len);
1406 } else if (dst && dst != src) {
1407 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1410 talitos_sg_unmap(dev, edesc, src, dst);
1414 static void common_nonsnoop_unmap(struct device *dev,
1415 struct talitos_edesc *edesc,
1416 struct ablkcipher_request *areq)
1418 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1420 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
1421 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1422 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1425 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1429 static void ablkcipher_done(struct device *dev,
1430 struct talitos_desc *desc, void *context,
1433 struct ablkcipher_request *areq = context;
1434 struct talitos_edesc *edesc;
1436 edesc = container_of(desc, struct talitos_edesc, desc);
1438 common_nonsnoop_unmap(dev, edesc, areq);
1442 areq->base.complete(&areq->base, err);
1445 int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1446 unsigned int len, struct talitos_edesc *edesc,
1447 enum dma_data_direction dir, struct talitos_ptr *ptr)
1450 struct talitos_private *priv = dev_get_drvdata(dev);
1451 bool is_sec1 = has_ftr_sec1(priv);
1453 to_talitos_ptr_len(ptr, len, is_sec1);
1456 sg_count = edesc->src_nents ? : 1;
1458 if (sg_count == 1) {
1459 dma_map_sg(dev, src, 1, dir);
1460 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1462 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1463 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1464 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1465 len, DMA_TO_DEVICE);
1468 to_talitos_ptr_extent_clear(ptr, is_sec1);
1470 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
1472 if (sg_count == 1) {
1473 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1475 sg_count = sg_to_link_tbl(src, sg_count, len,
1476 &edesc->link_tbl[0]);
1478 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1479 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1480 dma_sync_single_for_device(dev,
1481 edesc->dma_link_tbl,
1485 /* Only one segment now, so no link tbl needed */
1486 to_talitos_ptr(ptr, sg_dma_address(src),
1494 void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1495 unsigned int len, struct talitos_edesc *edesc,
1496 enum dma_data_direction dir,
1497 struct talitos_ptr *ptr, int sg_count)
1499 struct talitos_private *priv = dev_get_drvdata(dev);
1500 bool is_sec1 = has_ftr_sec1(priv);
1502 if (dir != DMA_NONE)
1503 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
1505 to_talitos_ptr_len(ptr, len, is_sec1);
1508 if (sg_count == 1) {
1509 if (dir != DMA_NONE)
1510 dma_map_sg(dev, dst, 1, dir);
1511 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1513 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1514 dma_sync_single_for_device(dev,
1515 edesc->dma_link_tbl + len,
1516 len, DMA_FROM_DEVICE);
1519 to_talitos_ptr_extent_clear(ptr, is_sec1);
1521 if (sg_count == 1) {
1522 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1524 struct talitos_ptr *link_tbl_ptr =
1525 &edesc->link_tbl[edesc->src_nents + 1];
1527 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1528 (edesc->src_nents + 1) *
1529 sizeof(struct talitos_ptr), 0);
1530 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1531 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1532 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1539 static int common_nonsnoop(struct talitos_edesc *edesc,
1540 struct ablkcipher_request *areq,
1541 void (*callback) (struct device *dev,
1542 struct talitos_desc *desc,
1543 void *context, int error))
1545 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1546 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1547 struct device *dev = ctx->dev;
1548 struct talitos_desc *desc = &edesc->desc;
1549 unsigned int cryptlen = areq->nbytes;
1550 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1552 struct talitos_private *priv = dev_get_drvdata(dev);
1553 bool is_sec1 = has_ftr_sec1(priv);
1555 /* first DWORD empty */
1556 desc->ptr[0] = zero_entry;
1559 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1560 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1561 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
1564 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1565 (char *)&ctx->key, DMA_TO_DEVICE);
1570 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1571 (areq->src == areq->dst) ?
1572 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1576 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1577 (areq->src == areq->dst) ? DMA_NONE
1579 &desc->ptr[4], sg_count);
1582 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1585 /* last DWORD empty */
1586 desc->ptr[6] = zero_entry;
1588 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1589 if (ret != -EINPROGRESS) {
1590 common_nonsnoop_unmap(dev, edesc, areq);
1596 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1599 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1600 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1601 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1603 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1604 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1605 areq->base.flags, encrypt);
1608 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1610 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1611 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1612 struct talitos_edesc *edesc;
1614 /* allocate extended descriptor */
1615 edesc = ablkcipher_edesc_alloc(areq, true);
1617 return PTR_ERR(edesc);
1620 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1622 return common_nonsnoop(edesc, areq, ablkcipher_done);
1625 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1627 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1628 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1629 struct talitos_edesc *edesc;
1631 /* allocate extended descriptor */
1632 edesc = ablkcipher_edesc_alloc(areq, false);
1634 return PTR_ERR(edesc);
1636 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1638 return common_nonsnoop(edesc, areq, ablkcipher_done);
1641 static void common_nonsnoop_hash_unmap(struct device *dev,
1642 struct talitos_edesc *edesc,
1643 struct ahash_request *areq)
1645 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1646 struct talitos_private *priv = dev_get_drvdata(dev);
1647 bool is_sec1 = has_ftr_sec1(priv);
1649 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1651 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1653 /* When using hashctx-in, must unmap it. */
1654 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1655 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1658 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
1659 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1663 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1668 static void ahash_done(struct device *dev,
1669 struct talitos_desc *desc, void *context,
1672 struct ahash_request *areq = context;
1673 struct talitos_edesc *edesc =
1674 container_of(desc, struct talitos_edesc, desc);
1675 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1677 if (!req_ctx->last && req_ctx->to_hash_later) {
1678 /* Position any partial block for next update/final/finup */
1679 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1680 req_ctx->nbuf = req_ctx->to_hash_later;
1682 common_nonsnoop_hash_unmap(dev, edesc, areq);
1686 areq->base.complete(&areq->base, err);
1690 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1691 * ourselves and submit a padded block
1693 void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1694 struct talitos_edesc *edesc,
1695 struct talitos_ptr *ptr)
1697 static u8 padded_hash[64] = {
1698 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1699 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1700 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1701 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1704 pr_err_once("Bug in SEC1, padding ourselves\n");
1705 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1706 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1707 (char *)padded_hash, DMA_TO_DEVICE);
1710 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1711 struct ahash_request *areq, unsigned int length,
1712 void (*callback) (struct device *dev,
1713 struct talitos_desc *desc,
1714 void *context, int error))
1716 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1717 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1718 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1719 struct device *dev = ctx->dev;
1720 struct talitos_desc *desc = &edesc->desc;
1722 struct talitos_private *priv = dev_get_drvdata(dev);
1723 bool is_sec1 = has_ftr_sec1(priv);
1725 /* first DWORD empty */
1726 desc->ptr[0] = zero_entry;
1728 /* hash context in */
1729 if (!req_ctx->first || req_ctx->swinit) {
1730 map_single_talitos_ptr(dev, &desc->ptr[1],
1731 req_ctx->hw_context_size,
1732 (char *)req_ctx->hw_context,
1734 req_ctx->swinit = 0;
1736 desc->ptr[1] = zero_entry;
1737 /* Indicate next op is not the first. */
1743 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1744 (char *)&ctx->key, DMA_TO_DEVICE);
1746 desc->ptr[2] = zero_entry;
1751 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1752 DMA_TO_DEVICE, &desc->ptr[3]);
1754 /* fifth DWORD empty */
1755 desc->ptr[4] = zero_entry;
1757 /* hash/HMAC out -or- hash context out */
1759 map_single_talitos_ptr(dev, &desc->ptr[5],
1760 crypto_ahash_digestsize(tfm),
1761 areq->result, DMA_FROM_DEVICE);
1763 map_single_talitos_ptr(dev, &desc->ptr[5],
1764 req_ctx->hw_context_size,
1765 req_ctx->hw_context, DMA_FROM_DEVICE);
1767 /* last DWORD empty */
1768 desc->ptr[6] = zero_entry;
1770 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1771 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1773 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1774 if (ret != -EINPROGRESS) {
1775 common_nonsnoop_hash_unmap(dev, edesc, areq);
1781 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1782 unsigned int nbytes)
1784 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1785 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1786 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1788 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1789 nbytes, 0, 0, 0, areq->base.flags, false);
1792 static int ahash_init(struct ahash_request *areq)
1794 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1795 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1797 /* Initialize the context */
1799 req_ctx->first = 1; /* first indicates h/w must init its context */
1800 req_ctx->swinit = 0; /* assume h/w init of context */
1801 req_ctx->hw_context_size =
1802 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1803 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1804 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1810 * on h/w without explicit sha224 support, we initialize h/w context
1811 * manually with sha224 constants, and tell it to run sha256.
1813 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1815 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1818 req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1820 req_ctx->hw_context[0] = SHA224_H0;
1821 req_ctx->hw_context[1] = SHA224_H1;
1822 req_ctx->hw_context[2] = SHA224_H2;
1823 req_ctx->hw_context[3] = SHA224_H3;
1824 req_ctx->hw_context[4] = SHA224_H4;
1825 req_ctx->hw_context[5] = SHA224_H5;
1826 req_ctx->hw_context[6] = SHA224_H6;
1827 req_ctx->hw_context[7] = SHA224_H7;
1829 /* init 64-bit count */
1830 req_ctx->hw_context[8] = 0;
1831 req_ctx->hw_context[9] = 0;
1836 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1838 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1839 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1840 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1841 struct talitos_edesc *edesc;
1842 unsigned int blocksize =
1843 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1844 unsigned int nbytes_to_hash;
1845 unsigned int to_hash_later;
1848 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1849 /* Buffer up to one whole block */
1850 sg_copy_to_buffer(areq->src,
1851 sg_nents_for_len(areq->src, nbytes),
1852 req_ctx->buf + req_ctx->nbuf, nbytes);
1853 req_ctx->nbuf += nbytes;
1857 /* At least (blocksize + 1) bytes are available to hash */
1858 nbytes_to_hash = nbytes + req_ctx->nbuf;
1859 to_hash_later = nbytes_to_hash & (blocksize - 1);
1863 else if (to_hash_later)
1864 /* There is a partial block. Hash the full block(s) now */
1865 nbytes_to_hash -= to_hash_later;
1867 /* Keep one block buffered */
1868 nbytes_to_hash -= blocksize;
1869 to_hash_later = blocksize;
1872 /* Chain in any previously buffered data */
1873 if (req_ctx->nbuf) {
1874 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1875 sg_init_table(req_ctx->bufsl, nsg);
1876 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1878 sg_chain(req_ctx->bufsl, 2, areq->src);
1879 req_ctx->psrc = req_ctx->bufsl;
1881 req_ctx->psrc = areq->src;
1883 if (to_hash_later) {
1884 int nents = sg_nents_for_len(areq->src, nbytes);
1885 sg_pcopy_to_buffer(areq->src, nents,
1888 nbytes - to_hash_later);
1890 req_ctx->to_hash_later = to_hash_later;
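/*
 * Worked example (illustrative): with a 64-byte block size, 10 bytes
 * already buffered and a 100-byte update, nbytes_to_hash = 110 and
 * to_hash_later = 110 % 64 = 46, so 64 bytes are hashed now and 46 bytes
 * are copied to bufnext for the next update/final/finup.  If the total is
 * an exact multiple of the block size (and this is not the final
 * request), one whole block is held back instead.
 */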
1892 /* Allocate extended descriptor */
1893 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1895 return PTR_ERR(edesc);
1897 edesc->desc.hdr = ctx->desc_hdr_template;
1899 /* On last one, request SEC to pad; otherwise continue */
1901 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1903 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1905 /* request SEC to INIT hash. */
1906 if (req_ctx->first && !req_ctx->swinit)
1907 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1909 /* When the tfm context has a keylen, it's an HMAC.
1910 * A first or last (i.e. not middle) descriptor must request HMAC.
1912 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1913 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1915 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1919 static int ahash_update(struct ahash_request *areq)
1921 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1925 return ahash_process_req(areq, areq->nbytes);
1928 static int ahash_final(struct ahash_request *areq)
1930 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1934 return ahash_process_req(areq, 0);
1937 static int ahash_finup(struct ahash_request *areq)
1939 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1943 return ahash_process_req(areq, areq->nbytes);
1946 static int ahash_digest(struct ahash_request *areq)
1948 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1949 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1954 return ahash_process_req(areq, areq->nbytes);
1957 struct keyhash_result {
1958 struct completion completion;
1962 static void keyhash_complete(struct crypto_async_request *req, int err)
1964 struct keyhash_result *res = req->data;
1966 if (err == -EINPROGRESS)
1970 complete(&res->completion);
1973 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1976 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1978 struct scatterlist sg[1];
1979 struct ahash_request *req;
1980 struct keyhash_result hresult;
1983 init_completion(&hresult.completion);
1985 req = ahash_request_alloc(tfm, GFP_KERNEL);
1989 /* Keep tfm keylen == 0 during hash of the long key */
1991 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1992 keyhash_complete, &hresult);
1994 sg_init_one(&sg[0], key, keylen);
1996 ahash_request_set_crypt(req, sg, hash, keylen);
1997 ret = crypto_ahash_digest(req);
2003 ret = wait_for_completion_interruptible(
2004 &hresult.completion);
2011 ahash_request_free(req);
2016 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2017 unsigned int keylen)
2019 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2020 unsigned int blocksize =
2021 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2022 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2023 unsigned int keysize = keylen;
2024 u8 hash[SHA512_DIGEST_SIZE];
2027 if (keylen <= blocksize)
2028 memcpy(ctx->key, key, keysize);
2030 /* Must get the hash of the long key */
2031 ret = keyhash(tfm, key, keylen, hash);
2034 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2038 keysize = digestsize;
2039 memcpy(ctx->key, hash, digestsize);
2042 ctx->keylen = keysize;
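/*
 * Note (added for clarity): as with HMAC generally (RFC 2104), keys
 * longer than the underlying block size are first reduced to their
 * digest -- keyhash() above runs the long key through the same hash and
 * the digest becomes the working key.
 */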
2048 struct talitos_alg_template {
2051 struct crypto_alg crypto;
2052 struct ahash_alg hash;
2053 struct aead_alg aead;
2055 __be32 desc_hdr_template;
2058 static struct talitos_alg_template driver_algs[] = {
2059 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2060 { .type = CRYPTO_ALG_TYPE_AEAD,
2063 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2064 .cra_driver_name = "authenc-hmac-sha1-"
2066 .cra_blocksize = AES_BLOCK_SIZE,
2067 .cra_flags = CRYPTO_ALG_ASYNC,
2069 .ivsize = AES_BLOCK_SIZE,
2070 .maxauthsize = SHA1_DIGEST_SIZE,
2072 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2073 DESC_HDR_SEL0_AESU |
2074 DESC_HDR_MODE0_AESU_CBC |
2075 DESC_HDR_SEL1_MDEUA |
2076 DESC_HDR_MODE1_MDEU_INIT |
2077 DESC_HDR_MODE1_MDEU_PAD |
2078 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2080 { .type = CRYPTO_ALG_TYPE_AEAD,
2083 .cra_name = "authenc(hmac(sha1),"
2085 .cra_driver_name = "authenc-hmac-sha1-"
2087 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2088 .cra_flags = CRYPTO_ALG_ASYNC,
2090 .ivsize = DES3_EDE_BLOCK_SIZE,
2091 .maxauthsize = SHA1_DIGEST_SIZE,
2093 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2095 DESC_HDR_MODE0_DEU_CBC |
2096 DESC_HDR_MODE0_DEU_3DES |
2097 DESC_HDR_SEL1_MDEUA |
2098 DESC_HDR_MODE1_MDEU_INIT |
2099 DESC_HDR_MODE1_MDEU_PAD |
2100 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2102 { .type = CRYPTO_ALG_TYPE_AEAD,
2105 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2106 .cra_driver_name = "authenc-hmac-sha224-"
2108 .cra_blocksize = AES_BLOCK_SIZE,
2109 .cra_flags = CRYPTO_ALG_ASYNC,
2111 .ivsize = AES_BLOCK_SIZE,
2112 .maxauthsize = SHA224_DIGEST_SIZE,
2114 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2115 DESC_HDR_SEL0_AESU |
2116 DESC_HDR_MODE0_AESU_CBC |
2117 DESC_HDR_SEL1_MDEUA |
2118 DESC_HDR_MODE1_MDEU_INIT |
2119 DESC_HDR_MODE1_MDEU_PAD |
2120 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2122 { .type = CRYPTO_ALG_TYPE_AEAD,
2125 .cra_name = "authenc(hmac(sha224),"
2127 .cra_driver_name = "authenc-hmac-sha224-"
2129 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2130 .cra_flags = CRYPTO_ALG_ASYNC,
2132 .ivsize = DES3_EDE_BLOCK_SIZE,
2133 .maxauthsize = SHA224_DIGEST_SIZE,
2135 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2137 DESC_HDR_MODE0_DEU_CBC |
2138 DESC_HDR_MODE0_DEU_3DES |
2139 DESC_HDR_SEL1_MDEUA |
2140 DESC_HDR_MODE1_MDEU_INIT |
2141 DESC_HDR_MODE1_MDEU_PAD |
2142 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2144 { .type = CRYPTO_ALG_TYPE_AEAD,
2147 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2148 .cra_driver_name = "authenc-hmac-sha256-"
2150 .cra_blocksize = AES_BLOCK_SIZE,
2151 .cra_flags = CRYPTO_ALG_ASYNC,
2153 .ivsize = AES_BLOCK_SIZE,
2154 .maxauthsize = SHA256_DIGEST_SIZE,
2156 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2157 DESC_HDR_SEL0_AESU |
2158 DESC_HDR_MODE0_AESU_CBC |
2159 DESC_HDR_SEL1_MDEUA |
2160 DESC_HDR_MODE1_MDEU_INIT |
2161 DESC_HDR_MODE1_MDEU_PAD |
2162 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2164 { .type = CRYPTO_ALG_TYPE_AEAD,
2167 .cra_name = "authenc(hmac(sha256),"
2169 .cra_driver_name = "authenc-hmac-sha256-"
2171 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2172 .cra_flags = CRYPTO_ALG_ASYNC,
2174 .ivsize = DES3_EDE_BLOCK_SIZE,
2175 .maxauthsize = SHA256_DIGEST_SIZE,
2177 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2179 DESC_HDR_MODE0_DEU_CBC |
2180 DESC_HDR_MODE0_DEU_3DES |
2181 DESC_HDR_SEL1_MDEUA |
2182 DESC_HDR_MODE1_MDEU_INIT |
2183 DESC_HDR_MODE1_MDEU_PAD |
2184 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
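
/*
 * Note on the templates above (an illustrative reading, not a restatement of
 * the hardware manual): each desc_hdr_template names the descriptor type plus
 * the primary (SEL0/MODE0) and, optionally, the secondary (SEL1/MODE1)
 * execution unit a request needs.  For example, the
 * "authenc(hmac(sha1),cbc(aes))" entry asks for an IPSEC_ESP descriptor with
 * the AESU as primary EU in CBC mode and MDEU-A as secondary EU for
 * HMAC-SHA1; hw_supports() below compares exactly these fields against the
 * capability masks read from the device tree.
 */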
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
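
/*
 * Channel selection sketch (illustrative; relies on the power-of-two
 * num_channels that talitos_probe() enforces): because the mask used below is
 * num_channels - 1, atomic_inc_return() spreads tfms across channels
 * round-robin.  With 4 channels, successive tfm initialisations land on
 * channels 1, 2, 3, 0, 1, ...
 */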
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);
	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));
	return 0;
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
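/*
 * Worked example (illustrative): the "cbc(aes)" template above carries only
 * DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU and DESC_HDR_SEL0_AESU, so the check
 * below requires just the matching descriptor-type bit in priv->desc_types
 * and the AESU bit in priv->exec_units; no secondary-EU bit is needed because
 * SECONDARY_EU() evaluates to zero for that template.
 */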
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; priv->chan && i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);
	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);
	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
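
/*
 * IRQ wiring overview (a summary of the request logic below, not of any
 * particular board's device tree): SEC1 devices use a single combined
 * interrupt handled by talitos1_interrupt_4ch.  SEC2+ devices provide either
 * one interrupt covering all four channels (talitos2_interrupt_4ch) or two
 * interrupts, one for channels 0/2 and one for channels 1/3
 * (talitos2_interrupt_ch0_2 / talitos2_interrupt_ch1_3).
 */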
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}
	return err;
}
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);
	dev_set_drvdata(dev, priv);
	priv->ofdev = ofdev;
	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}
	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}
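
	/*
	 * Example of the properties consumed above.  The values shown are
	 * purely illustrative and not copied from any particular board's
	 * dts; only the property names come from this driver:
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */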
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}
	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}
	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}
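
	/*
	 * Note on the initialisation above (the submit path itself is
	 * earlier in this file and outside this excerpt): submit_count
	 * starts at -(chfifo_len - 1), so a channel can presumably hold
	 * chfifo_len - 1 outstanding requests before the counter turns
	 * positive, which the submit path uses as its back-pressure signal.
	 */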
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}
	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{ .compatible = "fsl,sec1.0", },
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{ .compatible = "fsl,sec2.0", },
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");