/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

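/* The driver manages a single CCP device: ccp_add_device()/ccp_del_device()
 * record the active instance and ccp_get_device() retrieves it for command
 * submission.
 */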
static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
        return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
        ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
        ccp_dev = NULL;
}

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * queued only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case it is placed on the backlog list and -EBUSY is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or of advancement out of the backlog. If the cmd
 * has advanced out of the backlog, the "err" value of the
 * callback will be -EINPROGRESS. Any other "err" value during
 * the callback is the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp = ccp_get_device();
        unsigned long flags;
        unsigned int i;
        int ret;

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
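
/*
 * Illustrative sketch (not part of the driver) of how a caller might
 * submit a command.  The callback my_ccp_done() and the context pointer
 * my_ctx are hypothetical names; the caller fills in a struct ccp_cmd
 * describing the operation first.
 *
 *      static void my_ccp_done(void *data, int err)
 *      {
 *              if (err == -EINPROGRESS)
 *                      return;         <cmd advanced out of the backlog>
 *              <any other err is the final result of the operation>
 *      }
 *
 *      cmd->flags |= CCP_CMD_MAY_BACKLOG;
 *      cmd->callback = my_ccp_done;
 *      cmd->data = my_ctx;
 *      ret = ccp_enqueue_cmd(cmd);
 *      if ((ret != -EINPROGRESS) && (ret != -EBUSY))
 *              <cmd was not queued; handle the error>
 */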

/* Push a backlogged cmd onto the active list: tell the caller the cmd is
 * now in progress (err == -EINPROGRESS), then queue it and wake an idle
 * queue kthread, mirroring the non-backlog path of ccp_enqueue_cmd().
 */
static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}

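/* Called by a queue kthread to fetch the next cmd from the active list.
 * Returns NULL if there is no work, or if the device is suspending, in
 * which case the queue marks itself suspended and wakes the suspend wait
 * queue.  If a backlogged cmd is pending, work is scheduled to move it
 * onto the active list via ccp_do_cmd_backlog().
 */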
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}

/* Deliver the final result of a completed cmd to its owner */
static void ccp_do_cmd_complete(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);

        cmd->callback(cmd->data, cmd->ret);
}

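/* Per-queue kthread: sleep until woken (ccp_enqueue_cmd() or the backlog
 * work wakes an idle queue), run each dequeued cmd on this hardware queue,
 * then punt the completion callback to the system workqueue so it never
 * runs in the kthread itself.
 */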
static int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                INIT_WORK(&cmd->work, ccp_do_cmd_complete);
                schedule_work(&cmd->work);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

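/* hwrng read callback: return up to four bytes from the TRNG output
 * register.  A return of 0 means no data was available on this read;
 * -EIO is returned only after too many consecutive empty reads
 * (TRNG_RETRIES).
 */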
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /*
         * Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
        if (ccp == NULL) {
                dev_err(dev, "unable to allocate device struct\n");
                return NULL;
        }
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->ksb_mutex);
        ccp->ksb_count = KSB_COUNT;
        ccp->ksb_start = 0;

        return ccp;
}

/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, qim, i;
        int ret;

        /* Find available queues */
        qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
                cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
                ccp->ksb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);
                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                qim |= cmd_q->int_ok | cmd_q->int_err;

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an irq */
        ret = ccp->get_irq(ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the queues used to wait for KSB space and suspend */
        init_waitqueue_head(&ccp->ksb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "ccp-q%u", cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        /* Register the RNG */
        ccp->hwrng.name = "ccp-rng";
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret) {
                dev_err(dev, "error registering hwrng (%d)\n", ret);
                goto e_kthread;
        }

        /* Make the device struct available before enabling interrupts */
        ccp_add_device(ccp);

        /* Enable interrupts */
        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

        return 0;

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        ccp->free_irq(ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}
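
/*
 * Illustrative note (not part of the driver): the bus-specific glue
 * (e.g. ccp-pci.c) is expected to drive the functions above roughly as
 * follows; the helper names in angle brackets are placeholders:
 *
 *      ccp = ccp_alloc_struct(dev);
 *      ccp->io_regs = <mapped MMIO registers>;
 *      ccp->get_irq = <bus irq request helper>;
 *      ccp->free_irq = <bus irq release helper>;
 *      ret = ccp_init(ccp);
 *      ...
 *      ccp_destroy(ccp);               <on device removal>
 */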

/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int qim, i;

        /* Remove general access to the device struct */
        ccp_del_device(ccp);

        /* Unregister the RNG */
        hwrng_unregister(&ccp->hwrng);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        /* Build queue interrupt mask (two interrupt masks per queue) */
        qim = 0;
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                qim |= cmd_q->int_ok | cmd_q->int_err;
        }

        /* Disable and clear interrupts */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        ccp->free_irq(ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queues */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct device *dev = data;
        struct ccp_device *ccp = dev_get_drvdata(dev);
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }

        return IRQ_HANDLED;
}

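/* Power management helper: once ccp->suspending is set, each queue kthread
 * marks itself suspended in ccp_dequeue_cmd(); the bus suspend code can
 * wait on ccp->suspend_queue until this reports that all queues are idle.
 */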
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
#endif

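/* The CCP is matched on AMD family 22 (16h) processors; ccp_mod_init()
 * further limits support to models 48-63 (0x30-0x3f).
 */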
static const struct x86_cpu_id ccp_support[] = {
        { X86_VENDOR_AMD, 22, },
        { },
};

static int __init ccp_mod_init(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
        int ret;

        if (!x86_match_cpu(ccp_support))
                return -ENODEV;

        switch (cpuinfo->x86) {
        case 22:
                if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
                        return -ENODEV;

                ret = ccp_pci_init();
                if (ret)
                        return ret;

                /* Don't leave the driver loaded if init failed */
                if (!ccp_get_device()) {
                        ccp_pci_exit();
                        return -ENODEV;
                }

                return 0;
        }

        return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

        switch (cpuinfo->x86) {
        case 22:
                ccp_pci_exit();
                break;
        }
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);