/* drivers/crypto/ccp/ccp-dev.c */
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
        struct completion completion;
        struct ccp_cmd *cmd;
};

static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
        return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
        ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
        ccp_dev = NULL;
}

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * accepted only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case it is placed on the backlog list and -EBUSY is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or of advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp = ccp_get_device();
        unsigned long flags;
        unsigned int i;
        int ret;

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
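
/*
 * Usage sketch (illustrative only, not part of the driver): a caller fills
 * in a ccp_cmd, supplies a callback and submits it with ccp_enqueue_cmd().
 * The names my_req, my_done and my_submit are hypothetical, and the
 * engine-specific fields of the cmd are omitted.
 *
 *      static void my_done(void *data, int err)
 *      {
 *              struct my_req *req = data;
 *
 *              if (err == -EINPROGRESS)
 *                      return;         // cmd advanced out of the backlog
 *
 *              my_req_finish(req, err);        // final result of the op
 *      }
 *
 *      static int my_submit(struct my_req *req)
 *      {
 *              int ret;
 *
 *              req->cmd.flags = CCP_CMD_MAY_BACKLOG;
 *              req->cmd.callback = my_done;
 *              req->cmd.data = req;
 *
 *              ret = ccp_enqueue_cmd(&req->cmd);
 *              if (ret != -EINPROGRESS && ret != -EBUSY)
 *                      return ret;     // not queued (e.g. -ENODEV/-EINVAL)
 *              return 0;               // queued (or backlogged)
 *      }
 */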

static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}

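/*
 * Fetch the next cmd for a queue to run.  If the device is suspending,
 * mark the queue suspended instead.  As a side effect, if a backlogged
 * cmd is waiting, pull it off the backlog list and schedule
 * ccp_do_cmd_backlog() to notify its caller and requeue it.
 */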
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
        struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
        struct ccp_cmd *cmd = tdata->cmd;

        cmd->callback(cmd->data, cmd->ret);
        complete(&tdata->completion);
}

static int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;
        struct ccp_tasklet_data tdata;
        struct tasklet_struct tasklet;

        tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

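        /*
         * Mark the task interruptible before checking kthread_should_stop()
         * so that a wake_up_process() arriving between the check and
         * schedule() is not missed.
         */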
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                tdata.cmd = cmd;
                init_completion(&tdata.completion);
                tasklet_schedule(&tasklet);
                wait_for_completion(&tdata.completion);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

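/*
 * hwrng ->read() callback: copy one 32-bit sample from the TRNG output
 * register into the caller's buffer.  Returns the number of bytes copied,
 * 0 if no data was available on this read, or -EIO once TRNG_RETRIES
 * consecutive reads have come back empty.
 */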
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /*
         * Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
        if (ccp == NULL) {
                dev_err(dev, "unable to allocate device struct\n");
                return NULL;
        }
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->ksb_mutex);
        ccp->ksb_count = KSB_COUNT;
        ccp->ksb_start = 0;

        return ccp;
}

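/*
 * Lifecycle sketch (illustrative only, not the actual bus code): the
 * bus-specific glue (e.g. ccp-pci.c) is expected to allocate the device
 * struct, fill in the MMIO mapping and irq callbacks, and then hand it to
 * ccp_init()/ccp_destroy().  The field names follow their use in this file;
 * the surrounding probe/remove skeleton is assumed.
 *
 *      ccp = ccp_alloc_struct(dev);
 *      if (!ccp)
 *              return -ENOMEM;
 *      ccp->io_regs = ...;             // mapped register space
 *      ccp->get_irq = ...;             // bus-specific irq request
 *      ccp->free_irq = ...;            // bus-specific irq release
 *      ret = ccp_init(ccp);            // queues, kthreads, hwrng, irq
 *      ...
 *      ccp_destroy(ccp);               // on remove: undo all of the above
 */
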
/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, qim, i;
        int ret;

        /* Find available queues */
        qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
                cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
                ccp->ksb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);
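                /* Each queue owns two bits in the interrupt status/mask
                 * registers: bit 2*i ("ok") and bit 2*i+1 ("error"), e.g.
                 * queue 0 uses bits 0/1 and queue 1 uses bits 2/3.
                 */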
                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                qim |= cmd_q->int_ok | cmd_q->int_err;

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an irq */
        ret = ccp->get_irq(ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the queues used to wait for KSB space and suspend */
        init_waitqueue_head(&ccp->ksb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "ccp-q%u", cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        /* Register the RNG */
        ccp->hwrng.name = "ccp-rng";
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret) {
                dev_err(dev, "error registering hwrng (%d)\n", ret);
                goto e_kthread;
        }

        /* Make the device struct available before enabling interrupts */
        ccp_add_device(ccp);

        /* Enable interrupts */
        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

        return 0;

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        ccp->free_irq(ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}

/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int qim, i;

        /* Remove general access to the device struct */
        ccp_del_device(ccp);

        /* Unregister the RNG */
        hwrng_unregister(&ccp->hwrng);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        /* Build queue interrupt mask (two interrupt masks per queue) */
        qim = 0;
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                qim |= cmd_q->int_ok | cmd_q->int_err;
        }

        /* Disable and clear interrupts */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        ccp->free_irq(ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct device *dev = data;
        struct ccp_device *ccp = dev_get_drvdata(dev);
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
#endif

static const struct x86_cpu_id ccp_support[] = {
        { X86_VENDOR_AMD, 22, },
        { },    /* terminating empty entry required by x86_match_cpu() */
};

static int __init ccp_mod_init(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
        int ret;

        if (!x86_match_cpu(ccp_support))
                return -ENODEV;

        switch (cpuinfo->x86) {
        case 22:
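                /* Family 22 is AMD family 16h; the driver supports only
                 * models 48-63 (0x30-0x3f).
                 */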
                if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
                        return -ENODEV;

                ret = ccp_pci_init();
                if (ret)
                        return ret;

                /* Don't leave the driver loaded if init failed */
                if (!ccp_get_device()) {
                        ccp_pci_exit();
                        return -ENODEV;
                }

                return 0;
        }

        return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

        switch (cpuinfo->x86) {
        case 22:
                ccp_pci_exit();
                break;
        }
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);