Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
[firefly-linux-kernel-4.4.55.git] / drivers / staging / sep / sep_main.c
1 /*
2  *
3  *  sep_main.c - Security Processor Driver main group of functions
4  *
5  *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6  *  Contributions(c) 2009-2011 Discretix. All rights reserved.
7  *
8  *  This program is free software; you can redistribute it and/or modify it
9  *  under the terms of the GNU General Public License as published by the Free
10  *  Software Foundation; version 2 of the License.
11  *
12  *  This program is distributed in the hope that it will be useful, but WITHOUT
13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  *  more details.
16  *
17  *  You should have received a copy of the GNU General Public License along with
18  *  this program; if not, write to the Free Software Foundation, Inc., 59
19  *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
20  *
21  *  CONTACTS:
22  *
23  *  Mark Allyn          mark.a.allyn@intel.com
24  *  Jayant Mangalampalli jayant.mangalampalli@intel.com
25  *
26  *  CHANGES:
27  *
28  *  2009.06.26  Initial publish
29  *  2010.09.14  Upgrade to Medfield
30  *  2011.01.21  Move to sep_main.c to allow for sep_crypto.c
31  *  2011.02.22  Enable kernel crypto operation
32  *
33  *  Please note that this driver is based on information in the Discretix
34  *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35  *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36  *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37  *  Overview and Integration Guide.
38  */
39 /* #define DEBUG */
40 /* #define SEP_PERF_DEBUG */
41
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/miscdevice.h>
46 #include <linux/fs.h>
47 #include <linux/cdev.h>
48 #include <linux/kdev_t.h>
49 #include <linux/mutex.h>
50 #include <linux/sched.h>
51 #include <linux/mm.h>
52 #include <linux/poll.h>
53 #include <linux/wait.h>
54 #include <linux/pci.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/slab.h>
57 #include <linux/ioctl.h>
58 #include <asm/current.h>
59 #include <linux/ioport.h>
60 #include <linux/io.h>
61 #include <linux/interrupt.h>
62 #include <linux/pagemap.h>
63 #include <asm/cacheflush.h>
64 #include <linux/delay.h>
65 #include <linux/jiffies.h>
66 #include <linux/async.h>
67 #include <linux/crypto.h>
68 #include <crypto/internal/hash.h>
69 #include <crypto/scatterwalk.h>
70 #include <crypto/sha.h>
71 #include <crypto/md5.h>
72 #include <crypto/aes.h>
73 #include <crypto/des.h>
74 #include <crypto/hash.h>
75
76 #include "sep_driver_hw_defs.h"
77 #include "sep_driver_config.h"
78 #include "sep_driver_api.h"
79 #include "sep_dev.h"
80 #include "sep_crypto.h"
81
82 #define CREATE_TRACE_POINTS
83 #include "sep_trace_events.h"
84
85 /*
86  * Let's not spend cycles iterating over message
87  * area contents if debugging not enabled
88  */
89 #ifdef DEBUG
90 #define sep_dump_message(sep)   _sep_dump_message(sep)
91 #else
92 #define sep_dump_message(sep)
93 #endif
94
95 /**
96  * Currently, there is only one SEP device per platform;
97  * In event platforms in the future have more than one SEP
98  * device, this will be a linked list
99  */
100
101 struct sep_device *sep_dev;
102
103 /**
104  * sep_queue_status_remove - Removes transaction from status queue
105  * @sep: SEP device
 * @queue_elem: pointer to the queue entry to remove; cleared on return
107  *
108  * This function will remove information about transaction from the queue.
109  */
110 void sep_queue_status_remove(struct sep_device *sep,
111                                       struct sep_queue_info **queue_elem)
112 {
113         unsigned long lck_flags;
114
115         dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
116                 current->pid);
117
118         if (!queue_elem || !(*queue_elem)) {
119                 dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
120                                         current->pid, __func__);
121                 return;
122         }
123
124         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
125         list_del(&(*queue_elem)->list);
126         sep->sep_queue_num--;
127         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
128
129         kfree(*queue_elem);
130         *queue_elem = NULL;
131
132         dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
133                 current->pid);
134         return;
135 }
136
137 /**
138  * sep_queue_status_add - Adds transaction to status queue
139  * @sep: SEP device
140  * @opcode: transaction opcode
141  * @size: input data size
142  * @pid: pid of current process
143  * @name: current process name
144  * @name_len: length of name (current process)
145  *
 * This function adds information about a newly started transaction to the
 * status queue.
148  */
149 struct sep_queue_info *sep_queue_status_add(
150                                                 struct sep_device *sep,
151                                                 u32 opcode,
152                                                 u32 size,
153                                                 u32 pid,
154                                                 u8 *name, size_t name_len)
155 {
156         unsigned long lck_flags;
157         struct sep_queue_info *my_elem = NULL;
158
159         my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
160
161         if (!my_elem)
162                 return NULL;
163
164         dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
165
166         my_elem->data.opcode = opcode;
167         my_elem->data.size = size;
168         my_elem->data.pid = pid;
169
170         if (name_len > TASK_COMM_LEN)
171                 name_len = TASK_COMM_LEN;
172
173         memcpy(&my_elem->data.name, name, name_len);
174
175         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
176
177         list_add_tail(&my_elem->list, &sep->sep_queue_status);
178         sep->sep_queue_num++;
179
180         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
181
182         return my_elem;
183 }
184
185 /**
186  *      sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
187  *      @sep: SEP device
188  *      @dmatables_region: Destination pointer for the buffer
189  *      @dma_ctx: DMA context for the transaction
190  *      @table_count: Number of MLLI/DMA tables to create
191  *      The buffer created will not work as-is for DMA operations,
192  *      it needs to be copied over to the appropriate place in the
193  *      shared area.
194  */
195 static int sep_allocate_dmatables_region(struct sep_device *sep,
196                                          void **dmatables_region,
197                                          struct sep_dma_context *dma_ctx,
198                                          const u32 table_count)
199 {
200         const size_t new_len =
201                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
202
203         void *tmp_region = NULL;
204
205         dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
206                                 current->pid, dma_ctx);
207         dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
208                                 current->pid, dmatables_region);
209
210         if (!dma_ctx || !dmatables_region) {
211                 dev_warn(&sep->pdev->dev,
212                         "[PID%d] dma context/region uninitialized\n",
213                         current->pid);
214                 return -EINVAL;
215         }
216
217         dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
218                                 current->pid, new_len);
219         dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
220                                 dma_ctx->dmatables_len);
221         tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
222         if (!tmp_region)
223                 return -ENOMEM;
224
225         /* Were there any previous tables that need to be preserved ? */
226         if (*dmatables_region) {
227                 memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
228                 kfree(*dmatables_region);
229                 *dmatables_region = NULL;
230         }
231
232         *dmatables_region = tmp_region;
233
234         dma_ctx->dmatables_len += new_len;
235
236         return 0;
237 }
238
239 /**
240  *      sep_wait_transaction - Used for synchronizing transactions
241  *      @sep: SEP device
242  */
int sep_wait_transaction(struct sep_device *sep)
{
        int error = 0;
        DEFINE_WAIT(wait);

        /* Fast path: try to grab the transaction lock bit without sleeping */
        if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
                                &sep->in_use_flags)) {
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] no transactions, returning\n",
                                current->pid);
                goto end_function_setpid;
        }

        /*
         * Looping needed even for exclusive waitq entries
         * due to process wakeup latencies, previous process
         * might have already created another transaction.
         */
        for (;;) {
                /*
                 * Exclusive waitq entry, so that only one process is
                 * woken up from the queue at a time.
                 */
                prepare_to_wait_exclusive(&sep->event_transactions,
                                          &wait,
                                          TASK_INTERRUPTIBLE);
                /* Re-test after queuing: the bit may have been released
                 * between the fast-path check and prepare_to_wait */
                if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
                                          &sep->in_use_flags)) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] no transactions, breaking\n",
                                        current->pid);
                        break;
                }
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] transactions ongoing, sleeping\n",
                                current->pid);
                schedule();
                dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

                /* Interruptible sleep: bail out on a pending signal */
                if (signal_pending(current)) {
                        dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
                                                        current->pid);
                        error = -EINTR;
                        goto end_function;
                }
        }
end_function_setpid:
        /*
         * The pid_doing_transaction indicates that this process
         * now owns the facilities to perform a transaction with
         * the SEP. While this process is performing a transaction,
         * no other process who has the SEP device open can perform
         * any transactions. This method allows more than one process
         * to have the device open at any given time, which provides
         * finer granularity for device utilization by multiple
         * processes.
         */
        /* Only one process is able to progress here at a time */
        sep->pid_doing_transaction = current->pid;

end_function:
        finish_wait(&sep->event_transactions, &wait);

        return error;
}
308
309 /**
310  * sep_check_transaction_owner - Checks if current process owns transaction
311  * @sep: SEP device
312  */
313 static inline int sep_check_transaction_owner(struct sep_device *sep)
314 {
315         dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
316                 current->pid,
317                 sep->pid_doing_transaction);
318
319         if ((sep->pid_doing_transaction == 0) ||
320                 (current->pid != sep->pid_doing_transaction)) {
321                 return -EACCES;
322         }
323
324         /* We own the transaction */
325         return 0;
326 }
327
328 #ifdef DEBUG
329
330 /**
331  * sep_dump_message - dump the message that is pending
332  * @sep: SEP device
333  * This will only print dump if DEBUG is set; it does
334  * follow kernel debug print enabling
335  */
336 static void _sep_dump_message(struct sep_device *sep)
337 {
338         int count;
339
340         u32 *p = sep->shared_addr;
341
342         for (count = 0; count < 10 * 4; count += 4)
343                 dev_dbg(&sep->pdev->dev,
344                         "[PID%d] Word %d of the message is %x\n",
345                                 current->pid, count/4, *p++);
346 }
347
348 #endif
349
350 /**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor; the size allocated is sep->shared_size
 */
355 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
356 {
357         sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
358                 sep->shared_size,
359                 &sep->shared_bus, GFP_KERNEL);
360
361         if (!sep->shared_addr) {
362                 dev_dbg(&sep->pdev->dev,
363                         "[PID%d] shared memory dma_alloc_coherent failed\n",
364                                 current->pid);
365                 return -ENOMEM;
366         }
367         dev_dbg(&sep->pdev->dev,
368                 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
369                                 current->pid,
370                                 sep->shared_size, sep->shared_addr,
371                                 (unsigned long long)sep->shared_bus);
372         return 0;
373 }
374
375 /**
376  * sep_unmap_and_free_shared_area - free shared block
377  * @sep: security processor
378  */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
        /* Release the coherent buffer obtained in
         * sep_map_and_alloc_shared_area() */
        dma_free_coherent(&sep->pdev->dev, sep->shared_size,
                                sep->shared_addr, sep->shared_bus);
}
384
385 #ifdef DEBUG
386
387 /**
388  * sep_shared_bus_to_virt - convert bus/virt addresses
389  * @sep: pointer to struct sep_device
390  * @bus_address: address to convert
391  *
392  * Returns virtual address inside the shared area according
393  * to the bus address.
394  */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
                                                dma_addr_t bus_address)
{
        /* The shared area is one contiguous coherent allocation, so the
         * kernel-virtual address is simply base + (bus offset) */
        return sep->shared_addr + (bus_address - sep->shared_bus);
}
400
401 #endif
402
403 /**
404  * sep_open - device open method
405  * @inode: inode of SEP device
406  * @filp: file handle to SEP device
407  *
408  * Open method for the SEP device. Called when userspace opens
409  * the SEP device node.
410  *
411  * Returns zero on success otherwise an error code.
412  */
413 static int sep_open(struct inode *inode, struct file *filp)
414 {
415         struct sep_device *sep;
416         struct sep_private_data *priv;
417
418         dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
419
420         if (filp->f_flags & O_NONBLOCK)
421                 return -ENOTSUPP;
422
423         /*
424          * Get the SEP device structure and use it for the
425          * private_data field in filp for other methods
426          */
427
428         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
429         if (!priv)
430                 return -ENOMEM;
431
432         sep = sep_dev;
433         priv->device = sep;
434         filp->private_data = priv;
435
436         dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
437                                         current->pid, priv);
438
439         /* Anyone can open; locking takes place at transaction level */
440         return 0;
441 }
442
443 /**
444  * sep_free_dma_table_data_handler - free DMA table
445  * @sep: pointer to struct sep_device
446  * @dma_ctx: dma context
447  *
448  * Handles the request to free DMA table for synchronic actions
449  */
int sep_free_dma_table_data_handler(struct sep_device *sep,
                                           struct sep_dma_context **dma_ctx)
{
        int count;
        int dcb_counter;
        /* Pointer to the current dma_resource struct */
        struct sep_dma_resource *dma;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_free_dma_table_data_handler\n",
                        current->pid);

        if (!dma_ctx || !(*dma_ctx)) {
                /* No context or context already freed */
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] no DMA context or context already freed\n",
                                current->pid);

                return 0;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
                                        current->pid,
                                        (*dma_ctx)->nr_dcb_creat);

        /* Tear down the per-DCB resources in creation order */
        for (dcb_counter = 0;
             dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
                dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

                /* Unmap and free input map array */
                if (dma->in_map_array) {
                        for (count = 0; count < dma->in_num_pages; count++) {
                                dma_unmap_page(&sep->pdev->dev,
                                        dma->in_map_array[count].dma_addr,
                                        dma->in_map_array[count].size,
                                        DMA_TO_DEVICE);
                        }
                        kfree(dma->in_map_array);
                }

                /**
                 * Output is handled different. If
                 * this was a secure dma into restricted memory,
                 * then we skip this step altogether as restricted
                 * memory is not available to the o/s at all.
                 */
                if (((*dma_ctx)->secure_dma == false) &&
                        (dma->out_map_array)) {

                        for (count = 0; count < dma->out_num_pages; count++) {
                                dma_unmap_page(&sep->pdev->dev,
                                        dma->out_map_array[count].dma_addr,
                                        dma->out_map_array[count].size,
                                        DMA_FROM_DEVICE);
                        }
                        kfree(dma->out_map_array);
                }

                /* Free page cache for output */
                /* (these are the input pages pinned by lock_user_pages;
                 * flush before release so userspace sees SEP's writes) */
                if (dma->in_page_array) {
                        for (count = 0; count < dma->in_num_pages; count++) {
                                flush_dcache_page(dma->in_page_array[count]);
                                page_cache_release(dma->in_page_array[count]);
                        }
                        kfree(dma->in_page_array);
                }

                /* Again, we do this only for non secure dma */
                if (((*dma_ctx)->secure_dma == false) &&
                        (dma->out_page_array)) {

                        for (count = 0; count < dma->out_num_pages; count++) {
                                if (!PageReserved(dma->out_page_array[count]))

                                        SetPageDirty(dma->
                                        out_page_array[count]);

                                flush_dcache_page(dma->out_page_array[count]);
                                page_cache_release(dma->out_page_array[count]);
                        }
                        kfree(dma->out_page_array);
                }

                /**
                 * Note that here we use in_map_num_entries because we
                 * don't have a page array; the page array is generated
                 * only in the lock_user_pages, which is not called
                 * for kernel crypto, which is what the sg (scatter gather
                 * is used for exclusively)
                 */
                if (dma->src_sg) {
                        dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
                                dma->in_map_num_entries, DMA_TO_DEVICE);
                        dma->src_sg = NULL;
                }

                if (dma->dst_sg) {
                        /* NOTE(review): dst_sg is unmapped with
                         * in_map_num_entries, not out_map_num_entries --
                         * possibly a copy/paste of the src_sg case; confirm
                         * against how the crypto path maps dst_sg before
                         * changing */
                        dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
                                dma->in_map_num_entries, DMA_FROM_DEVICE);
                        dma->dst_sg = NULL;
                }

                /* Reset all the values */
                dma->in_page_array = NULL;
                dma->out_page_array = NULL;
                dma->in_num_pages = 0;
                dma->out_num_pages = 0;
                dma->in_map_array = NULL;
                dma->out_map_array = NULL;
                dma->in_map_num_entries = 0;
                dma->out_map_num_entries = 0;
        }

        (*dma_ctx)->nr_dcb_creat = 0;
        (*dma_ctx)->num_lli_tables_created = 0;

        kfree(*dma_ctx);
        *dma_ctx = NULL;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_free_dma_table_data_handler end\n",
                        current->pid);

        return 0;
}
575
576 /**
577  * sep_end_transaction_handler - end transaction
578  * @sep: pointer to struct sep_device
579  * @dma_ctx: DMA context
580  * @call_status: Call status
581  *
582  * This API handles the end transaction request.
583  */
static int sep_end_transaction_handler(struct sep_device *sep,
                                       struct sep_dma_context **dma_ctx,
                                       struct sep_call_status *call_status,
                                       struct sep_queue_info **my_queue_elem)
{
        dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

        /*
         * Extraneous transaction clearing would mess up PM
         * device usage counters and SEP would get suspended
         * just before we send a command to SEP in the next
         * transaction
         * */
        if (sep_check_transaction_owner(sep)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
                                                current->pid);
                return 0;
        }

        /* Update queue status */
        sep_queue_status_remove(sep, my_queue_elem);

        /* Check that all the DMA resources were freed */
        if (dma_ctx)
                sep_free_dma_table_data_handler(sep, dma_ctx);

        /* Reset call status for next transaction */
        if (call_status)
                call_status->status = 0;

        /* Clear the message area to avoid next transaction reading
         * sensitive results from previous transaction */
        memset(sep->shared_addr, 0,
               SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

        /* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
        if (sep->in_use) {
                sep->in_use = 0;
                /* Drop the PM reference taken when the transaction
                 * started; the autosuspend timer counts from now */
                pm_runtime_mark_last_busy(&sep->pdev->dev);
                pm_runtime_put_autosuspend(&sep->pdev->dev);
        }
#endif

        clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
        sep->pid_doing_transaction = 0;

        /* Now it's safe for next process to proceed */
        dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
                                        current->pid);
        /* Release the transaction lock bit before the wake-up so a woken
         * waiter in sep_wait_transaction() can claim it immediately */
        clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
        wake_up(&sep->event_transactions);

        return 0;
}
639
640
641 /**
642  * sep_release - close a SEP device
643  * @inode: inode of SEP device
644  * @filp: file handle being closed
645  *
646  * Called on the final close of a SEP device.
647  */
648 static int sep_release(struct inode *inode, struct file *filp)
649 {
650         struct sep_private_data * const private_data = filp->private_data;
651         struct sep_call_status *call_status = &private_data->call_status;
652         struct sep_device *sep = private_data->device;
653         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
654         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
655
656         dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
657
658         sep_end_transaction_handler(sep, dma_ctx, call_status,
659                 my_queue_elem);
660
661         kfree(filp->private_data);
662
663         return 0;
664 }
665
666 /**
667  * sep_mmap -  maps the shared area to user space
668  * @filp: pointer to struct file
669  * @vma: pointer to vm_area_struct
670  *
671  * Called on an mmap of our space via the normal SEP device
672  */
673 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
674 {
675         struct sep_private_data * const private_data = filp->private_data;
676         struct sep_call_status *call_status = &private_data->call_status;
677         struct sep_device *sep = private_data->device;
678         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
679         dma_addr_t bus_addr;
680         unsigned long error = 0;
681
682         dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
683
684         /* Set the transaction busy (own the device) */
685         /*
686          * Problem for multithreaded applications is that here we're
687          * possibly going to sleep while holding a write lock on
688          * current->mm->mmap_sem, which will cause deadlock for ongoing
689          * transaction trying to create DMA tables
690          */
691         error = sep_wait_transaction(sep);
692         if (error)
693                 /* Interrupted by signal, don't clear transaction */
694                 goto end_function;
695
696         /* Clear the message area to avoid next transaction reading
697          * sensitive results from previous transaction */
698         memset(sep->shared_addr, 0,
699                SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
700
701         /*
702          * Check that the size of the mapped range is as the size of the message
703          * shared area
704          */
705         if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
706                 error = -EINVAL;
707                 goto end_function_with_error;
708         }
709
710         dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
711                                         current->pid, sep->shared_addr);
712
713         /* Get bus address */
714         bus_addr = sep->shared_bus;
715
716         if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
717                 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
718                 dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
719                                                 current->pid);
720                 error = -EAGAIN;
721                 goto end_function_with_error;
722         }
723
724         /* Update call status */
725         set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
726
727         goto end_function;
728
729 end_function_with_error:
730         /* Clear our transaction */
731         sep_end_transaction_handler(sep, NULL, call_status,
732                 my_queue_elem);
733
734 end_function:
735         return error;
736 }
737
738 /**
739  * sep_poll - poll handler
740  * @filp:       pointer to struct file
741  * @wait:       pointer to poll_table
742  *
743  * Called by the OS when the kernel is asked to do a poll on
744  * a SEP file handle.
745  */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
        struct sep_private_data * const private_data = filp->private_data;
        struct sep_call_status *call_status = &private_data->call_status;
        struct sep_device *sep = private_data->device;
        u32 mask = 0;
        u32 retval = 0;
        u32 retval2 = 0;
        unsigned long lock_irq_flag;

        /* Am I the process that owns the transaction? */
        if (sep_check_transaction_owner(sep)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
                                                current->pid);
                mask = POLLERR;
                goto end_function;
        }

        /* Check if send command or send_reply were activated previously */
        if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
                          &call_status->status)) {
                dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
                                                current->pid);
                mask = POLLERR;
                goto end_function;
        }


        /* Add the event to the polling wait table */
        dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
                                        current->pid);

        poll_wait(filp, &sep->event_interrupt, wait);

        dev_dbg(&sep->pdev->dev,
                "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
                        current->pid, sep->send_ct, sep->reply_ct);

        /* Check if error occurred during poll */
        /* NOTE(review): only 0x0 and 0x8 in GPR3 are treated as
         * non-error states -- presumably idle/ok per the CryptoCell
         * documentation; confirm before relying on this */
        retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
        if ((retval2 != 0x0) && (retval2 != 0x8)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
                                                current->pid, retval2);
                mask |= POLLERR;
                goto end_function;
        }

        spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

        /* Equal counters mean SEP has replied to the last sent command */
        if (sep->send_ct == sep->reply_ct) {
                spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
                retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] poll: data ready check (GPR2)  %x\n",
                                current->pid, retval);

                /* Check if printf request  */
                if ((retval >> 30) & 0x1) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: SEP printf request\n",
                                        current->pid);
                        goto end_function;
                }

                /* Check if the this is SEP reply or request */
                if (retval >> 31) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: SEP request\n",
                                        current->pid);
                } else {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: normal return\n",
                                        current->pid);
                        /* Compiled to a no-op unless DEBUG is defined */
                        sep_dump_message(sep);
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
                                        current->pid);
                        mask |= POLLIN | POLLRDNORM;
                }
                set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
        } else {
                spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] poll; no reply; returning mask of 0\n",
                                current->pid);
                mask = 0;
        }

end_function:
        return mask;
}
837
838 /**
839  * sep_time_address - address in SEP memory of time
840  * @sep: SEP device we want the address from
841  *
842  * Return the address of the two dwords in memory used for time
843  * setting.
844  */
845 static u32 *sep_time_address(struct sep_device *sep)
846 {
847         return sep->shared_addr +
848                 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
849 }
850
851 /**
852  * sep_set_time - set the SEP time
853  * @sep: the SEP we are setting the time for
854  *
855  * Calculates time and sets it at the predefined address.
856  * Called with the SEP mutex held.
857  */
858 static unsigned long sep_set_time(struct sep_device *sep)
859 {
860         struct timeval time;
861         u32 *time_addr; /* Address of time as seen by the kernel */
862
863
864         do_gettimeofday(&time);
865
866         /* Set value in the SYSTEM MEMORY offset */
867         time_addr = sep_time_address(sep);
868
869         time_addr[0] = SEP_TIME_VAL_TOKEN;
870         time_addr[1] = time.tv_sec;
871
872         dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
873                                         current->pid, time.tv_sec);
874         dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
875                                         current->pid, time_addr);
876         dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
877                                         current->pid, sep->shared_addr);
878
879         return time.tv_sec;
880 }
881
882 /**
883  * sep_send_command_handler - kick off a command
884  * @sep: SEP being signalled
885  *
886  * This function raises interrupt to SEP that signals that is has a new
887  * command from the host
888  *
889  * Note that this function does fall under the ioctl lock
890  */
891 int sep_send_command_handler(struct sep_device *sep)
892 {
893         unsigned long lock_irq_flag;
894         u32 *msg_pool;
895         int error = 0;
896
897         /* Basic sanity check; set msg pool to start of shared area */
898         msg_pool = (u32 *)sep->shared_addr;
899         msg_pool += 2;
900
901         /* Look for start msg token */
902         if (*msg_pool != SEP_START_MSG_TOKEN) {
903                 dev_warn(&sep->pdev->dev, "start message token not present\n");
904                 error = -EPROTO;
905                 goto end_function;
906         }
907
908         /* Do we have a reasonable size? */
909         msg_pool += 1;
910         if ((*msg_pool < 2) ||
911                 (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
912
913                 dev_warn(&sep->pdev->dev, "invalid message size\n");
914                 error = -EPROTO;
915                 goto end_function;
916         }
917
918         /* Does the command look reasonable? */
919         msg_pool += 1;
920         if (*msg_pool < 2) {
921                 dev_warn(&sep->pdev->dev, "invalid message opcode\n");
922                 error = -EPROTO;
923                 goto end_function;
924         }
925
926 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
927         dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
928                                         current->pid,
929                                         sep->pdev->dev.power.runtime_status);
930         sep->in_use = 1; /* device is about to be used */
931         pm_runtime_get_sync(&sep->pdev->dev);
932 #endif
933
934         if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
935                 error = -EPROTO;
936                 goto end_function;
937         }
938         sep->in_use = 1; /* device is about to be used */
939         sep_set_time(sep);
940
941         sep_dump_message(sep);
942
943         /* Update counter */
944         spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
945         sep->send_ct++;
946         spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
947
948         dev_dbg(&sep->pdev->dev,
949                 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
950                         current->pid, sep->send_ct, sep->reply_ct);
951
952         /* Send interrupt to SEP */
953         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
954
955 end_function:
956         return error;
957 }
958
959 /**
960  *      sep_crypto_dma -
961  *      @sep: pointer to struct sep_device
962  *      @sg: pointer to struct scatterlist
963  *      @direction:
964  *      @dma_maps: pointer to place a pointer to array of dma maps
965  *       This is filled in; anything previous there will be lost
966  *       The structure for dma maps is sep_dma_map
967  *      @returns number of dma maps on success; negative on error
968  *
969  *      This creates the dma table from the scatterlist
970  *      It is used only for kernel crypto as it works with scatterlists
971  *      representation of data buffers
972  *
973  */
974 static int sep_crypto_dma(
975         struct sep_device *sep,
976         struct scatterlist *sg,
977         struct sep_dma_map **dma_maps,
978         enum dma_data_direction direction)
979 {
980         struct scatterlist *temp_sg;
981
982         u32 count_segment;
983         u32 count_mapped;
984         struct sep_dma_map *sep_dma;
985         int ct1;
986
987         if (sg->length == 0)
988                 return 0;
989
990         /* Count the segments */
991         temp_sg = sg;
992         count_segment = 0;
993         while (temp_sg) {
994                 count_segment += 1;
995                 temp_sg = scatterwalk_sg_next(temp_sg);
996         }
997         dev_dbg(&sep->pdev->dev,
998                 "There are (hex) %x segments in sg\n", count_segment);
999
1000         /* DMA map segments */
1001         count_mapped = dma_map_sg(&sep->pdev->dev, sg,
1002                 count_segment, direction);
1003
1004         dev_dbg(&sep->pdev->dev,
1005                 "There are (hex) %x maps in sg\n", count_mapped);
1006
1007         if (count_mapped == 0) {
1008                 dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
1009                 return -ENOMEM;
1010         }
1011
1012         sep_dma = kmalloc(sizeof(struct sep_dma_map) *
1013                 count_mapped, GFP_ATOMIC);
1014
1015         if (sep_dma == NULL) {
1016                 dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
1017                 return -ENOMEM;
1018         }
1019
1020         for_each_sg(sg, temp_sg, count_mapped, ct1) {
1021                 sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
1022                 sep_dma[ct1].size = sg_dma_len(temp_sg);
1023                 dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
1024                         ct1, (unsigned long)sep_dma[ct1].dma_addr,
1025                         (unsigned long)sep_dma[ct1].size);
1026                 }
1027
1028         *dma_maps = sep_dma;
1029         return count_mapped;
1030
1031 }
1032
1033 /**
1034  *      sep_crypto_lli -
1035  *      @sep: pointer to struct sep_device
1036  *      @sg: pointer to struct scatterlist
1037  *      @data_size: total data size
1038  *      @direction:
1039  *      @dma_maps: pointer to place a pointer to array of dma maps
1040  *       This is filled in; anything previous there will be lost
1041  *       The structure for dma maps is sep_dma_map
1042  *      @lli_maps: pointer to place a pointer to array of lli maps
1043  *       This is filled in; anything previous there will be lost
1044  *       The structure for dma maps is sep_dma_map
1045  *      @returns number of dma maps on success; negative on error
1046  *
1047  *      This creates the LLI table from the scatterlist
1048  *      It is only used for kernel crypto as it works exclusively
1049  *      with scatterlists (struct scatterlist) representation of
1050  *      data buffers
1051  */
1052 static int sep_crypto_lli(
1053         struct sep_device *sep,
1054         struct scatterlist *sg,
1055         struct sep_dma_map **maps,
1056         struct sep_lli_entry **llis,
1057         u32 data_size,
1058         enum dma_data_direction direction)
1059 {
1060
1061         int ct1;
1062         struct sep_lli_entry *sep_lli;
1063         struct sep_dma_map *sep_map;
1064
1065         int nbr_ents;
1066
1067         nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
1068         if (nbr_ents <= 0) {
1069                 dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
1070                         nbr_ents);
1071                 return nbr_ents;
1072         }
1073
1074         sep_map = *maps;
1075
1076         sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
1077
1078         if (sep_lli == NULL) {
1079                 dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
1080
1081                 kfree(*maps);
1082                 *maps = NULL;
1083                 return -ENOMEM;
1084         }
1085
1086         for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
1087                 sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
1088
1089                 /* Maximum for page is total data size */
1090                 if (sep_map[ct1].size > data_size)
1091                         sep_map[ct1].size = data_size;
1092
1093                 sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
1094         }
1095
1096         *llis = sep_lli;
1097         return nbr_ents;
1098 }
1099
1100 /**
1101  *      sep_lock_kernel_pages - map kernel pages for DMA
1102  *      @sep: pointer to struct sep_device
1103  *      @kernel_virt_addr: address of data buffer in kernel
1104  *      @data_size: size of data
1105  *      @lli_array_ptr: lli array
1106  *      @in_out_flag: input into device or output from device
1107  *
1108  *      This function locks all the physical pages of the kernel virtual buffer
1109  *      and construct a basic lli  array, where each entry holds the physical
1110  *      page address and the size that application data holds in this page
1111  *      This function is used only during kernel crypto mod calls from within
1112  *      the kernel (when ioctl is not used)
1113  *
1114  *      This is used only for kernel crypto. Kernel pages
1115  *      are handled differently as they are done via
1116  *      scatter gather lists (struct scatterlist)
1117  */
1118 static int sep_lock_kernel_pages(struct sep_device *sep,
1119         unsigned long kernel_virt_addr,
1120         u32 data_size,
1121         struct sep_lli_entry **lli_array_ptr,
1122         int in_out_flag,
1123         struct sep_dma_context *dma_ctx)
1124
1125 {
1126         u32 num_pages;
1127         struct scatterlist *sg;
1128
1129         /* Array of lli */
1130         struct sep_lli_entry *lli_array;
1131         /* Map array */
1132         struct sep_dma_map *map_array;
1133
1134         enum dma_data_direction direction;
1135
1136         lli_array = NULL;
1137         map_array = NULL;
1138
1139         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1140                 direction = DMA_TO_DEVICE;
1141                 sg = dma_ctx->src_sg;
1142         } else {
1143                 direction = DMA_FROM_DEVICE;
1144                 sg = dma_ctx->dst_sg;
1145         }
1146
1147         num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
1148                 data_size, direction);
1149
1150         if (num_pages <= 0) {
1151                 dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
1152                         num_pages);
1153                 return -ENOMEM;
1154         }
1155
1156         /* Put mapped kernel sg into kernel resource array */
1157
1158         /* Set output params according to the in_out flag */
1159         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1160                 *lli_array_ptr = lli_array;
1161                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1162                                                                 num_pages;
1163                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1164                                                                 NULL;
1165                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1166                                                                 map_array;
1167                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1168                                                                 num_pages;
1169                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
1170                         dma_ctx->src_sg;
1171         } else {
1172                 *lli_array_ptr = lli_array;
1173                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1174                                                                 num_pages;
1175                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1176                                                                 NULL;
1177                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1178                                                                 map_array;
1179                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1180                                         out_map_num_entries = num_pages;
1181                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
1182                         dma_ctx->dst_sg;
1183         }
1184
1185         return 0;
1186 }
1187
1188 /**
1189  * sep_lock_user_pages - lock and map user pages for DMA
1190  * @sep: pointer to struct sep_device
1191  * @app_virt_addr: user memory data buffer
1192  * @data_size: size of data buffer
1193  * @lli_array_ptr: lli array
1194  * @in_out_flag: input or output to device
1195  *
1196  * This function locks all the physical pages of the application
1197  * virtual buffer and construct a basic lli  array, where each entry
1198  * holds the physical page address and the size that application
1199  * data holds in this physical pages
1200  */
1201 static int sep_lock_user_pages(struct sep_device *sep,
1202         u32 app_virt_addr,
1203         u32 data_size,
1204         struct sep_lli_entry **lli_array_ptr,
1205         int in_out_flag,
1206         struct sep_dma_context *dma_ctx)
1207
1208 {
1209         int error = 0;
1210         u32 count;
1211         int result;
1212         /* The the page of the end address of the user space buffer */
1213         u32 end_page;
1214         /* The page of the start address of the user space buffer */
1215         u32 start_page;
1216         /* The range in pages */
1217         u32 num_pages;
1218         /* Array of pointers to page */
1219         struct page **page_array;
1220         /* Array of lli */
1221         struct sep_lli_entry *lli_array;
1222         /* Map array */
1223         struct sep_dma_map *map_array;
1224
1225         /* Set start and end pages and num pages */
1226         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1227         start_page = app_virt_addr >> PAGE_SHIFT;
1228         num_pages = end_page - start_page + 1;
1229
1230         dev_dbg(&sep->pdev->dev,
1231                 "[PID%d] lock user pages app_virt_addr is %x\n",
1232                         current->pid, app_virt_addr);
1233
1234         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1235                                         current->pid, data_size);
1236         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1237                                         current->pid, start_page);
1238         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1239                                         current->pid, end_page);
1240         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1241                                         current->pid, num_pages);
1242
1243         /* Allocate array of pages structure pointers */
1244         page_array = kmalloc_array(num_pages, sizeof(struct page *),
1245                                    GFP_ATOMIC);
1246         if (!page_array) {
1247                 error = -ENOMEM;
1248                 goto end_function;
1249         }
1250
1251         map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
1252                                   GFP_ATOMIC);
1253         if (!map_array) {
1254                 error = -ENOMEM;
1255                 goto end_function_with_error1;
1256         }
1257
1258         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1259                                   GFP_ATOMIC);
1260         if (!lli_array) {
1261                 error = -ENOMEM;
1262                 goto end_function_with_error2;
1263         }
1264
1265         /* Convert the application virtual address into a set of physical */
1266         down_read(&current->mm->mmap_sem);
1267         result = get_user_pages(current, current->mm, app_virt_addr,
1268                 num_pages,
1269                 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
1270                 0, page_array, NULL);
1271
1272         up_read(&current->mm->mmap_sem);
1273
1274         /* Check the number of pages locked - if not all then exit with error */
1275         if (result != num_pages) {
1276                 dev_warn(&sep->pdev->dev,
1277                         "[PID%d] not all pages locked by get_user_pages, "
1278                         "result 0x%X, num_pages 0x%X\n",
1279                                 current->pid, result, num_pages);
1280                 error = -ENOMEM;
1281                 goto end_function_with_error3;
1282         }
1283
1284         dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
1285                                         current->pid);
1286
1287         /*
1288          * Fill the array using page array data and
1289          * map the pages - this action will also flush the cache as needed
1290          */
1291         for (count = 0; count < num_pages; count++) {
1292                 /* Fill the map array */
1293                 map_array[count].dma_addr =
1294                         dma_map_page(&sep->pdev->dev, page_array[count],
1295                         0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1296
1297                 map_array[count].size = PAGE_SIZE;
1298
1299                 /* Fill the lli array entry */
1300                 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1301                 lli_array[count].block_size = PAGE_SIZE;
1302
1303                 dev_dbg(&sep->pdev->dev,
1304                         "[PID%d] lli_array[%x].bus_address is %08lx, "
1305                         "lli_array[%x].block_size is (hex) %x\n", current->pid,
1306                         count, (unsigned long)lli_array[count].bus_address,
1307                         count, lli_array[count].block_size);
1308         }
1309
1310         /* Check the offset for the first page */
1311         lli_array[0].bus_address =
1312                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1313
1314         /* Check that not all the data is in the first page only */
1315         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1316                 lli_array[0].block_size = data_size;
1317         else
1318                 lli_array[0].block_size =
1319                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1320
1321                 dev_dbg(&sep->pdev->dev,
1322                         "[PID%d] After check if page 0 has all data\n",
1323                         current->pid);
1324                 dev_dbg(&sep->pdev->dev,
1325                         "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1326                         "lli_array[0].block_size is (hex) %x\n",
1327                         current->pid,
1328                         (unsigned long)lli_array[0].bus_address,
1329                         lli_array[0].block_size);
1330
1331
1332         /* Check the size of the last page */
1333         if (num_pages > 1) {
1334                 lli_array[num_pages - 1].block_size =
1335                         (app_virt_addr + data_size) & (~PAGE_MASK);
1336                 if (lli_array[num_pages - 1].block_size == 0)
1337                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1338
1339                 dev_dbg(&sep->pdev->dev,
1340                         "[PID%d] After last page size adjustment\n",
1341                         current->pid);
1342                 dev_dbg(&sep->pdev->dev,
1343                         "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1344                         "lli_array[%x].block_size is (hex) %x\n",
1345                         current->pid,
1346                         num_pages - 1,
1347                         (unsigned long)lli_array[num_pages - 1].bus_address,
1348                         num_pages - 1,
1349                         lli_array[num_pages - 1].block_size);
1350         }
1351
1352         /* Set output params according to the in_out flag */
1353         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1354                 *lli_array_ptr = lli_array;
1355                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1356                                                                 num_pages;
1357                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1358                                                                 page_array;
1359                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1360                                                                 map_array;
1361                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1362                                                                 num_pages;
1363                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
1364         } else {
1365                 *lli_array_ptr = lli_array;
1366                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1367                                                                 num_pages;
1368                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1369                                                                 page_array;
1370                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1371                                                                 map_array;
1372                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1373                                         out_map_num_entries = num_pages;
1374                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
1375         }
1376         goto end_function;
1377
1378 end_function_with_error3:
1379         /* Free lli array */
1380         kfree(lli_array);
1381
1382 end_function_with_error2:
1383         kfree(map_array);
1384
1385 end_function_with_error1:
1386         /* Free page array */
1387         kfree(page_array);
1388
1389 end_function:
1390         return error;
1391 }
1392
1393 /**
1394  *      sep_lli_table_secure_dma - get lli array for IMR addresses
1395  *      @sep: pointer to struct sep_device
1396  *      @app_virt_addr: user memory data buffer
1397  *      @data_size: size of data buffer
1398  *      @lli_array_ptr: lli array
1399  *      @in_out_flag: not used
1400  *      @dma_ctx: pointer to struct sep_dma_context
1401  *
1402  *      This function creates lli tables for outputting data to
1403  *      IMR memory, which is memory that cannot be accessed by the
1404  *      the x86 processor.
1405  */
1406 static int sep_lli_table_secure_dma(struct sep_device *sep,
1407         u32 app_virt_addr,
1408         u32 data_size,
1409         struct sep_lli_entry **lli_array_ptr,
1410         int in_out_flag,
1411         struct sep_dma_context *dma_ctx)
1412
1413 {
1414         int error = 0;
1415         u32 count;
1416         /* The the page of the end address of the user space buffer */
1417         u32 end_page;
1418         /* The page of the start address of the user space buffer */
1419         u32 start_page;
1420         /* The range in pages */
1421         u32 num_pages;
1422         /* Array of lli */
1423         struct sep_lli_entry *lli_array;
1424
1425         /* Set start and end pages and num pages */
1426         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1427         start_page = app_virt_addr >> PAGE_SHIFT;
1428         num_pages = end_page - start_page + 1;
1429
1430         dev_dbg(&sep->pdev->dev,
1431                 "[PID%d] lock user pages  app_virt_addr is %x\n",
1432                 current->pid, app_virt_addr);
1433
1434         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1435                 current->pid, data_size);
1436         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1437                 current->pid, start_page);
1438         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1439                 current->pid, end_page);
1440         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1441                 current->pid, num_pages);
1442
1443         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1444                                   GFP_ATOMIC);
1445         if (!lli_array)
1446                 return -ENOMEM;
1447
1448         /*
1449          * Fill the lli_array
1450          */
1451         start_page = start_page << PAGE_SHIFT;
1452         for (count = 0; count < num_pages; count++) {
1453                 /* Fill the lli array entry */
1454                 lli_array[count].bus_address = start_page;
1455                 lli_array[count].block_size = PAGE_SIZE;
1456
1457                 start_page += PAGE_SIZE;
1458
1459                 dev_dbg(&sep->pdev->dev,
1460                         "[PID%d] lli_array[%x].bus_address is %08lx, "
1461                         "lli_array[%x].block_size is (hex) %x\n",
1462                         current->pid,
1463                         count, (unsigned long)lli_array[count].bus_address,
1464                         count, lli_array[count].block_size);
1465         }
1466
1467         /* Check the offset for the first page */
1468         lli_array[0].bus_address =
1469                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1470
1471         /* Check that not all the data is in the first page only */
1472         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1473                 lli_array[0].block_size = data_size;
1474         else
1475                 lli_array[0].block_size =
1476                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1477
1478         dev_dbg(&sep->pdev->dev,
1479                 "[PID%d] After check if page 0 has all data\n"
1480                 "lli_array[0].bus_address is (hex) %08lx, "
1481                 "lli_array[0].block_size is (hex) %x\n",
1482                 current->pid,
1483                 (unsigned long)lli_array[0].bus_address,
1484                 lli_array[0].block_size);
1485
1486         /* Check the size of the last page */
1487         if (num_pages > 1) {
1488                 lli_array[num_pages - 1].block_size =
1489                         (app_virt_addr + data_size) & (~PAGE_MASK);
1490                 if (lli_array[num_pages - 1].block_size == 0)
1491                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1492
1493                 dev_dbg(&sep->pdev->dev,
1494                         "[PID%d] After last page size adjustment\n"
1495                         "lli_array[%x].bus_address is (hex) %08lx, "
1496                         "lli_array[%x].block_size is (hex) %x\n",
1497                         current->pid, num_pages - 1,
1498                         (unsigned long)lli_array[num_pages - 1].bus_address,
1499                         num_pages - 1,
1500                         lli_array[num_pages - 1].block_size);
1501         }
1502         *lli_array_ptr = lli_array;
1503         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1504         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1505         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1506         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1507
1508         return error;
1509 }
1510
1511 /**
1512  * sep_calculate_lli_table_max_size - size the LLI table
1513  * @sep: pointer to struct sep_device
1514  * @lli_in_array_ptr
1515  * @num_array_entries
1516  * @last_table_flag
1517  *
1518  * This function calculates the size of data that can be inserted into
1519  * the lli table from this array, such that either the table is full
1520  * (all entries are entered), or there are no more entries in the
1521  * lli array
1522  */
1523 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1524         struct sep_lli_entry *lli_in_array_ptr,
1525         u32 num_array_entries,
1526         u32 *last_table_flag)
1527 {
1528         u32 counter;
1529         /* Table data size */
1530         u32 table_data_size = 0;
1531         /* Data size for the next table */
1532         u32 next_table_data_size;
1533
1534         *last_table_flag = 0;
1535
1536         /*
1537          * Calculate the data in the out lli table till we fill the whole
1538          * table or till the data has ended
1539          */
1540         for (counter = 0;
1541                 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1542                         (counter < num_array_entries); counter++)
1543                 table_data_size += lli_in_array_ptr[counter].block_size;
1544
1545         /*
1546          * Check if we reached the last entry,
1547          * meaning this ia the last table to build,
1548          * and no need to check the block alignment
1549          */
1550         if (counter == num_array_entries) {
1551                 /* Set the last table flag */
1552                 *last_table_flag = 1;
1553                 goto end_function;
1554         }
1555
1556         /*
1557          * Calculate the data size of the next table.
1558          * Stop if no entries left or if data size is more the DMA restriction
1559          */
1560         next_table_data_size = 0;
1561         for (; counter < num_array_entries; counter++) {
1562                 next_table_data_size += lli_in_array_ptr[counter].block_size;
1563                 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1564                         break;
1565         }
1566
1567         /*
1568          * Check if the next table data size is less then DMA rstriction.
1569          * if it is - recalculate the current table size, so that the next
1570          * table data size will be adaquete for DMA
1571          */
1572         if (next_table_data_size &&
1573                 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1574
1575                 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1576                         next_table_data_size);
1577
1578 end_function:
1579         return table_data_size;
1580 }
1581
/**
 * sep_build_lli_table - build an lli table covering the given data size
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: source lli array (bus address + block size pairs); a
 *	partially consumed entry is split in place so the remainder can be
 *	picked up by the next table
 * @lli_table_ptr: destination lli table to fill
 * @num_processed_entries_ptr: incremented by the number of source array
 *	entries fully consumed by this table
 * @num_table_entries_ptr: set to the number of entries written to the
 *	table, including the terminating info entry
 * @table_data_size: exact number of data bytes this table must cover
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry    *lli_array_ptr,
	struct sep_lli_entry    *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Number of data bytes accumulated into the table so far */
	u32 curr_table_data_size;
	/* Index of the lli array entry currently being consumed */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	/* Start at 1 to account for the terminating info entry */
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
			current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		/* Copy the next source block into the table; the SEP is a
		 * little-endian 32 bit device, hence cpu_to_le32 */
		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
				current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid, lli_table_ptr->block_size);

		/* Last block overshot the requested size: trim the table
		 * entry and leave the remainder in the source array */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
					current->pid);

			/* Shrink the entry to the exact remaining size.
			 * NOTE(review): doing arithmetic on a cpu_to_le32()
			 * value is only correct on little-endian CPUs;
			 * confirm if big-endian hosts must be supported. */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Advance the source block past the consumed part */
			lli_array_ptr[array_counter].bus_address +=
			cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Terminating info entry: bus address of all ones, zero size */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Report how many source entries were fully consumed */
	*num_processed_entries_ptr += array_counter;

}
1683
1684 /**
1685  * sep_shared_area_virt_to_bus - map shared area to bus address
1686  * @sep: pointer to struct sep_device
1687  * @virt_address: virtual address to convert
1688  *
1689  * This functions returns the physical address inside shared area according
1690  * to the virtual address. It can be either on the external RAM device
1691  * (ioremapped), or on the system RAM
1692  * This implementation is for the external RAM
1693  */
1694 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1695         void *virt_address)
1696 {
1697         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1698                                         current->pid, virt_address);
1699         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1700                 current->pid,
1701                 (unsigned long)
1702                 sep->shared_bus + (virt_address - sep->shared_addr));
1703
1704         return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1705 }
1706
1707 /**
1708  * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1709  * @sep: pointer to struct sep_device
1710  * @bus_address: bus address to convert
1711  *
1712  * This functions returns the virtual address inside shared area
1713  * according to the physical address. It can be either on the
1714  * external RAM device (ioremapped), or on the system RAM
1715  * This implementation is for the external RAM
1716  */
1717 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1718         dma_addr_t bus_address)
1719 {
1720         dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1721                 current->pid,
1722                 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1723                         (size_t)(bus_address - sep->shared_bus)));
1724
1725         return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1726 }
1727
/**
 * sep_debug_print_lli_tables - dump a chain of LLI tables (DEBUG builds)
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to the first sep_lli_entry table in the chain
 * @num_table_entries: number of entries in the first table
 * @table_data_size: data size covered by the first table
 *
 * Walks the chain of created tables and prints every entry. Each table's
 * trailing info entry links to the next table via its bus address; a bus
 * address of all ones terminates the chain. Compiled out unless DEBUG
 * is defined.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
#ifdef DEBUG
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
					current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	/* A bus address of all ones marks the end of the chain */
	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, "
			"table_data_size is (hex) %lx\n",
				current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
				current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx "
				"block size is (hex) %x\n", current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Step back to the info entry (last entry of the table) */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size "
			"is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address "
			"is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);


		/* The info entry packs the next table's entry count in the
		 * top 8 bits and its data size in the low 24 bits */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is "
			"(hex) %lx num_table_entries is"
			" %lx bus_address is%lx\n",
				current->pid,
				table_data_size,
				num_table_entries,
				(unsigned long)lli_table_ptr->bus_address);

		/* Follow the chain: convert the next table's bus address
		 * back to a kernel virtual pointer */
		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
					current->pid);
#endif
}
1818
1819
1820 /**
1821  * sep_prepare_empty_lli_table - create a blank LLI table
1822  * @sep: pointer to struct sep_device
1823  * @lli_table_addr_ptr: pointer to lli table
1824  * @num_entries_ptr: pointer to number of entries
1825  * @table_data_size_ptr: point to table data size
1826  * @dmatables_region: Optional buffer for DMA tables
1827  * @dma_ctx: DMA context
1828  *
1829  * This function creates empty lli tables when there is no data
1830  */
1831 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1832                 dma_addr_t *lli_table_addr_ptr,
1833                 u32 *num_entries_ptr,
1834                 u32 *table_data_size_ptr,
1835                 void **dmatables_region,
1836                 struct sep_dma_context *dma_ctx)
1837 {
1838         struct sep_lli_entry *lli_table_ptr;
1839
1840         /* Find the area for new table */
1841         lli_table_ptr =
1842                 (struct sep_lli_entry *)(sep->shared_addr +
1843                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1844                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1845                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1846
1847         if (dmatables_region && *dmatables_region)
1848                 lli_table_ptr = *dmatables_region;
1849
1850         lli_table_ptr->bus_address = 0;
1851         lli_table_ptr->block_size = 0;
1852
1853         lli_table_ptr++;
1854         lli_table_ptr->bus_address = 0xFFFFFFFF;
1855         lli_table_ptr->block_size = 0;
1856
1857         /* Set the output parameter value */
1858         *lli_table_addr_ptr = sep->shared_bus +
1859                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1860                 dma_ctx->num_lli_tables_created *
1861                 sizeof(struct sep_lli_entry) *
1862                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1863
1864         /* Set the num of entries and table data size for empty table */
1865         *num_entries_ptr = 2;
1866         *table_data_size_ptr = 0;
1867
1868         /* Update the number of created tables */
1869         dma_ctx->num_lli_tables_created++;
1870 }
1871
1872 /**
1873  * sep_prepare_input_dma_table - prepare input DMA mappings
1874  * @sep: pointer to struct sep_device
1875  * @data_size:
1876  * @block_size:
1877  * @lli_table_ptr:
1878  * @num_entries_ptr:
1879  * @table_data_size_ptr:
1880  * @is_kva: set for kernel data (kernel crypt io call)
1881  *
1882  * This function prepares only input DMA table for synchronic symmetric
1883  * operations (HASH)
1884  * Note that all bus addresses that are passed to the SEP
1885  * are in 32 bit format; the SEP is a 32 bit device
1886  */
1887 static int sep_prepare_input_dma_table(struct sep_device *sep,
1888         unsigned long app_virt_addr,
1889         u32 data_size,
1890         u32 block_size,
1891         dma_addr_t *lli_table_ptr,
1892         u32 *num_entries_ptr,
1893         u32 *table_data_size_ptr,
1894         bool is_kva,
1895         void **dmatables_region,
1896         struct sep_dma_context *dma_ctx
1897 )
1898 {
1899         int error = 0;
1900         /* Pointer to the info entry of the table - the last entry */
1901         struct sep_lli_entry *info_entry_ptr;
1902         /* Array of pointers to page */
1903         struct sep_lli_entry *lli_array_ptr;
1904         /* Points to the first entry to be processed in the lli_in_array */
1905         u32 current_entry = 0;
1906         /* Num entries in the virtual buffer */
1907         u32 sep_lli_entries = 0;
1908         /* Lli table pointer */
1909         struct sep_lli_entry *in_lli_table_ptr;
1910         /* The total data in one table */
1911         u32 table_data_size = 0;
1912         /* Flag for last table */
1913         u32 last_table_flag = 0;
1914         /* Number of entries in lli table */
1915         u32 num_entries_in_table = 0;
1916         /* Next table address */
1917         void *lli_table_alloc_addr = NULL;
1918         void *dma_lli_table_alloc_addr = NULL;
1919         void *dma_in_lli_table_ptr = NULL;
1920
1921         dev_dbg(&sep->pdev->dev,
1922                 "[PID%d] prepare intput dma tbl data size: (hex) %x\n",
1923                 current->pid, data_size);
1924
1925         dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1926                                         current->pid, block_size);
1927
1928         /* Initialize the pages pointers */
1929         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1930         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1931
1932         /* Set the kernel address for first table to be allocated */
1933         lli_table_alloc_addr = (void *)(sep->shared_addr +
1934                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1935                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1936                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1937
1938         if (data_size == 0) {
1939                 if (dmatables_region) {
1940                         error = sep_allocate_dmatables_region(sep,
1941                                                 dmatables_region,
1942                                                 dma_ctx,
1943                                                 1);
1944                         if (error)
1945                                 return error;
1946                 }
1947                 /* Special case  - create meptu table - 2 entries, zero data */
1948                 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1949                                 num_entries_ptr, table_data_size_ptr,
1950                                 dmatables_region, dma_ctx);
1951                 goto update_dcb_counter;
1952         }
1953
1954         /* Check if the pages are in Kernel Virtual Address layout */
1955         if (is_kva == true)
1956                 error = sep_lock_kernel_pages(sep, app_virt_addr,
1957                         data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1958                         dma_ctx);
1959         else
1960                 /*
1961                  * Lock the pages of the user buffer
1962                  * and translate them to pages
1963                  */
1964                 error = sep_lock_user_pages(sep, app_virt_addr,
1965                         data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1966                         dma_ctx);
1967
1968         if (error)
1969                 goto end_function;
1970
1971         dev_dbg(&sep->pdev->dev,
1972                 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1973                 current->pid,
1974                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1975
1976         current_entry = 0;
1977         info_entry_ptr = NULL;
1978
1979         sep_lli_entries =
1980                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1981
1982         dma_lli_table_alloc_addr = lli_table_alloc_addr;
1983         if (dmatables_region) {
1984                 error = sep_allocate_dmatables_region(sep,
1985                                         dmatables_region,
1986                                         dma_ctx,
1987                                         sep_lli_entries);
1988                 if (error)
1989                         return error;
1990                 lli_table_alloc_addr = *dmatables_region;
1991         }
1992
1993         /* Loop till all the entries in in array are processed */
1994         while (current_entry < sep_lli_entries) {
1995
1996                 /* Set the new input and output tables */
1997                 in_lli_table_ptr =
1998                         (struct sep_lli_entry *)lli_table_alloc_addr;
1999                 dma_in_lli_table_ptr =
2000                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2001
2002                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2003                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2004                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2005                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2006
2007                 if (dma_lli_table_alloc_addr >
2008                         ((void *)sep->shared_addr +
2009                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2010                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2011
2012                         error = -ENOMEM;
2013                         goto end_function_error;
2014
2015                 }
2016
2017                 /* Update the number of created tables */
2018                 dma_ctx->num_lli_tables_created++;
2019
2020                 /* Calculate the maximum size of data for input table */
2021                 table_data_size = sep_calculate_lli_table_max_size(sep,
2022                         &lli_array_ptr[current_entry],
2023                         (sep_lli_entries - current_entry),
2024                         &last_table_flag);
2025
2026                 /*
2027                  * If this is not the last table -
2028                  * then align it to the block size
2029                  */
2030                 if (!last_table_flag)
2031                         table_data_size =
2032                                 (table_data_size / block_size) * block_size;
2033
2034                 dev_dbg(&sep->pdev->dev,
2035                         "[PID%d] output table_data_size is (hex) %x\n",
2036                                 current->pid,
2037                                 table_data_size);
2038
2039                 /* Construct input lli table */
2040                 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2041                         in_lli_table_ptr,
2042                         &current_entry, &num_entries_in_table, table_data_size);
2043
2044                 if (info_entry_ptr == NULL) {
2045
2046                         /* Set the output parameters to physical addresses */
2047                         *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2048                                 dma_in_lli_table_ptr);
2049                         *num_entries_ptr = num_entries_in_table;
2050                         *table_data_size_ptr = table_data_size;
2051
2052                         dev_dbg(&sep->pdev->dev,
2053                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2054                                 current->pid,
2055                                 (unsigned long)*lli_table_ptr);
2056
2057                 } else {
2058                         /* Update the info entry of the previous in table */
2059                         info_entry_ptr->bus_address =
2060                                 sep_shared_area_virt_to_bus(sep,
2061                                                         dma_in_lli_table_ptr);
2062                         info_entry_ptr->block_size =
2063                                 ((num_entries_in_table) << 24) |
2064                                 (table_data_size);
2065                 }
2066                 /* Save the pointer to the info entry of the current tables */
2067                 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2068         }
2069         /* Print input tables */
2070         if (!dmatables_region) {
2071                 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2072                         sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2073                         *num_entries_ptr, *table_data_size_ptr);
2074         }
2075
2076         /* The array of the pages */
2077         kfree(lli_array_ptr);
2078
2079 update_dcb_counter:
2080         /* Update DCB counter */
2081         dma_ctx->nr_dcb_creat++;
2082         goto end_function;
2083
2084 end_function_error:
2085         /* Free all the allocated resources */
2086         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2087         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2088         kfree(lli_array_ptr);
2089         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2090         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2091
2092 end_function:
2093         return error;
2094
2095 }
2096
2097 /**
2098  * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2099  * @sep: pointer to struct sep_device
2100  * @lli_in_array:
2101  * @sep_in_lli_entries:
2102  * @lli_out_array:
2103  * @sep_out_lli_entries
2104  * @block_size
2105  * @lli_table_in_ptr
2106  * @lli_table_out_ptr
2107  * @in_num_entries_ptr
2108  * @out_num_entries_ptr
2109  * @table_data_size_ptr
2110  *
2111  * This function creates the input and output DMA tables for
2112  * symmetric operations (AES/DES) according to the block
2113  * size from LLI arays
2114  * Note that all bus addresses that are passed to the SEP
2115  * are in 32 bit format; the SEP is a 32 bit device
2116  */
2117 static int sep_construct_dma_tables_from_lli(
2118         struct sep_device *sep,
2119         struct sep_lli_entry *lli_in_array,
2120         u32     sep_in_lli_entries,
2121         struct sep_lli_entry *lli_out_array,
2122         u32     sep_out_lli_entries,
2123         u32     block_size,
2124         dma_addr_t *lli_table_in_ptr,
2125         dma_addr_t *lli_table_out_ptr,
2126         u32     *in_num_entries_ptr,
2127         u32     *out_num_entries_ptr,
2128         u32     *table_data_size_ptr,
2129         void    **dmatables_region,
2130         struct sep_dma_context *dma_ctx)
2131 {
2132         /* Points to the area where next lli table can be allocated */
2133         void *lli_table_alloc_addr = NULL;
2134         /*
2135          * Points to the area in shared region where next lli table
2136          * can be allocated
2137          */
2138         void *dma_lli_table_alloc_addr = NULL;
2139         /* Input lli table in dmatables_region or shared region */
2140         struct sep_lli_entry *in_lli_table_ptr = NULL;
2141         /* Input lli table location in the shared region */
2142         struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2143         /* Output lli table in dmatables_region or shared region */
2144         struct sep_lli_entry *out_lli_table_ptr = NULL;
2145         /* Output lli table location in the shared region */
2146         struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2147         /* Pointer to the info entry of the table - the last entry */
2148         struct sep_lli_entry *info_in_entry_ptr = NULL;
2149         /* Pointer to the info entry of the table - the last entry */
2150         struct sep_lli_entry *info_out_entry_ptr = NULL;
2151         /* Points to the first entry to be processed in the lli_in_array */
2152         u32 current_in_entry = 0;
2153         /* Points to the first entry to be processed in the lli_out_array */
2154         u32 current_out_entry = 0;
2155         /* Max size of the input table */
2156         u32 in_table_data_size = 0;
2157         /* Max size of the output table */
2158         u32 out_table_data_size = 0;
2159         /* Flag te signifies if this is the last tables build */
2160         u32 last_table_flag = 0;
2161         /* The data size that should be in table */
2162         u32 table_data_size = 0;
2163         /* Number of entries in the input table */
2164         u32 num_entries_in_table = 0;
2165         /* Number of entries in the output table */
2166         u32 num_entries_out_table = 0;
2167
2168         if (!dma_ctx) {
2169                 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2170                 return -EINVAL;
2171         }
2172
2173         /* Initiate to point after the message area */
2174         lli_table_alloc_addr = (void *)(sep->shared_addr +
2175                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2176                 (dma_ctx->num_lli_tables_created *
2177                 (sizeof(struct sep_lli_entry) *
2178                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2179         dma_lli_table_alloc_addr = lli_table_alloc_addr;
2180
2181         if (dmatables_region) {
2182                 /* 2 for both in+out table */
2183                 if (sep_allocate_dmatables_region(sep,
2184                                         dmatables_region,
2185                                         dma_ctx,
2186                                         2*sep_in_lli_entries))
2187                         return -ENOMEM;
2188                 lli_table_alloc_addr = *dmatables_region;
2189         }
2190
2191         /* Loop till all the entries in in array are not processed */
2192         while (current_in_entry < sep_in_lli_entries) {
2193                 /* Set the new input and output tables */
2194                 in_lli_table_ptr =
2195                         (struct sep_lli_entry *)lli_table_alloc_addr;
2196                 dma_in_lli_table_ptr =
2197                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2198
2199                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2200                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2201                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2202                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2203
2204                 /* Set the first output tables */
2205                 out_lli_table_ptr =
2206                         (struct sep_lli_entry *)lli_table_alloc_addr;
2207                 dma_out_lli_table_ptr =
2208                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2209
2210                 /* Check if the DMA table area limit was overrun */
2211                 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2212                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2213                         ((void *)sep->shared_addr +
2214                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2215                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2216
2217                         dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2218                         return -ENOMEM;
2219                 }
2220
2221                 /* Update the number of the lli tables created */
2222                 dma_ctx->num_lli_tables_created += 2;
2223
2224                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2225                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2226                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2227                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2228
2229                 /* Calculate the maximum size of data for input table */
2230                 in_table_data_size =
2231                         sep_calculate_lli_table_max_size(sep,
2232                         &lli_in_array[current_in_entry],
2233                         (sep_in_lli_entries - current_in_entry),
2234                         &last_table_flag);
2235
2236                 /* Calculate the maximum size of data for output table */
2237                 out_table_data_size =
2238                         sep_calculate_lli_table_max_size(sep,
2239                         &lli_out_array[current_out_entry],
2240                         (sep_out_lli_entries - current_out_entry),
2241                         &last_table_flag);
2242
2243                 if (!last_table_flag) {
2244                         in_table_data_size = (in_table_data_size /
2245                                 block_size) * block_size;
2246                         out_table_data_size = (out_table_data_size /
2247                                 block_size) * block_size;
2248                 }
2249
2250                 table_data_size = in_table_data_size;
2251                 if (table_data_size > out_table_data_size)
2252                         table_data_size = out_table_data_size;
2253
2254                 dev_dbg(&sep->pdev->dev,
2255                         "[PID%d] construct tables from lli"
2256                         " in_table_data_size is (hex) %x\n", current->pid,
2257                         in_table_data_size);
2258
2259                 dev_dbg(&sep->pdev->dev,
2260                         "[PID%d] construct tables from lli"
2261                         "out_table_data_size is (hex) %x\n", current->pid,
2262                         out_table_data_size);
2263
2264                 /* Construct input lli table */
2265                 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2266                         in_lli_table_ptr,
2267                         &current_in_entry,
2268                         &num_entries_in_table,
2269                         table_data_size);
2270
2271                 /* Construct output lli table */
2272                 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2273                         out_lli_table_ptr,
2274                         &current_out_entry,
2275                         &num_entries_out_table,
2276                         table_data_size);
2277
2278                 /* If info entry is null - this is the first table built */
2279                 if (info_in_entry_ptr == NULL) {
2280                         /* Set the output parameters to physical addresses */
2281                         *lli_table_in_ptr =
2282                         sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2283
2284                         *in_num_entries_ptr = num_entries_in_table;
2285
2286                         *lli_table_out_ptr =
2287                                 sep_shared_area_virt_to_bus(sep,
2288                                 dma_out_lli_table_ptr);
2289
2290                         *out_num_entries_ptr = num_entries_out_table;
2291                         *table_data_size_ptr = table_data_size;
2292
2293                         dev_dbg(&sep->pdev->dev,
2294                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2295                                 current->pid,
2296                                 (unsigned long)*lli_table_in_ptr);
2297                         dev_dbg(&sep->pdev->dev,
2298                                 "[PID%d] output lli_table_out_ptr is %08lx\n",
2299                                 current->pid,
2300                                 (unsigned long)*lli_table_out_ptr);
2301                 } else {
2302                         /* Update the info entry of the previous in table */
2303                         info_in_entry_ptr->bus_address =
2304                                 sep_shared_area_virt_to_bus(sep,
2305                                 dma_in_lli_table_ptr);
2306
2307                         info_in_entry_ptr->block_size =
2308                                 ((num_entries_in_table) << 24) |
2309                                 (table_data_size);
2310
2311                         /* Update the info entry of the previous in table */
2312                         info_out_entry_ptr->bus_address =
2313                                 sep_shared_area_virt_to_bus(sep,
2314                                 dma_out_lli_table_ptr);
2315
2316                         info_out_entry_ptr->block_size =
2317                                 ((num_entries_out_table) << 24) |
2318                                 (table_data_size);
2319
2320                         dev_dbg(&sep->pdev->dev,
2321                                 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2322                                 current->pid,
2323                                 (unsigned long)info_in_entry_ptr->bus_address,
2324                                 info_in_entry_ptr->block_size);
2325
2326                         dev_dbg(&sep->pdev->dev,
2327                                 "[PID%d] output lli_table_out_ptr:"
2328                                 "%08lx  %08x\n",
2329                                 current->pid,
2330                                 (unsigned long)info_out_entry_ptr->bus_address,
2331                                 info_out_entry_ptr->block_size);
2332                 }
2333
2334                 /* Save the pointer to the info entry of the current tables */
2335                 info_in_entry_ptr = in_lli_table_ptr +
2336                         num_entries_in_table - 1;
2337                 info_out_entry_ptr = out_lli_table_ptr +
2338                         num_entries_out_table - 1;
2339
2340                 dev_dbg(&sep->pdev->dev,
2341                         "[PID%d] output num_entries_out_table is %x\n",
2342                         current->pid,
2343                         (u32)num_entries_out_table);
2344                 dev_dbg(&sep->pdev->dev,
2345                         "[PID%d] output info_in_entry_ptr is %lx\n",
2346                         current->pid,
2347                         (unsigned long)info_in_entry_ptr);
2348                 dev_dbg(&sep->pdev->dev,
2349                         "[PID%d] output info_out_entry_ptr is %lx\n",
2350                         current->pid,
2351                         (unsigned long)info_out_entry_ptr);
2352         }
2353
2354         /* Print input tables */
2355         if (!dmatables_region) {
2356                 sep_debug_print_lli_tables(
2357                         sep,
2358                         (struct sep_lli_entry *)
2359                         sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2360                         *in_num_entries_ptr,
2361                         *table_data_size_ptr);
2362         }
2363
2364         /* Print output tables */
2365         if (!dmatables_region) {
2366                 sep_debug_print_lli_tables(
2367                         sep,
2368                         (struct sep_lli_entry *)
2369                         sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2370                         *out_num_entries_ptr,
2371                         *table_data_size_ptr);
2372         }
2373
2374         return 0;
2375 }
2376
2377 /**
2378  * sep_prepare_input_output_dma_table - prepare DMA I/O table
 * @app_virt_in_addr: virtual address of the input data buffer
 * @app_virt_out_addr: virtual address of the output data buffer
 * @data_size: number of bytes to be transferred
 * @block_size: cipher block size for the operation
 * @lli_table_in_ptr: returns bus address of the first input LLI table
 * @lli_table_out_ptr: returns bus address of the first output LLI table
 * @in_num_entries_ptr: returns number of entries in the first input table
 * @out_num_entries_ptr: returns number of entries in the first output table
 * @table_data_size_ptr: returns data size covered by the first tables
2388  * @is_kva: set for kernel data; used only for kernel crypto module
2389  *
 * This function builds input and output DMA tables for synchronous
2391  * symmetric operations (AES, DES, HASH). It also checks that each table
2392  * is of the modular block size
2393  * Note that all bus addresses that are passed to the SEP
2394  * are in 32 bit format; the SEP is a 32 bit device
2395  */
2396 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2397         unsigned long app_virt_in_addr,
2398         unsigned long app_virt_out_addr,
2399         u32 data_size,
2400         u32 block_size,
2401         dma_addr_t *lli_table_in_ptr,
2402         dma_addr_t *lli_table_out_ptr,
2403         u32 *in_num_entries_ptr,
2404         u32 *out_num_entries_ptr,
2405         u32 *table_data_size_ptr,
2406         bool is_kva,
2407         void **dmatables_region,
2408         struct sep_dma_context *dma_ctx)
2409
2410 {
2411         int error = 0;
2412         /* Array of pointers of page */
2413         struct sep_lli_entry *lli_in_array;
2414         /* Array of pointers of page */
2415         struct sep_lli_entry *lli_out_array;
2416
2417         if (!dma_ctx) {
2418                 error = -EINVAL;
2419                 goto end_function;
2420         }
2421
2422         if (data_size == 0) {
2423                 /* Prepare empty table for input and output */
2424                 if (dmatables_region) {
2425                         error = sep_allocate_dmatables_region(
2426                                         sep,
2427                                         dmatables_region,
2428                                         dma_ctx,
2429                                         2);
2430                   if (error)
2431                         goto end_function;
2432                 }
2433                 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2434                         in_num_entries_ptr, table_data_size_ptr,
2435                         dmatables_region, dma_ctx);
2436
2437                 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2438                         out_num_entries_ptr, table_data_size_ptr,
2439                         dmatables_region, dma_ctx);
2440
2441                 goto update_dcb_counter;
2442         }
2443
2444         /* Initialize the pages pointers */
2445         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2446         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2447
2448         /* Lock the pages of the buffer and translate them to pages */
2449         if (is_kva == true) {
2450                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2451                                                 current->pid);
2452                 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2453                                 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2454                                 dma_ctx);
2455                 if (error) {
2456                         dev_warn(&sep->pdev->dev,
2457                                 "[PID%d] sep_lock_kernel_pages for input "
2458                                 "virtual buffer failed\n", current->pid);
2459
2460                         goto end_function;
2461                 }
2462
2463                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2464                                                 current->pid);
2465                 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2466                                 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2467                                 dma_ctx);
2468
2469                 if (error) {
2470                         dev_warn(&sep->pdev->dev,
2471                                 "[PID%d] sep_lock_kernel_pages for output "
2472                                 "virtual buffer failed\n", current->pid);
2473
2474                         goto end_function_free_lli_in;
2475                 }
2476
2477         }
2478
2479         else {
2480                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2481                                                 current->pid);
2482                 error = sep_lock_user_pages(sep, app_virt_in_addr,
2483                                 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2484                                 dma_ctx);
2485                 if (error) {
2486                         dev_warn(&sep->pdev->dev,
2487                                 "[PID%d] sep_lock_user_pages for input "
2488                                 "virtual buffer failed\n", current->pid);
2489
2490                         goto end_function;
2491                 }
2492
2493                 if (dma_ctx->secure_dma == true) {
2494                         /* secure_dma requires use of non accessible memory */
2495                         dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2496                                 current->pid);
2497                         error = sep_lli_table_secure_dma(sep,
2498                                 app_virt_out_addr, data_size, &lli_out_array,
2499                                 SEP_DRIVER_OUT_FLAG, dma_ctx);
2500                         if (error) {
2501                                 dev_warn(&sep->pdev->dev,
2502                                         "[PID%d] secure dma table setup "
2503                                         " for output virtual buffer failed\n",
2504                                         current->pid);
2505
2506                                 goto end_function_free_lli_in;
2507                         }
2508                 } else {
2509                         /* For normal, non-secure dma */
2510                         dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2511                                 current->pid);
2512
2513                         dev_dbg(&sep->pdev->dev,
2514                                 "[PID%d] Locking user output pages\n",
2515                                 current->pid);
2516
2517                         error = sep_lock_user_pages(sep, app_virt_out_addr,
2518                                 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2519                                 dma_ctx);
2520
2521                         if (error) {
2522                                 dev_warn(&sep->pdev->dev,
2523                                         "[PID%d] sep_lock_user_pages"
2524                                         " for output virtual buffer failed\n",
2525                                         current->pid);
2526
2527                                 goto end_function_free_lli_in;
2528                         }
2529                 }
2530         }
2531
2532         dev_dbg(&sep->pdev->dev,
2533                 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2534                 current->pid,
2535                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2536
2537         dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2538                 current->pid,
2539                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2540
2541         dev_dbg(&sep->pdev->dev,
2542                 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2543                 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2544
2545         /* Call the function that creates table from the lli arrays */
2546         dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2547                                         current->pid);
2548         error = sep_construct_dma_tables_from_lli(
2549                         sep, lli_in_array,
2550                         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2551                                                                 in_num_pages,
2552                         lli_out_array,
2553                         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2554                                                                 out_num_pages,
2555                         block_size, lli_table_in_ptr, lli_table_out_ptr,
2556                         in_num_entries_ptr, out_num_entries_ptr,
2557                         table_data_size_ptr, dmatables_region, dma_ctx);
2558
2559         if (error) {
2560                 dev_warn(&sep->pdev->dev,
2561                         "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2562                         current->pid);
2563                 goto end_function_with_error;
2564         }
2565
2566         kfree(lli_out_array);
2567         kfree(lli_in_array);
2568
2569 update_dcb_counter:
2570         /* Update DCB counter */
2571         dma_ctx->nr_dcb_creat++;
2572
2573         goto end_function;
2574
2575 end_function_with_error:
2576         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2577         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2578         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2579         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2580         kfree(lli_out_array);
2581
2582
2583 end_function_free_lli_in:
2584         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2585         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2586         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2587         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2588         kfree(lli_in_array);
2589
2590 end_function:
2591
2592         return error;
2593
2594 }
2595
2596 /**
2597  * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2598  * @app_in_address: unsigned long; for data buffer in (user space)
2599  * @app_out_address: unsigned long; for data buffer out (user space)
2600  * @data_in_size: u32; for size of data
2601  * @block_size: u32; for block size
2602  * @tail_block_size: u32; for size of tail block
2603  * @isapplet: bool; to indicate external app
2604  * @is_kva: bool; kernel buffer; only used for kernel crypto module
 * @secure_dma: indicates whether this is secure_dma using IMR
2606  *
2607  * This function prepares the linked DMA tables and puts the
 * address for the linked list of tables into a DCB (data control
2609  * block) the address of which is known by the SEP hardware
2610  * Note that all bus addresses that are passed to the SEP
2611  * are in 32 bit format; the SEP is a 32 bit device
2612  */
2613 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2614         unsigned long  app_in_address,
2615         unsigned long  app_out_address,
2616         u32  data_in_size,
2617         u32  block_size,
2618         u32  tail_block_size,
2619         bool isapplet,
2620         bool    is_kva,
2621         bool    secure_dma,
2622         struct sep_dcblock *dcb_region,
2623         void **dmatables_region,
2624         struct sep_dma_context **dma_ctx,
2625         struct scatterlist *src_sg,
2626         struct scatterlist *dst_sg)
2627 {
2628         int error = 0;
2629         /* Size of tail */
2630         u32 tail_size = 0;
2631         /* Address of the created DCB table */
2632         struct sep_dcblock *dcb_table_ptr = NULL;
2633         /* The physical address of the first input DMA table */
2634         dma_addr_t in_first_mlli_address = 0;
2635         /* Number of entries in the first input DMA table */
2636         u32  in_first_num_entries = 0;
2637         /* The physical address of the first output DMA table */
2638         dma_addr_t  out_first_mlli_address = 0;
2639         /* Number of entries in the first output DMA table */
2640         u32  out_first_num_entries = 0;
2641         /* Data in the first input/output table */
2642         u32  first_data_size = 0;
2643
2644         dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2645                 current->pid, app_in_address);
2646
2647         dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2648                 current->pid, app_out_address);
2649
2650         dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2651                 current->pid, data_in_size);
2652
2653         dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2654                 current->pid, block_size);
2655
2656         dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2657                 current->pid, tail_block_size);
2658
2659         dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2660                 current->pid, isapplet);
2661
2662         dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2663                 current->pid, is_kva);
2664
2665         dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2666                 current->pid, src_sg);
2667
2668         dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2669                 current->pid, dst_sg);
2670
2671         if (!dma_ctx) {
2672                 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2673                                                 current->pid);
2674                 error = -EINVAL;
2675                 goto end_function;
2676         }
2677
2678         if (*dma_ctx) {
2679                 /* In case there are multiple DCBs for this transaction */
2680                 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2681                                                 current->pid);
2682         } else {
2683                 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2684                 if (!(*dma_ctx)) {
2685                         dev_dbg(&sep->pdev->dev,
2686                                 "[PID%d] Not enough memory for DMA context\n",
2687                                 current->pid);
2688                   error = -ENOMEM;
2689                   goto end_function;
2690                 }
2691                 dev_dbg(&sep->pdev->dev,
2692                         "[PID%d] Created DMA context addr at 0x%p\n",
2693                         current->pid, *dma_ctx);
2694         }
2695
2696         (*dma_ctx)->secure_dma = secure_dma;
2697
2698         /* these are for kernel crypto only */
2699         (*dma_ctx)->src_sg = src_sg;
2700         (*dma_ctx)->dst_sg = dst_sg;
2701
2702         if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2703                 /* No more DCBs to allocate */
2704                 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2705                                                 current->pid);
2706                 error = -ENOSPC;
2707                 goto end_function_error;
2708         }
2709
2710         /* Allocate new DCB */
2711         if (dcb_region) {
2712                 dcb_table_ptr = dcb_region;
2713         } else {
2714                 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2715                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2716                         ((*dma_ctx)->nr_dcb_creat *
2717                                                 sizeof(struct sep_dcblock)));
2718         }
2719
2720         /* Set the default values in the DCB */
2721         dcb_table_ptr->input_mlli_address = 0;
2722         dcb_table_ptr->input_mlli_num_entries = 0;
2723         dcb_table_ptr->input_mlli_data_size = 0;
2724         dcb_table_ptr->output_mlli_address = 0;
2725         dcb_table_ptr->output_mlli_num_entries = 0;
2726         dcb_table_ptr->output_mlli_data_size = 0;
2727         dcb_table_ptr->tail_data_size = 0;
2728         dcb_table_ptr->out_vr_tail_pt = 0;
2729
2730         if (isapplet == true) {
2731
2732                 /* Check if there is enough data for DMA operation */
2733                 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2734                         if (is_kva == true) {
2735                                 error = -ENODEV;
2736                                 goto end_function_error;
2737                         } else {
2738                                 if (copy_from_user(dcb_table_ptr->tail_data,
2739                                         (void __user *)app_in_address,
2740                                         data_in_size)) {
2741                                         error = -EFAULT;
2742                                         goto end_function_error;
2743                                 }
2744                         }
2745
2746                         dcb_table_ptr->tail_data_size = data_in_size;
2747
2748                         /* Set the output user-space address for mem2mem op */
2749                         if (app_out_address)
2750                                 dcb_table_ptr->out_vr_tail_pt =
2751                                 (aligned_u64)app_out_address;
2752
2753                         /*
2754                          * Update both data length parameters in order to avoid
2755                          * second data copy and allow building of empty mlli
2756                          * tables
2757                          */
2758                         tail_size = 0x0;
2759                         data_in_size = 0x0;
2760
2761                 } else {
2762                         if (!app_out_address) {
2763                                 tail_size = data_in_size % block_size;
2764                                 if (!tail_size) {
2765                                         if (tail_block_size == block_size)
2766                                                 tail_size = block_size;
2767                                 }
2768                         } else {
2769                                 tail_size = 0;
2770                         }
2771                 }
2772                 if (tail_size) {
2773                         if (tail_size > sizeof(dcb_table_ptr->tail_data))
2774                                 return -EINVAL;
2775                         if (is_kva == true) {
2776                                 error = -ENODEV;
2777                                 goto end_function_error;
2778                         } else {
2779                                 /* We have tail data - copy it to DCB */
2780                                 if (copy_from_user(dcb_table_ptr->tail_data,
2781                                         (void __user *)(app_in_address +
2782                                         data_in_size - tail_size), tail_size)) {
2783                                         error = -EFAULT;
2784                                         goto end_function_error;
2785                                 }
2786                         }
2787                         if (app_out_address)
2788                                 /*
2789                                  * Calculate the output address
2790                                  * according to tail data size
2791                                  */
2792                                 dcb_table_ptr->out_vr_tail_pt =
2793                                         (aligned_u64)app_out_address +
2794                                         data_in_size - tail_size;
2795
2796                         /* Save the real tail data size */
2797                         dcb_table_ptr->tail_data_size = tail_size;
2798                         /*
2799                          * Update the data size without the tail
2800                          * data size AKA data for the dma
2801                          */
2802                         data_in_size = (data_in_size - tail_size);
2803                 }
2804         }
2805         /* Check if we need to build only input table or input/output */
2806         if (app_out_address) {
2807                 /* Prepare input/output tables */
2808                 error = sep_prepare_input_output_dma_table(sep,
2809                                 app_in_address,
2810                                 app_out_address,
2811                                 data_in_size,
2812                                 block_size,
2813                                 &in_first_mlli_address,
2814                                 &out_first_mlli_address,
2815                                 &in_first_num_entries,
2816                                 &out_first_num_entries,
2817                                 &first_data_size,
2818                                 is_kva,
2819                                 dmatables_region,
2820                                 *dma_ctx);
2821         } else {
2822                 /* Prepare input tables */
2823                 error = sep_prepare_input_dma_table(sep,
2824                                 app_in_address,
2825                                 data_in_size,
2826                                 block_size,
2827                                 &in_first_mlli_address,
2828                                 &in_first_num_entries,
2829                                 &first_data_size,
2830                                 is_kva,
2831                                 dmatables_region,
2832                                 *dma_ctx);
2833         }
2834
2835         if (error) {
2836                 dev_warn(&sep->pdev->dev,
2837                         "prepare DMA table call failed "
2838                         "from prepare DCB call\n");
2839                 goto end_function_error;
2840         }
2841
2842         /* Set the DCB values */
2843         dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2844         dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2845         dcb_table_ptr->input_mlli_data_size = first_data_size;
2846         dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2847         dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2848         dcb_table_ptr->output_mlli_data_size = first_data_size;
2849
2850         goto end_function;
2851
2852 end_function_error:
2853         kfree(*dma_ctx);
2854         *dma_ctx = NULL;
2855
2856 end_function:
2857         return error;
2858
2859 }
2860
2861
2862 /**
2863  * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2864  * @sep: pointer to struct sep_device
2865  * @isapplet: indicates external application (used for kernel access)
2866  * @is_kva: indicates kernel addresses (only used for kernel crypto)
2867  *
2868  * This function frees the DMA tables and DCB
2869  */
2870 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2871         bool is_kva, struct sep_dma_context **dma_ctx)
2872 {
2873         struct sep_dcblock *dcb_table_ptr;
2874         unsigned long pt_hold;
2875         void *tail_pt;
2876
2877         int i = 0;
2878         int error = 0;
2879         int error_temp = 0;
2880
2881         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2882                                         current->pid);
2883
2884         if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
2885                 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2886                         current->pid);
2887
2888                 /* Tail stuff is only for non secure_dma */
2889                 /* Set pointer to first DCB table */
2890                 dcb_table_ptr = (struct sep_dcblock *)
2891                         (sep->shared_addr +
2892                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2893
2894                 /**
2895                  * Go over each DCB and see if
2896                  * tail pointer must be updated
2897                  */
2898                 for (i = 0; dma_ctx && *dma_ctx &&
2899                         i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2900                         if (dcb_table_ptr->out_vr_tail_pt) {
2901                                 pt_hold = (unsigned long)dcb_table_ptr->
2902                                         out_vr_tail_pt;
2903                                 tail_pt = (void *)pt_hold;
2904                                 if (is_kva == true) {
2905                                         error = -ENODEV;
2906                                         break;
2907                                 } else {
2908                                         error_temp = copy_to_user(
2909                                                 (void __user *)tail_pt,
2910                                                 dcb_table_ptr->tail_data,
2911                                                 dcb_table_ptr->tail_data_size);
2912                                 }
2913                                 if (error_temp) {
2914                                         /* Release the DMA resource */
2915                                         error = -EFAULT;
2916                                         break;
2917                                 }
2918                         }
2919                 }
2920         }
2921
2922         /* Free the output pages, if any */
2923         sep_free_dma_table_data_handler(sep, dma_ctx);
2924
2925         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2926                                         current->pid);
2927
2928         return error;
2929 }
2930
2931 /**
2932  * sep_prepare_dcb_handler - prepare a control block
2933  * @sep: pointer to struct sep_device
2934  * @arg: pointer to user parameters
2935  * @secure_dma: indicate whether we are using secure_dma on IMR
2936  *
2937  * This function will retrieve the RAR buffer physical addresses, type
2938  * & size corresponding to the RAR handles provided in the buffers vector.
2939  */
2940 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2941                                    bool secure_dma,
2942                                    struct sep_dma_context **dma_ctx)
2943 {
2944         int error;
2945         /* Command arguments */
2946         static struct build_dcb_struct command_args;
2947
2948         /* Get the command arguments */
2949         if (copy_from_user(&command_args, (void __user *)arg,
2950                                         sizeof(struct build_dcb_struct))) {
2951                 error = -EFAULT;
2952                 goto end_function;
2953         }
2954
2955         dev_dbg(&sep->pdev->dev,
2956                 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2957                         current->pid, command_args.app_in_address);
2958         dev_dbg(&sep->pdev->dev,
2959                 "[PID%d] app_out_address is %08llx\n",
2960                         current->pid, command_args.app_out_address);
2961         dev_dbg(&sep->pdev->dev,
2962                 "[PID%d] data_size is %x\n",
2963                         current->pid, command_args.data_in_size);
2964         dev_dbg(&sep->pdev->dev,
2965                 "[PID%d] block_size is %x\n",
2966                         current->pid, command_args.block_size);
2967         dev_dbg(&sep->pdev->dev,
2968                 "[PID%d] tail block_size is %x\n",
2969                         current->pid, command_args.tail_block_size);
2970         dev_dbg(&sep->pdev->dev,
2971                 "[PID%d] is_applet is %x\n",
2972                         current->pid, command_args.is_applet);
2973
2974         if (!command_args.app_in_address) {
2975                 dev_warn(&sep->pdev->dev,
2976                         "[PID%d] null app_in_address\n", current->pid);
2977                 error = -EINVAL;
2978                 goto end_function;
2979         }
2980
2981         error = sep_prepare_input_output_dma_table_in_dcb(sep,
2982                         (unsigned long)command_args.app_in_address,
2983                         (unsigned long)command_args.app_out_address,
2984                         command_args.data_in_size, command_args.block_size,
2985                         command_args.tail_block_size,
2986                         command_args.is_applet, false,
2987                         secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2988
2989 end_function:
2990         return error;
2991
2992 }
2993
2994 /**
2995  * sep_free_dcb_handler - free control block resources
 * @sep: pointer to struct sep_device
 * @dma_ctx: pointer to the DMA context to be freed
 *
2998  * This function frees the DCB resources and updates the needed
2999  * user-space buffers.
3000  */
3001 static int sep_free_dcb_handler(struct sep_device *sep,
3002                                 struct sep_dma_context **dma_ctx)
3003 {
3004         if (!dma_ctx || !(*dma_ctx)) {
3005                 dev_dbg(&sep->pdev->dev,
3006                         "[PID%d] no dma context defined, nothing to free\n",
3007                         current->pid);
3008                 return -EINVAL;
3009         }
3010
3011         dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3012                 current->pid,
3013                 (*dma_ctx)->nr_dcb_creat);
3014
3015         return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3016 }
3017
3018 /**
3019  * sep_ioctl - ioctl handler for sep device
3020  * @filp: pointer to struct file
3021  * @cmd: command
3022  * @arg: pointer to argument structure
3023  *
3024  * Implement the ioctl methods available on the SEP device.
3025  */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
		current->pid, cmd);
	dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
		current->pid, *dma_ctx);

	/* Make sure we own this device */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Check that sep_mmap has been called before */
	if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
				&call_status->status)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] mmap not called\n", current->pid);
		error = -EPROTO;
		goto end_function;
	}

	/* Check that the command is for SEP device */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
			current->pid);
		/* Only one send-message is allowed per transaction */
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] send msg already done\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}
		/* Send command to SEP */
		error = sep_send_command_handler(sep);
		if (!error)
			set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				&call_status->status);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
			current->pid);
		break;
	case SEP_IOCENDTRANSACTION:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION start\n",
			current->pid);
		error = sep_end_transaction_handler(sep, dma_ctx, call_status,
						    my_queue_elem);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION end\n",
			current->pid);
		break;
	case SEP_IOCPREPAREDCB:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB start\n",
			current->pid);
		/* fall through - both DCB-prepare variants share the body
		 * below; cmd is re-checked there to pick secure_dma */
	case SEP_IOCPREPAREDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
			current->pid);
		/* DCBs must be prepared before the message is sent */
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb prep needed before send msg\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}

		if (!arg) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb null arg\n", current->pid);
			error = -EINVAL;
			goto end_function;
		}

		if (cmd == SEP_IOCPREPAREDCB) {
			/* No secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, false,
				dma_ctx);
		} else {
			/* Secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOC_POC (with secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, true,
				dma_ctx);
		}
		dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
			current->pid);
		break;
	case SEP_IOCFREEDCB:
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
			current->pid);
		/* fall through - same free path serves the secure-dma case */
	case SEP_IOCFREEDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
			current->pid);
		error = sep_free_dcb_handler(sep, dma_ctx);
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
			current->pid);
		break;
	default:
		error = -ENOTTY;
		dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
			current->pid);
		break;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);

	return error;
}
3162
3163 /**
3164  * sep_inthandler - interrupt handler for sep device
3165  * @irq: interrupt
3166  * @dev_id: device id
3167  */
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	unsigned long lock_irq_flag;
	u32 reg_val, reg_val2 = 0;
	struct sep_device *sep = dev_id;
	irqreturn_t int_error = IRQ_HANDLED;

	/* Are we in power save? */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
		return IRQ_NONE;
	}
#endif

	/* Spurious interrupt: no transaction is in flight on this device */
	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
		return IRQ_NONE;
	}

	/* Read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);

	/* IRR bit 13 is the SEP-to-host reply interrupt */
	if (reg_val & (0x1 << 13)) {

		/* Lock and update the counter of reply messages */
		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
					sep->send_ct, sep->reply_ct);

		/* Is this a kernel client request */
		if (sep->in_kernel) {
			/*
			 * Kernel crypto completion is deferred to the
			 * tasklet; skip the GPR2 decode and working-lock
			 * clear below (presumably handled by the tasklet
			 * path — NOTE(review): confirm against
			 * sep->finish_tasklet's handler).
			 */
			tasklet_schedule(&sep->finish_tasklet);
			goto finished_interrupt;
		}

		/* Is this printf or daemon request? */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		/* GPR2 bit 30: printf request; bit 31: daemon request;
		 * neither set: an ordinary SEP reply, wake the waiter */
		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	/* Acknowledge the handled interrupt by writing IRR back to ICR */
	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}
3236
3237 /**
3238  * sep_reconfig_shared_area - reconfigure shared area
3239  * @sep: pointer to struct sep_device
3240  *
3241  * Reconfig the shared area between HOST and SEP - needed in case
3242  * the DX_CC_Init function was called before OS loading.
3243  */
3244 static int sep_reconfig_shared_area(struct sep_device *sep)
3245 {
3246         int ret_val;
3247
3248         /* use to limit waiting for SEP */
3249         unsigned long end_time;
3250
3251         /* Send the new SHARED MESSAGE AREA to the SEP */
3252         dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3253                                 (unsigned long long)sep->shared_bus);
3254
3255         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3256
3257         /* Poll for SEP response */
3258         ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3259
3260         end_time = jiffies + (WAIT_TIME * HZ);
3261
3262         while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3263                 (ret_val != sep->shared_bus))
3264                 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3265
3266         /* Check the return value (register) */
3267         if (ret_val != sep->shared_bus) {
3268                 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3269                 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3270                 ret_val = -ENOMEM;
3271         } else
3272                 ret_val = 0;
3273
3274         dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3275
3276         return ret_val;
3277 }
3278
3279 /**
3280  *      sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3281  *                                              contexts into use
3282  *      @sep: SEP device
3283  *      @dcb_region: DCB region copy
3284  *      @dmatables_region: MLLI/DMA tables copy
3285  *      @dma_ctx: DMA context for current transaction
3286  */
3287 ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3288                                         struct sep_dcblock **dcb_region,
3289                                         void **dmatables_region,
3290                                         struct sep_dma_context *dma_ctx)
3291 {
3292         void *dmaregion_free_start = NULL;
3293         void *dmaregion_free_end = NULL;
3294         void *dcbregion_free_start = NULL;
3295         void *dcbregion_free_end = NULL;
3296         ssize_t error = 0;
3297
3298         dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3299                 current->pid);
3300
3301         if (1 > dma_ctx->nr_dcb_creat) {
3302                 dev_warn(&sep->pdev->dev,
3303                          "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3304                          current->pid, dma_ctx->nr_dcb_creat);
3305                 error = -EINVAL;
3306                 goto end_function;
3307         }
3308
3309         dmaregion_free_start = sep->shared_addr
3310                                 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3311         dmaregion_free_end = dmaregion_free_start
3312                                 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3313
3314         if (dmaregion_free_start
3315              + dma_ctx->dmatables_len > dmaregion_free_end) {
3316                 error = -ENOMEM;
3317                 goto end_function;
3318         }
3319         memcpy(dmaregion_free_start,
3320                *dmatables_region,
3321                dma_ctx->dmatables_len);
3322         /* Free MLLI table copy */
3323         kfree(*dmatables_region);
3324         *dmatables_region = NULL;
3325
3326         /* Copy thread's DCB  table copy to DCB table region */
3327         dcbregion_free_start = sep->shared_addr +
3328                                 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3329         dcbregion_free_end = dcbregion_free_start +
3330                                 (SEP_MAX_NUM_SYNC_DMA_OPS *
3331                                         sizeof(struct sep_dcblock)) - 1;
3332
3333         if (dcbregion_free_start
3334              + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3335              > dcbregion_free_end) {
3336                 error = -ENOMEM;
3337                 goto end_function;
3338         }
3339
3340         memcpy(dcbregion_free_start,
3341                *dcb_region,
3342                dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3343
3344         /* Print the tables */
3345         dev_dbg(&sep->pdev->dev, "activate: input table\n");
3346         sep_debug_print_lli_tables(sep,
3347                 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3348                 (*dcb_region)->input_mlli_address),
3349                 (*dcb_region)->input_mlli_num_entries,
3350                 (*dcb_region)->input_mlli_data_size);
3351
3352         dev_dbg(&sep->pdev->dev, "activate: output table\n");
3353         sep_debug_print_lli_tables(sep,
3354                 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3355                 (*dcb_region)->output_mlli_address),
3356                 (*dcb_region)->output_mlli_num_entries,
3357                 (*dcb_region)->output_mlli_data_size);
3358
3359         dev_dbg(&sep->pdev->dev,
3360                  "[PID%d] printing activated tables\n", current->pid);
3361
3362 end_function:
3363         kfree(*dmatables_region);
3364         *dmatables_region = NULL;
3365
3366         kfree(*dcb_region);
3367         *dcb_region = NULL;
3368
3369         return error;
3370 }
3371
3372 /**
3373  *      sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3374  *      @sep: SEP device
3375  *      @dcb_region: DCB region buf to create for current transaction
3376  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3377  *      @dma_ctx: DMA context buf to create for current transaction
3378  *      @user_dcb_args: User arguments for DCB/MLLI creation
3379  *      @num_dcbs: Number of DCBs to create
3380  *      @secure_dma: Indicate use of IMR restricted memory secure dma
3381  */
3382 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3383                         struct sep_dcblock **dcb_region,
3384                         void **dmatables_region,
3385                         struct sep_dma_context **dma_ctx,
3386                         const struct build_dcb_struct __user *user_dcb_args,
3387                         const u32 num_dcbs, bool secure_dma)
3388 {
3389         int error = 0;
3390         int i = 0;
3391         struct build_dcb_struct *dcb_args = NULL;
3392
3393         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3394                 current->pid);
3395
3396         if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3397                 error = -EINVAL;
3398                 goto end_function;
3399         }
3400
3401         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3402                 dev_warn(&sep->pdev->dev,
3403                          "[PID%d] invalid number of dcbs 0x%08X\n",
3404                          current->pid, num_dcbs);
3405                 error = -EINVAL;
3406                 goto end_function;
3407         }
3408
3409         dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3410                            GFP_KERNEL);
3411         if (!dcb_args) {
3412                 error = -ENOMEM;
3413                 goto end_function;
3414         }
3415
3416         if (copy_from_user(dcb_args,
3417                         user_dcb_args,
3418                         num_dcbs * sizeof(struct build_dcb_struct))) {
3419                 error = -EFAULT;
3420                 goto end_function;
3421         }
3422
3423         /* Allocate thread-specific memory for DCB */
3424         *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3425                               GFP_KERNEL);
3426         if (!(*dcb_region)) {
3427                 error = -ENOMEM;
3428                 goto end_function;
3429         }
3430
3431         /* Prepare DCB and MLLI table into the allocated regions */
3432         for (i = 0; i < num_dcbs; i++) {
3433                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3434                                 (unsigned long)dcb_args[i].app_in_address,
3435                                 (unsigned long)dcb_args[i].app_out_address,
3436                                 dcb_args[i].data_in_size,
3437                                 dcb_args[i].block_size,
3438                                 dcb_args[i].tail_block_size,
3439                                 dcb_args[i].is_applet,
3440                                 false, secure_dma,
3441                                 *dcb_region, dmatables_region,
3442                                 dma_ctx,
3443                                 NULL,
3444                                 NULL);
3445                 if (error) {
3446                         dev_warn(&sep->pdev->dev,
3447                                  "[PID%d] dma table creation failed\n",
3448                                  current->pid);
3449                         goto end_function;
3450                 }
3451
3452                 if (dcb_args[i].app_in_address != 0)
3453                         (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3454         }
3455
3456 end_function:
3457         kfree(dcb_args);
3458         return error;
3459
3460 }
3461
3462 /**
3463  *      sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3464  *      for kernel crypto
3465  *      @sep: SEP device
3466  *      @dcb_region: DCB region buf to create for current transaction
3467  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3468  *      @dma_ctx: DMA context buf to create for current transaction
 *      @dcb_data: Kernel-side arguments for DCB/MLLI creation
3470  *      @num_dcbs: Number of DCBs to create
3471  *      This does that same thing as sep_create_dcb_dmatables_context
3472  *      except that it is used only for the kernel crypto operation. It is
3473  *      separate because there is no user data involved; the dcb data structure
3474  *      is specific for kernel crypto (build_dcb_struct_kernel)
3475  */
3476 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3477                         struct sep_dcblock **dcb_region,
3478                         void **dmatables_region,
3479                         struct sep_dma_context **dma_ctx,
3480                         const struct build_dcb_struct_kernel *dcb_data,
3481                         const u32 num_dcbs)
3482 {
3483         int error = 0;
3484         int i = 0;
3485
3486         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3487                 current->pid);
3488
3489         if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3490                 error = -EINVAL;
3491                 goto end_function;
3492         }
3493
3494         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3495                 dev_warn(&sep->pdev->dev,
3496                          "[PID%d] invalid number of dcbs 0x%08X\n",
3497                          current->pid, num_dcbs);
3498                 error = -EINVAL;
3499                 goto end_function;
3500         }
3501
3502         dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3503                 current->pid, num_dcbs);
3504
3505         /* Allocate thread-specific memory for DCB */
3506         *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3507                               GFP_KERNEL);
3508         if (!(*dcb_region)) {
3509                 error = -ENOMEM;
3510                 goto end_function;
3511         }
3512
3513         /* Prepare DCB and MLLI table into the allocated regions */
3514         for (i = 0; i < num_dcbs; i++) {
3515                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3516                                 (unsigned long)dcb_data->app_in_address,
3517                                 (unsigned long)dcb_data->app_out_address,
3518                                 dcb_data->data_in_size,
3519                                 dcb_data->block_size,
3520                                 dcb_data->tail_block_size,
3521                                 dcb_data->is_applet,
3522                                 true,
3523                                 false,
3524                                 *dcb_region, dmatables_region,
3525                                 dma_ctx,
3526                                 dcb_data->src_sg,
3527                                 dcb_data->dst_sg);
3528                 if (error) {
3529                         dev_warn(&sep->pdev->dev,
3530                                  "[PID%d] dma table creation failed\n",
3531                                  current->pid);
3532                         goto end_function;
3533                 }
3534         }
3535
3536 end_function:
3537         return error;
3538
3539 }
3540
3541 /**
3542  *      sep_activate_msgarea_context - Takes the message area context into use
3543  *      @sep: SEP device
3544  *      @msg_region: Message area context buf
3545  *      @msg_len: Message area context buffer size
3546  */
3547 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3548                                             void **msg_region,
3549                                             const size_t msg_len)
3550 {
3551         dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3552                 current->pid);
3553
3554         if (!msg_region || !(*msg_region) ||
3555             SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3556                 dev_warn(&sep->pdev->dev,
3557                          "[PID%d] invalid act msgarea len 0x%08zX\n",
3558                          current->pid, msg_len);
3559                 return -EINVAL;
3560         }
3561
3562         memcpy(sep->shared_addr, *msg_region, msg_len);
3563
3564         return 0;
3565 }
3566
3567 /**
3568  *      sep_create_msgarea_context - Creates message area context
3569  *      @sep: SEP device
3570  *      @msg_region: Msg area region buf to create for current transaction
3571  *      @msg_user: Content for msg area region from user
3572  *      @msg_len: Message area size
3573  */
3574 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3575                                           void **msg_region,
3576                                           const void __user *msg_user,
3577                                           const size_t msg_len)
3578 {
3579         int error = 0;
3580
3581         dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3582                 current->pid);
3583
3584         if (!msg_region ||
3585             !msg_user ||
3586             SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3587             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3588                 dev_warn(&sep->pdev->dev,
3589                          "[PID%d] invalid creat msgarea len 0x%08zX\n",
3590                          current->pid, msg_len);
3591                 error = -EINVAL;
3592                 goto end_function;
3593         }
3594
3595         /* Allocate thread-specific memory for message buffer */
3596         *msg_region = kzalloc(msg_len, GFP_KERNEL);
3597         if (!(*msg_region)) {
3598                 error = -ENOMEM;
3599                 goto end_function;
3600         }
3601
3602         /* Copy input data to write() to allocated message buffer */
3603         if (copy_from_user(*msg_region, msg_user, msg_len)) {
3604                 error = -EFAULT;
3605                 goto end_function;
3606         }
3607
3608 end_function:
3609         if (error && msg_region) {
3610                 kfree(*msg_region);
3611                 *msg_region = NULL;
3612         }
3613
3614         return error;
3615 }
3616
3617
3618 /**
3619  *      sep_read - Returns results of an operation for fastcall interface
3620  *      @filp: File pointer
3621  *      @buf_user: User buffer for storing results
3622  *      @count_user: User buffer size
3623  *      @offset: File offset, not supported
3624  *
3625  *      The implementation does not support reading in chunks, all data must be
3626  *      consumed during a single read system call.
3627  */
static ssize_t sep_read(struct file *filp,
			char __user *buf_user, size_t count_user,
			loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	ssize_t error = 0, error_tmp = 0;

	/* Am I the process that owns the transaction? */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Checks that user has called necessary apis */
	if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
			&call_status->status)) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] fastcall write not called\n",
			 current->pid);
		error = -EPROTO;
		/* Transaction is live, so fall into the cleanup below
		 * rather than returning directly */
		goto end_function_error;
	}

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] null user buffer\n",
			 current->pid);
		error = -EINVAL;
		goto end_function_error;
	}


	/* Wait for SEP to finish */
	wait_event(sep->event_interrupt,
		   test_bit(SEP_WORKING_LOCK_BIT,
			    &sep->in_use_flags) == 0);

	sep_dump_message(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
		current->pid, count_user);

	/* In case user has allocated bigger buffer */
	if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
		count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;

	/* Whole reply is copied out in this single read; no partial
	 * chunked reads are supported (see function header) */
	if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
		error = -EFAULT;
		goto end_function_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
	/* Success return value is the (possibly clamped) byte count */
	error = count_user;

end_function_error:
	/* Copy possible tail data to user and free DCB and MLLIs */
	error_tmp = sep_free_dcb_handler(sep, dma_ctx);
	if (error_tmp)
		dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
			current->pid);

	/* End the transaction, wakeup pending ones */
	error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);
	if (error_tmp)
		dev_warn(&sep->pdev->dev,
			 "[PID%d] ending transaction failed\n",
			 current->pid);

end_function:
	/* Cleanup failures are logged but do not mask the primary
	 * error/byte-count in 'error' */
	return error;
}
3706
3707 /**
3708  *      sep_fastcall_args_get - Gets fastcall params from user
 *      @sep: SEP device
3710  *      @args: Parameters buffer
3711  *      @buf_user: User buffer for operation parameters
3712  *      @count_user: User buffer size
3713  */
3714 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3715                                             struct sep_fastcall_hdr *args,
3716                                             const char __user *buf_user,
3717                                             const size_t count_user)
3718 {
3719         ssize_t error = 0;
3720         size_t actual_count = 0;
3721
3722         if (!buf_user) {
3723                 dev_warn(&sep->pdev->dev,
3724                          "[PID%d] null user buffer\n",
3725                          current->pid);
3726                 error = -EINVAL;
3727                 goto end_function;
3728         }
3729
3730         if (count_user < sizeof(struct sep_fastcall_hdr)) {
3731                 dev_warn(&sep->pdev->dev,
3732                          "[PID%d] too small message size 0x%08zX\n",
3733                          current->pid, count_user);
3734                 error = -EINVAL;
3735                 goto end_function;
3736         }
3737
3738
3739         if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3740                 error = -EFAULT;
3741                 goto end_function;
3742         }
3743
3744         if (SEP_FC_MAGIC != args->magic) {
3745                 dev_warn(&sep->pdev->dev,
3746                          "[PID%d] invalid fastcall magic 0x%08X\n",
3747                          current->pid, args->magic);
3748                 error = -EINVAL;
3749                 goto end_function;
3750         }
3751
3752         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3753                 current->pid, args->num_dcbs);
3754         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3755                 current->pid, args->msg_len);
3756
3757         if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3758             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3759                 dev_warn(&sep->pdev->dev,
3760                          "[PID%d] invalid message length\n",
3761                          current->pid);
3762                 error = -EINVAL;
3763                 goto end_function;
3764         }
3765
3766         actual_count = sizeof(struct sep_fastcall_hdr)
3767                         + args->msg_len
3768                         + (args->num_dcbs * sizeof(struct build_dcb_struct));
3769
3770         if (actual_count != count_user) {
3771                 dev_warn(&sep->pdev->dev,
3772                          "[PID%d] inconsistent message "
3773                          "sizes 0x%08zX vs 0x%08zX\n",
3774                          current->pid, actual_count, count_user);
3775                 error = -EMSGSIZE;
3776                 goto end_function;
3777         }
3778
3779 end_function:
3780         return error;
3781 }
3782
3783 /**
3784  *      sep_write - Starts an operation for fastcall interface
3785  *      @filp: File pointer
3786  *      @buf_user: User buffer for operation parameters
3787  *      @count_user: User buffer size
3788  *      @offset: File offset, not supported
3789  *
3790  *      The implementation does not support writing in chunks,
3791  *      all data must be given during a single write system call.
3792  */
3793 static ssize_t sep_write(struct file *filp,
3794                          const char __user *buf_user, size_t count_user,
3795                          loff_t *offset)
3796 {
3797         struct sep_private_data * const private_data = filp->private_data;
3798         struct sep_call_status *call_status = &private_data->call_status;
3799         struct sep_device *sep = private_data->device;
3800         struct sep_dma_context *dma_ctx = NULL;
3801         struct sep_fastcall_hdr call_hdr = {0};
3802         void *msg_region = NULL;
3803         void *dmatables_region = NULL;
3804         struct sep_dcblock *dcb_region = NULL;
3805         ssize_t error = 0;
3806         struct sep_queue_info *my_queue_elem = NULL;
3807         bool my_secure_dma; /* are we using secure_dma (IMR)? */
3808
3809         dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3810                 current->pid, sep);
3811         dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3812                 current->pid, private_data);
3813
3814         error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3815         if (error)
3816                 goto end_function;
3817
3818         buf_user += sizeof(struct sep_fastcall_hdr);
3819
3820         if (call_hdr.secure_dma == 0)
3821                 my_secure_dma = false;
3822         else
3823                 my_secure_dma = true;
3824
3825         /*
3826          * Controlling driver memory usage by limiting amount of
3827          * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3828          * of threads can progress further at a time
3829          */
3830         dev_dbg(&sep->pdev->dev,
3831                 "[PID%d] waiting for double buffering region access\n",
3832                 current->pid);
3833         error = down_interruptible(&sep->sep_doublebuf);
3834         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3835                                         current->pid);
3836         if (error) {
3837                 /* Signal received */
3838                 goto end_function_error;
3839         }
3840
3841
3842         /*
3843          * Prepare contents of the shared area regions for
3844          * the operation into temporary buffers
3845          */
3846         if (0 < call_hdr.num_dcbs) {
3847                 error = sep_create_dcb_dmatables_context(sep,
3848                                 &dcb_region,
3849                                 &dmatables_region,
3850                                 &dma_ctx,
3851                                 (const struct build_dcb_struct __user *)
3852                                         buf_user,
3853                                 call_hdr.num_dcbs, my_secure_dma);
3854                 if (error)
3855                         goto end_function_error_doublebuf;
3856
3857                 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3858         }
3859
3860         error = sep_create_msgarea_context(sep,
3861                                            &msg_region,
3862                                            buf_user,
3863                                            call_hdr.msg_len);
3864         if (error)
3865                 goto end_function_error_doublebuf;
3866
3867         dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3868                                                         current->pid);
3869         my_queue_elem = sep_queue_status_add(sep,
3870                                 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3871                                 (dma_ctx) ? dma_ctx->input_data_len : 0,
3872                                      current->pid,
3873                                      current->comm, sizeof(current->comm));
3874
3875         if (!my_queue_elem) {
3876                 dev_dbg(&sep->pdev->dev,
3877                         "[PID%d] updating queue status error\n", current->pid);
3878                 error = -ENOMEM;
3879                 goto end_function_error_doublebuf;
3880         }
3881
3882         /* Wait until current process gets the transaction */
3883         error = sep_wait_transaction(sep);
3884
3885         if (error) {
3886                 /* Interrupted by signal, don't clear transaction */
3887                 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3888                         current->pid);
3889                 sep_queue_status_remove(sep, &my_queue_elem);
3890                 goto end_function_error_doublebuf;
3891         }
3892
3893         dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3894                 current->pid);
3895         private_data->my_queue_elem = my_queue_elem;
3896
3897         /* Activate shared area regions for the transaction */
3898         error = sep_activate_msgarea_context(sep, &msg_region,
3899                                              call_hdr.msg_len);
3900         if (error)
3901                 goto end_function_error_clear_transact;
3902
3903         sep_dump_message(sep);
3904
3905         if (0 < call_hdr.num_dcbs) {
3906                 error = sep_activate_dcb_dmatables_context(sep,
3907                                 &dcb_region,
3908                                 &dmatables_region,
3909                                 dma_ctx);
3910                 if (error)
3911                         goto end_function_error_clear_transact;
3912         }
3913
3914         /* Send command to SEP */
3915         error = sep_send_command_handler(sep);
3916         if (error)
3917                 goto end_function_error_clear_transact;
3918
3919         /* Store DMA context for the transaction */
3920         private_data->dma_ctx = dma_ctx;
3921         /* Update call status */
3922         set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3923         error = count_user;
3924
3925         up(&sep->sep_doublebuf);
3926         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3927                 current->pid);
3928
3929         goto end_function;
3930
3931 end_function_error_clear_transact:
3932         sep_end_transaction_handler(sep, &dma_ctx, call_status,
3933                                                 &private_data->my_queue_elem);
3934
3935 end_function_error_doublebuf:
3936         up(&sep->sep_doublebuf);
3937         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3938                 current->pid);
3939
3940 end_function_error:
3941         if (dma_ctx)
3942                 sep_free_dma_table_data_handler(sep, &dma_ctx);
3943
3944 end_function:
3945         kfree(dcb_region);
3946         kfree(dmatables_region);
3947         kfree(msg_region);
3948
3949         return error;
3950 }
3951 /**
3952  *      sep_seek - Handler for seek system call
3953  *      @filp: File pointer
3954  *      @offset: File offset
3955  *      @origin: Options for offset
3956  *
3957  *      Fastcall interface does not support seeking, all reads
3958  *      and writes are from/to offset zero
3959  */
3960 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3961 {
3962         return -ENOSYS;
3963 }
3964
3965
3966
3967 /**
3968  * sep_file_operations - file operation on sep device
3969  * @sep_ioctl:  ioctl handler from user space call
3970  * @sep_poll:   poll handler
3971  * @sep_open:   handles sep device open request
3972  * @sep_release:handles sep device release request
3973  * @sep_mmap:   handles memory mapping requests
3974  * @sep_read:   handles read request on sep device
3975  * @sep_write:  handles write request on sep device
3976  * @sep_seek:   handles seek request on sep device
3977  */
3978 static const struct file_operations sep_file_operations = {
3979         .owner = THIS_MODULE,
3980         .unlocked_ioctl = sep_ioctl,
3981         .poll = sep_poll,
3982         .open = sep_open,
3983         .release = sep_release,
3984         .mmap = sep_mmap,
3985         .read = sep_read,
3986         .write = sep_write,
3987         .llseek = sep_seek,
3988 };
3989
3990 /**
3991  * sep_sysfs_read - read sysfs entry per gives arguments
3992  * @filp: file pointer
3993  * @kobj: kobject pointer
3994  * @attr: binary file attributes
3995  * @buf: read to this buffer
3996  * @pos: offset to read
3997  * @count: amount of data to read
3998  *
3999  * This function is to read sysfs entries for sep driver per given arguments.
4000  */
4001 static ssize_t
4002 sep_sysfs_read(struct file *filp, struct kobject *kobj,
4003                 struct bin_attribute *attr,
4004                 char *buf, loff_t pos, size_t count)
4005 {
4006         unsigned long lck_flags;
4007         size_t nleft = count;
4008         struct sep_device *sep = sep_dev;
4009         struct sep_queue_info *queue_elem = NULL;
4010         u32 queue_num = 0;
4011         u32 i = 1;
4012
4013         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4014
4015         queue_num = sep->sep_queue_num;
4016         if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4017                 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4018
4019
4020         if (count < sizeof(queue_num)
4021                         + (queue_num * sizeof(struct sep_queue_data))) {
4022                 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4023                 return -EINVAL;
4024         }
4025
4026         memcpy(buf, &queue_num, sizeof(queue_num));
4027         buf += sizeof(queue_num);
4028         nleft -= sizeof(queue_num);
4029
4030         list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4031                 if (i++ > queue_num)
4032                         break;
4033
4034                 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4035                 nleft -= sizeof(queue_elem->data);
4036                 buf += sizeof(queue_elem->data);
4037         }
4038         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4039
4040         return count - nleft;
4041 }
4042
4043 /**
4044  * bin_attributes - defines attributes for queue_status
4045  * @attr: attributes (name & permissions)
4046  * @read: function pointer to read this file
4047  * @size: maxinum size of binary attribute
4048  */
4049 static const struct bin_attribute queue_status = {
4050         .attr = {.name = "queue_status", .mode = 0444},
4051         .read = sep_sysfs_read,
4052         .size = sizeof(u32)
4053                 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4054 };
4055
4056 /**
4057  * sep_register_driver_with_fs - register misc devices
4058  * @sep: pointer to struct sep_device
4059  *
4060  * This function registers the driver with the file system
4061  */
4062 static int sep_register_driver_with_fs(struct sep_device *sep)
4063 {
4064         int ret_val;
4065
4066         sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4067         sep->miscdev_sep.name = SEP_DEV_NAME;
4068         sep->miscdev_sep.fops = &sep_file_operations;
4069
4070         ret_val = misc_register(&sep->miscdev_sep);
4071         if (ret_val) {
4072                 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4073                         ret_val);
4074                 return ret_val;
4075         }
4076
4077         ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4078                                                                 &queue_status);
4079         if (ret_val) {
4080                 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4081                         ret_val);
4082                 return ret_val;
4083         }
4084
4085         return ret_val;
4086 }
4087
4088
4089 /**
4090  *sep_probe - probe a matching PCI device
4091  *@pdev:        pci_device
4092  *@ent: pci_device_id
4093  *
4094  *Attempt to set up and configure a SEP device that has been
4095  *discovered by the PCI layer. Allocates all required resources.
4096  */
4097 static int sep_probe(struct pci_dev *pdev,
4098         const struct pci_device_id *ent)
4099 {
4100         int error = 0;
4101         struct sep_device *sep = NULL;
4102
4103         if (sep_dev != NULL) {
4104                 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4105                 return -EBUSY;
4106         }
4107
4108         /* Enable the device */
4109         error = pci_enable_device(pdev);
4110         if (error) {
4111                 dev_warn(&pdev->dev, "error enabling pci device\n");
4112                 goto end_function;
4113         }
4114
4115         /* Allocate the sep_device structure for this device */
4116         sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4117         if (sep_dev == NULL) {
4118                 error = -ENOMEM;
4119                 goto end_function_disable_device;
4120         }
4121
4122         /*
4123          * We're going to use another variable for actually
4124          * working with the device; this way, if we have
4125          * multiple devices in the future, it would be easier
4126          * to make appropriate changes
4127          */
4128         sep = sep_dev;
4129
4130         sep->pdev = pci_dev_get(pdev);
4131
4132         init_waitqueue_head(&sep->event_transactions);
4133         init_waitqueue_head(&sep->event_interrupt);
4134         spin_lock_init(&sep->snd_rply_lck);
4135         spin_lock_init(&sep->sep_queue_lock);
4136         sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4137
4138         INIT_LIST_HEAD(&sep->sep_queue_status);
4139
4140         dev_dbg(&sep->pdev->dev,
4141                 "sep probe: PCI obtained, device being prepared\n");
4142
4143         /* Set up our register area */
4144         sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4145         if (!sep->reg_physical_addr) {
4146                 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4147                 error = -ENODEV;
4148                 goto end_function_free_sep_dev;
4149         }
4150
4151         sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4152         if (!sep->reg_physical_end) {
4153                 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4154                 error = -ENODEV;
4155                 goto end_function_free_sep_dev;
4156         }
4157
4158         sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4159                 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4160         if (!sep->reg_addr) {
4161                 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4162                 error = -ENODEV;
4163                 goto end_function_free_sep_dev;
4164         }
4165
4166         dev_dbg(&sep->pdev->dev,
4167                 "Register area start %llx end %llx virtual %p\n",
4168                 (unsigned long long)sep->reg_physical_addr,
4169                 (unsigned long long)sep->reg_physical_end,
4170                 sep->reg_addr);
4171
4172         /* Allocate the shared area */
4173         sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4174                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4175                 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4176                 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4177                 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4178
4179         if (sep_map_and_alloc_shared_area(sep)) {
4180                 error = -ENOMEM;
4181                 /* Allocation failed */
4182                 goto end_function_error;
4183         }
4184
4185         /* Clear ICR register */
4186         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4187
4188         /* Set the IMR register - open only GPR 2 */
4189         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4190
4191         /* Read send/receive counters from SEP */
4192         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4193         sep->reply_ct &= 0x3FFFFFFF;
4194         sep->send_ct = sep->reply_ct;
4195
4196         /* Get the interrupt line */
4197         error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4198                 "sep_driver", sep);
4199
4200         if (error)
4201                 goto end_function_deallocate_sep_shared_area;
4202
4203         /* The new chip requires a shared area reconfigure */
4204         error = sep_reconfig_shared_area(sep);
4205         if (error)
4206                 goto end_function_free_irq;
4207
4208         sep->in_use = 1;
4209
4210         /* Finally magic up the device nodes */
4211         /* Register driver with the fs */
4212         error = sep_register_driver_with_fs(sep);
4213
4214         if (error) {
4215                 dev_err(&sep->pdev->dev, "error registering dev file\n");
4216                 goto end_function_free_irq;
4217         }
4218
4219         sep->in_use = 0; /* through touching the device */
4220 #ifdef SEP_ENABLE_RUNTIME_PM
4221         pm_runtime_put_noidle(&sep->pdev->dev);
4222         pm_runtime_allow(&sep->pdev->dev);
4223         pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4224                 SUSPEND_DELAY);
4225         pm_runtime_use_autosuspend(&sep->pdev->dev);
4226         pm_runtime_mark_last_busy(&sep->pdev->dev);
4227         sep->power_save_setup = 1;
4228 #endif
4229         /* register kernel crypto driver */
4230 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4231         error = sep_crypto_setup();
4232         if (error) {
4233                 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4234                 goto end_function_free_irq;
4235         }
4236 #endif
4237         goto end_function;
4238
4239 end_function_free_irq:
4240         free_irq(pdev->irq, sep);
4241
4242 end_function_deallocate_sep_shared_area:
4243         /* De-allocate shared area */
4244         sep_unmap_and_free_shared_area(sep);
4245
4246 end_function_error:
4247         iounmap(sep->reg_addr);
4248
4249 end_function_free_sep_dev:
4250         pci_dev_put(sep_dev->pdev);
4251         kfree(sep_dev);
4252         sep_dev = NULL;
4253
4254 end_function_disable_device:
4255         pci_disable_device(pdev);
4256
4257 end_function:
4258         return error;
4259 }
4260
4261 /**
4262  * sep_remove - handles removing device from pci subsystem
4263  * @pdev:       pointer to pci device
4264  *
4265  * This function will handle removing our sep device from pci subsystem on exit
4266  * or unloading this module. It should free up all used resources, and unmap if
4267  * any memory regions mapped.
4268  */
4269 static void sep_remove(struct pci_dev *pdev)
4270 {
4271         struct sep_device *sep = sep_dev;
4272
4273         /* Unregister from fs */
4274         misc_deregister(&sep->miscdev_sep);
4275
4276         /* Unregister from kernel crypto */
4277 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4278         sep_crypto_takedown();
4279 #endif
4280         /* Free the irq */
4281         free_irq(sep->pdev->irq, sep);
4282
4283         /* Free the shared area  */
4284         sep_unmap_and_free_shared_area(sep_dev);
4285         iounmap(sep_dev->reg_addr);
4286
4287 #ifdef SEP_ENABLE_RUNTIME_PM
4288         if (sep->in_use) {
4289                 sep->in_use = 0;
4290                 pm_runtime_forbid(&sep->pdev->dev);
4291                 pm_runtime_get_noresume(&sep->pdev->dev);
4292         }
4293 #endif
4294         pci_dev_put(sep_dev->pdev);
4295         kfree(sep_dev);
4296         sep_dev = NULL;
4297 }
4298
4299 /* Initialize struct pci_device_id for our driver */
4300 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4301         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4302         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4303         {0}
4304 };
4305
4306 /* Export our pci_device_id structure to user space */
4307 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4308
4309 #ifdef SEP_ENABLE_RUNTIME_PM
4310
4311 /**
4312  * sep_pm_resume - rsume routine while waking up from S3 state
4313  * @dev:        pointer to sep device
4314  *
4315  * This function is to be used to wake up sep driver while system awakes from S3
4316  * state i.e. suspend to ram. The RAM in intact.
4317  * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4318  */
4319 static int sep_pci_resume(struct device *dev)
4320 {
4321         struct sep_device *sep = sep_dev;
4322
4323         dev_dbg(&sep->pdev->dev, "pci resume called\n");
4324
4325         if (sep->power_state == SEP_DRIVER_POWERON)
4326                 return 0;
4327
4328         /* Clear ICR register */
4329         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4330
4331         /* Set the IMR register - open only GPR 2 */
4332         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4333
4334         /* Read send/receive counters from SEP */
4335         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4336         sep->reply_ct &= 0x3FFFFFFF;
4337         sep->send_ct = sep->reply_ct;
4338
4339         sep->power_state = SEP_DRIVER_POWERON;
4340
4341         return 0;
4342 }
4343
4344 /**
4345  * sep_pm_suspend - suspend routine while going to S3 state
4346  * @dev:        pointer to sep device
4347  *
4348  * This function is to be used to suspend sep driver while system goes to S3
4349  * state i.e. suspend to ram. The RAM in intact and ON during this suspend.
4350  * Notes - revisit with more understanding of pm, ICR/IMR
4351  */
4352 static int sep_pci_suspend(struct device *dev)
4353 {
4354         struct sep_device *sep = sep_dev;
4355
4356         dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4357         if (sep->in_use == 1)
4358                 return -EAGAIN;
4359
4360         sep->power_state = SEP_DRIVER_POWEROFF;
4361
4362         /* Clear ICR register */
4363         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4364
4365         /* Set the IMR to block all */
4366         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4367
4368         return 0;
4369 }
4370
4371 /**
4372  * sep_pm_runtime_resume - runtime resume routine
4373  * @dev:        pointer to sep device
4374  *
4375  * Notes - revisit with more understanding of pm, ICR/IMR & counters
4376  */
4377 static int sep_pm_runtime_resume(struct device *dev)
4378 {
4379
4380         u32 retval2;
4381         u32 delay_count;
4382         struct sep_device *sep = sep_dev;
4383
4384         dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4385
4386         /**
4387          * Wait until the SCU boot is ready
4388          * This is done by iterating SCU_DELAY_ITERATION (10
4389          * microseconds each) up to SCU_DELAY_MAX (50) times.
4390          * This bit can be set in a random time that is less
4391          * than 500 microseconds after each power resume
4392          */
4393         retval2 = 0;
4394         delay_count = 0;
4395         while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4396                 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4397                 retval2 &= 0x00000008;
4398                 if (!retval2) {
4399                         udelay(SCU_DELAY_ITERATION);
4400                         delay_count += 1;
4401                 }
4402         }
4403
4404         if (!retval2) {
4405                 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4406                 return -EINVAL;
4407         }
4408
4409         /* Clear ICR register */
4410         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4411
4412         /* Set the IMR register - open only GPR 2 */
4413         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4414
4415         /* Read send/receive counters from SEP */
4416         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4417         sep->reply_ct &= 0x3FFFFFFF;
4418         sep->send_ct = sep->reply_ct;
4419
4420         return 0;
4421 }
4422
4423 /**
4424  * sep_pm_runtime_suspend - runtime suspend routine
4425  * @dev:        pointer to sep device
4426  *
4427  * Notes - revisit with more understanding of pm
4428  */
4429 static int sep_pm_runtime_suspend(struct device *dev)
4430 {
4431         struct sep_device *sep = sep_dev;
4432
4433         dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4434
4435         /* Clear ICR register */
4436         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4437         return 0;
4438 }
4439
4440 /**
4441  * sep_pm - power management for sep driver
4442  * @sep_pm_runtime_resume:      resume- no communication with cpu & main memory
4443  * @sep_pm_runtime_suspend:     suspend- no communication with cpu & main memory
4444  * @sep_pci_suspend:            suspend - main memory is still ON
4445  * @sep_pci_resume:             resume - main memory is still ON
4446  */
4447 static const struct dev_pm_ops sep_pm = {
4448         .runtime_resume = sep_pm_runtime_resume,
4449         .runtime_suspend = sep_pm_runtime_suspend,
4450         .resume = sep_pci_resume,
4451         .suspend = sep_pci_suspend,
4452 };
4453 #endif /* SEP_ENABLE_RUNTIME_PM */
4454
4455 /**
4456  * sep_pci_driver - registers this device with pci subsystem
4457  * @name:       name identifier for this driver
4458  * @sep_pci_id_tbl:     pointer to struct pci_device_id table
4459  * @sep_probe:  pointer to probe function in PCI driver
4460  * @sep_remove: pointer to remove function in PCI driver
4461  */
4462 static struct pci_driver sep_pci_driver = {
4463 #ifdef SEP_ENABLE_RUNTIME_PM
4464         .driver = {
4465                 .pm = &sep_pm,
4466         },
4467 #endif
4468         .name = "sep_sec_driver",
4469         .id_table = sep_pci_id_tbl,
4470         .probe = sep_probe,
4471         .remove = sep_remove
4472 };
4473
/* Generates the module init/exit boilerplate for the PCI driver */
module_pci_driver(sep_pci_driver);
MODULE_LICENSE("GPL");