3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/pci.h>
42 #include <linux/firmware.h>
43 #include <asm/ioctl.h>
44 #include <linux/ioport.h>
46 #include <linux/interrupt.h>
47 #include <linux/pagemap.h>
48 #include <asm/cacheflush.h>
49 #include "sep_driver_hw_defs.h"
50 #include "sep_driver_config.h"
51 #include "sep_driver_api.h"
54 #if SEP_DRIVER_ARM_DEBUG_MODE
56 #define CRYS_SEP_ROM_length 0x4000
57 #define CRYS_SEP_ROM_start_address 0x8000C000UL
58 #define CRYS_SEP_ROM_start_address_offset 0xC000UL
59 #define SEP_ROM_BANK_register 0x80008420UL
60 #define SEP_ROM_BANK_register_offset 0x8420UL
61 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
64 * THESE 2 definitions are specific to the board - must be
65 * defined during integration
67 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
/*
 * sep_load_rom_code - load the SEP ROM image (ARM debug builds only)
 * @sep: device whose register window we program
 *
 * Writes the CRYS_SEP_ROM image (from SEP_ROM_image.h) into the SEP ROM
 * banks 4KB-words at a time, resets SEP, then polls GPR3 for boot status.
 *
 * NOTE(review): this dump is missing source lines inside this function
 * (loop closings, the polling loop head, the switch on the boot status,
 * and the declarations of `reg`, `error` and `warning`); the surviving
 * code tokens below are unchanged.
 */
static void sep_load_rom_code(struct sep_device *sep)
	unsigned long i, k, j;

	/* Loading ROM from SEP_ROM_image.h file */
	k = sizeof(CRYS_SEP_ROM);

	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

	edbg("SEP Driver: k is %lu\n", k);
	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);

	/* ROM is banked: select each of the 4 banks in turn, then write
	   one bank's worth (CRYS_SEP_ROM_length/4 32-bit words). */
	for (i = 0; i < 4; i++) {
		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

	j = CRYS_SEP_ROM_length;

	/* Kick SEP out of reset so it boots from the freshly written ROM. */
	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

	/* poll for SEP ROM boot finish */
	reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);

	edbg("SEP Driver: ROM polling ended\n");

	/* fatal error - read error status from GPR0 */
	error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	edbg("SEP Driver: ROM polling case 1\n");

	/* Cold boot ended successfully */
	/* Warmboot ended successfully */
	/* ColdWarm boot ended successfully */
	/* Boot First Phase ended */
	warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	edbg("SEP Driver: ROM polling case %d\n", reg);

/* Non-ARM-debug builds: ROM loading is a no-op. */
static void sep_load_rom_code(struct sep_device *sep) { }
#endif				/* SEP_DRIVER_ARM_DEBUG_MODE */
143 /*----------------------------------------
145 -----------------------------------------*/
147 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
148 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
150 /*--------------------------------------------
152 --------------------------------------------*/
154 /* debug messages level */
156 module_param(debug, int , 0);
157 MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
159 /* Keep this a single static object for now to keep the conversion easy */
161 static struct sep_device sep_instance;
162 static struct sep_device *sep_dev = &sep_instance;
/* mutex for the access to the internals of the sep driver */
167 static DEFINE_MUTEX(sep_mutex);
170 /* wait queue head (event) of the driver */
171 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
/**
 * sep_load_firmware - copy firmware cache/resident
 * @sep: device we are loading
 *
 * This functions copies the cache and resident from their source
 * location into destination shared memory.
 *
 * NOTE(review): this dump is missing lines here (the declaration of
 * `error`, the early-return error paths after each request_firmware()
 * failure, and the final return); surviving tokens are unchanged.
 */
static int sep_load_firmware(struct sep_device *sep)
	const struct firmware *fw;
	char *cache_name = "cache.image.bin";
	char *res_name = "resident.image.bin";

	edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
	edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);

	/* The cache image goes at the very start of the RAR region. */
	sep->rar_region_addr = sep->rar_addr;
	sep->cache_bus = sep->rar_bus;
	sep->cache_addr = sep->rar_addr;

	/* Fetch the cache image via the kernel firmware loader. */
	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
		edbg("SEP Driver:cant request cache fw\n");
	edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);

	memcpy(sep->cache_addr, (void *)fw->data, fw->size);
	sep->cache_size = fw->size;
	release_firmware(fw);

	/* Resident image is placed immediately after the cache image. */
	sep->resident_bus = sep->cache_bus + sep->cache_size;
	sep->resident_addr = sep->cache_addr + sep->cache_size;

	error = request_firmware(&fw, res_name, &sep->pdev->dev);
		edbg("SEP Driver:cant request res fw\n");
	edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);

	memcpy(sep->resident_addr, (void *) fw->data, fw->size);
	sep->resident_size = fw->size;
	release_firmware(fw);

	edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
		sep->resident_addr, (unsigned long long)sep->resident_bus,
		sep->cache_addr, (unsigned long long)sep->cache_bus);
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 * @size: size of shared area
 *
 * Allocate a shared buffer in host memory that can be used by both the
 * kernel and also the hardware interface via DMA.
 *
 * NOTE(review): this dump is missing lines here (the second line of the
 * parameter list and both return statements); surviving tokens are
 * unchanged.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep,
	/* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
	/* Coherent DMA buffer: CPU address in shared_addr, device-visible
	   bus address in shared_bus. */
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
					&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
	/* Keep the legacy alias fields in sync. */
	sep->shared_area = sep->shared_addr;
	/* set the bus address of the shared area */
	sep->shared_area_bus = sep->shared_bus;
	edbg("sep: shared_area %ld bytes @%p (bus %08llx)\n",
		size, sep->shared_addr, (unsigned long long)sep->shared_bus);
257 * sep_unmap_and_free_shared_area - free shared block
258 * @sep: security processor
260 * Free the shared area allocated to the security processor. The
261 * processor must have finished with this and any final posted
262 * writes cleared before we do so.
264 static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
266 dma_free_coherent(&sep->pdev->dev, size,
267 sep->shared_area, sep->shared_area_bus);
271 * sep_shared_area_virt_to_bus - convert bus/virt addresses
273 * Returns the bus address inside the shared area according
274 * to the virtual address.
277 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
280 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
281 edbg("sep: virt to bus b %08llx v %p\n", pa, virt_address);
286 * sep_shared_area_bus_to_virt - convert bus/virt addresses
288 * Returns virtual address inside the shared area according
289 * to the bus address.
292 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
293 dma_addr_t bus_address)
295 return sep->shared_addr + (bus_address - sep->shared_bus);
300 * sep_try_open - attempt to open a SEP device
301 * @sep: device to attempt to open
303 * Atomically attempt to get ownership of a SEP device.
304 * Returns 1 if the device was opened, 0 on failure.
307 static int sep_try_open(struct sep_device *sep)
309 if (!test_and_set_bit(0, &sep->in_use))
/**
 * sep_open - device open method
 * @inode: inode of sep device
 * @filp: file handle to sep device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node. Must also release the memory data pool
 * allocations held by a previous owner.
 *
 * Returns zero on success otherwise an error code.
 *
 * NOTE(review): this dump is missing lines (the -EAGAIN/-EINTR error
 * returns for the two failed-open cases, the final `return 0;` and the
 * closing braces); surviving tokens are unchanged.
 */
static int sep_open(struct inode *inode, struct file *filp)
	/* check the blocking mode */
	if (filp->f_flags & O_NDELAY) {
		/* Non-blocking open: fail immediately if already owned. */
		if (sep_try_open(sep_dev) == 0)
	/* Blocking open: sleep until ownership is acquired or a signal
	   interrupts the wait. */
	if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)

	/* Bind to the device, we only have one which makes it easy */
	filp->private_data = sep_dev;
	/* release data pool allocations */
	sep_dev->data_pool_bytes_allocated = 0;
/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device. As the open protects against
 * multiple simultaneous opens that means this method is called when the
 * final reference to the open handle is dropped.
 *
 * NOTE(review): the matching #endif for the `#if 0` block and the
 * `return 0;` are missing from this dump; surviving tokens unchanged.
 */
static int sep_release(struct inode *inode, struct file *filp)
	struct sep_device *sep = filp->private_data;
#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* Mask all SEP interrupts before releasing the IRQ line. */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

	/* Ensure any blocked open progresses */
	clear_bit(0, &sep->in_use);
/*---------------------------------------------------------------
  map function - this functions maps the message shared area
  into the caller's address space via remap_pfn_range()
-----------------------------------------------------------------*/
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
	struct sep_device *sep = filp->private_data;

	dbg("-------->SEP Driver: mmap start\n");

	/* check that the size of the mapped range is no larger than the
	   size of the message shared area */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		edbg("SEP Driver mmap requested size is more than allowed\n");
		printk(KERN_WARNING "SEP Driver mmap requested size is more \
		printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
		/* NOTE(review): message says vm_end but prints vm_start -
		   looks like a copy-paste bug in the log text; confirm
		   against upstream before changing. */
		printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);

	edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);

	/* get bus address */
	bus_addr = sep->shared_area_bus;

	edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);

	/* Map the shared area's pages into the caller's VMA. */
	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		edbg("SEP Driver remap_page_range failed\n");
		printk(KERN_WARNING "SEP Driver remap_page_range failed\n");

	dbg("SEP Driver:<-------- mmap end\n");
/*-----------------------------------------------
  poll function - wait for / report SEP completion
 *----------------------------------------------*/
static unsigned int sep_poll(struct file *filp, poll_table * wait)
	unsigned int mask = 0;
	unsigned long retval = 0;	/* flow id */
	struct sep_device *sep = filp->private_data;

	dbg("---------->SEP Driver poll: start\n");

#if SEP_DRIVER_POLLING_MODE
	/* Polling build: busy-wait on GPR2 until SEP's reply counter
	   matches our send counter (top bit masked off). */
	while (sep->send_ct != (retval & 0x7FFFFFFF)) {
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));

	/* add the event to the polling wait table */
	poll_wait(filp, &sep_event, wait);

	edbg("sep->send_ct is %lu\n", sep->send_ct);
	edbg("sep->reply_ct is %lu\n", sep->reply_ct);

	/* check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		for (count = 0; count < 12 * 4; count += 4)
			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + 0x1800 + count)));

		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		edbg("retval is %lu\n", retval);
		/* check if the this is sep reply or request
		   NOTE(review): the condition distinguishing request from
		   reply is missing from this dump; only the two branch
		   bodies survive. */
			edbg("SEP Driver: sep request in\n");
			/* request */
			mask |= POLLOUT | POLLWRNORM;
			edbg("SEP Driver: sep reply in\n");
			mask |= POLLIN | POLLRDNORM;
	dbg("SEP Driver:<-------- poll exit\n");
/*
  calculates time and sets it at the predefined address
  (SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES inside the message
  shared area), tagging it with SEP_TIME_VAL_TOKEN.
*/
static int sep_set_time(struct sep_device *sep, unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
	/* address of time in the kernel */
	/* NOTE(review): the declarations of `time` (struct timeval) and
	   `time_addr`, plus the NULL checks guarding the optional output
	   parameters and the final return, are missing from this dump. */

	dbg("SEP Driver:--------> sep_set_time start\n");

	do_gettimeofday(&time);

	/* set value in the SYSTEM MEMORY offset */
	time_addr = sep->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;

	/* Token first, then the seconds value, so SEP can validate it. */
	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
	edbg("SEP Driver:time_addr is %p\n", time_addr);
	edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);

	/* set the output parameters if needed */
	*address_ptr = sep_shared_area_virt_to_bus(sep, time_addr);

	*time_in_sec_ptr = time.tv_sec;

	dbg("SEP Driver:<-------- sep_set_time end\n");
/*
  This function raises interrupt to SEP that signals that is has a new
  command from the host (the message is already in the shared area).
*/
static void sep_send_command_handler(struct sep_device *sep)
	dbg("SEP Driver:--------> sep_send_command_handler start\n");
	/* Stamp the current time into the shared area before kicking SEP;
	   NULL outputs mean this caller does not need the values back. */
	sep_set_time(sep, 0, 0);

	/* Dump the 12-word message for debugging. */
	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

	/* send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
	dbg("SEP Driver:<-------- sep_send_command_handler end\n");
/*
  This function raises interrupt to SEP that signals that is has a
  new command from HOST
*/
static void sep_send_reply_command_handler(struct sep_device *sep)
	dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");

	/* Dump the 12-word message for debugging. */
	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

	/* send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	/* update both counters
	   NOTE(review): the increments of send_ct/reply_ct are missing
	   from this dump. */

	dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
/*
  This function handles the allocate data pool memory request
  This function returns calculates the bus address of the
  allocated memory, and the offset of this area from the mapped address.
  Therefore, the FVOs in user space can calculate the exact virtual
  address of this allocated memory
*/
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
	struct sep_driver_alloc_t command_args;

	dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");

	/* Copy the request (num_bytes) in from userspace. */
	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));

	/* allocate memory: bump-allocator check against the pool limit */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {

	/* set the virtual and bus address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
	command_args.phys_address = sep->shared_area_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;

	/* write the memory back to the user space */
	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));

	/* set the allocation */
	sep->data_pool_bytes_allocated += command_args.num_bytes;

	dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
/*
  This function handles write into allocated data pool command:
  copies num_bytes from a userspace buffer into a previously
  allocated region of the shared-area data pool.
*/
static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
	unsigned long app_in_address;
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");

	/* get the application address */
	error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));

	/* get the virtual kernel address address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

	/* check that the range of the virtual kernel address is correct
	   NOTE(review): only the start address is range-checked; start+len
	   is not (see the FIXME in the read handler below). */
	if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {

	/* copy the application data */
	error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
	dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
/*
  this function handles the read from data pool command: copies
  num_bytes from the shared-area data pool out to a userspace buffer.
*/
static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
	/* virtual address of dest application buffer */
	unsigned long app_out_address;
	/* virtual address of the data pool */
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");

	/* get the application address
	   NOTE(review): arg is parsed as sep_driver_write_t even in the
	   read handler - the two ioctl structs presumably share layout;
	   confirm against sep_driver_api.h. */
	error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));

	/* get the virtual kernel address address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

	/* FIXME: These are incomplete all over the driver: what about + len
	   and when doing that also overflows */
	/* check that the range of the virtual kernel address is correct */
	if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {

	/* copy the application data */
	error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
	dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
/*
  This function releases all the application virtual buffer physical pages,
  that were previously locked
*/
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
	/* NOTE(review): the dirtyFlag branch structure is missing from
	   this dump - upstream selects between these two loops based on
	   whether the buffer was written by the device. */
	for (count = 0; count < num_pages; count++) {
		/* the out array was written, therefore the data was changed */
		if (!PageReserved(page_array_ptr[count]))
			SetPageDirty(page_array_ptr[count]);
		page_cache_release(page_array_ptr[count]);

	/* free in pages - the data was only read, therefore no update was done
	   on the pages */
	for (count = 0; count < num_pages; count++)
		page_cache_release(page_array_ptr[count]);

	/* Release the page-pointer array itself. */
	kfree(page_array_ptr);
/*
  This function locks all the physical pages of the kernel virtual buffer
  and construct a basic lli array, where each entry holds the physical
  page address and the size that application data holds in this physical pages

  NOTE(review): error-path lines (kmalloc failure return, zero-sized last
  block handling) and the final return are missing from this dump;
  surviving tokens are unchanged.
*/
static int sep_lock_kernel_pages(struct sep_device *sep,
				 unsigned long kernel_virt_addr,
				 unsigned long data_size,
				 unsigned long *num_pages_ptr,
				 struct sep_lli_entry_t **lli_array_ptr,
				 struct page ***page_array_ptr)
	/* the the page of the end address of the user space buffer */
	unsigned long end_page;
	/* the page of the start address of the user space buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct sep_lli_entry_t *lli_array;
	/* next kernel address to map */
	unsigned long next_kernel_address;

	dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");

	/* set start and end pages and num pages */
	end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = kernel_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lx\n", start_page);
	edbg("SEP Driver: end_page is %lx\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	/* One LLI entry per touched page. */
	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
		edbg("SEP Driver: kmalloc for lli_array failed\n");

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
		lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));

	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* advance the address to the start of the next page */
	next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;

	/* go from the second page to the prev before last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
		next_kernel_address += PAGE_SIZE;

	/* if more then 1 pages locked - then update for the last page size needed */
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);

		/* set the size of the last page: the tail of the buffer
		   inside its final page */
		lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
			dbg("data_size is %lu\n", data_size);

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;

	dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
/*
  This function locks all the physical pages of the application virtual buffer
  and construct a basic lli array, where each entry holds the physical page
  address and the size that application data holds in this physical pages

  NOTE(review): several lines are missing from this dump (declarations of
  `result`/`count`/`error`, kmalloc NULL checks, error assignments, the
  success return and closing braces); surviving tokens are unchanged.
*/
static int sep_lock_user_pages(struct sep_device *sep,
			       unsigned long app_virt_addr,
			       unsigned long data_size,
			       unsigned long *num_pages_ptr,
			       struct sep_lli_entry_t **lli_array_ptr,
			       struct page ***page_array_ptr)
	/* the the page of the end address of the user space buffer */
	unsigned long end_page;
	/* the page of the start address of the user space buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct page **page_array;
	struct sep_lli_entry_t *lli_array;

	dbg("SEP Driver:--------> sep_lock_user_pages start\n");

	/* set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lu\n", start_page);
	edbg("SEP Driver: end_page is %lu\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	/* allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
		edbg("SEP Driver: kmalloc for page_array failed\n");

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
		edbg("SEP Driver: kmalloc for lli_array failed\n");
		goto end_function_with_error1;

	/* convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	/* write=1, force=0: pin the user pages for device DMA */
	result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
	up_read(&current->mm->mmap_sem);

	/* check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dbg("SEP Driver: not all pages locked by get_user_pages\n");
		goto end_function_with_error2;

	/* flush the cache */
	for (count = 0; count < num_pages; count++)
		flush_dcache_page(page_array[count]);

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
		lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* go from the second page to the prev before last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

	/* if more then 1 pages locked - then update for the last page size needed */
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);

		/* set the size of the last page */
		lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", app_virt_addr);
			dbg("data_size is %lu\n", data_size);

		edbg("lli_array[%lu].physical_address is %08lx, \
			lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;
	*page_array_ptr = page_array;

end_function_with_error2:
	/* release the cache */
	for (count = 0; count < num_pages; count++)
		page_cache_release(page_array[count]);

end_function_with_error1:

	dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
944 this function calculates the size of data that can be inserted into the lli
945 table from this array the condition is that either the table is full
946 (all etnries are entered), or there are no more entries in the lli array
948 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
950 unsigned long table_data_size = 0;
951 unsigned long counter;
953 /* calculate the data in the out lli table if till we fill the whole
954 table or till the data has ended */
955 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
956 table_data_size += lli_in_array_ptr[counter].block_size;
957 return table_data_size;
/*
  this functions builds ont lli table from the lli_array according to
  the given size of data

  NOTE(review): the initialization of `array_counter`, its increment on
  entry advance, and the `lli_table_ptr++` that moves to the next table
  slot are missing from this dump; surviving tokens are unchanged.
*/
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init currrent table data size and lli array entry counter */
	curr_table_data_size = 0;
	/* starts at 1 to account for the trailing info entry */
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data: the last array
		   entry may straddle the requested size, so split it */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of block in the table */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);

		/* advance to the next entry in the lli_array */

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */

	/* set the info entry to default */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
/*
  this function goes over the list of the print created tables and
  prints each entry; a physical_address of 0xffffffff marks the end
  of the chain.

  NOTE(review): the initialization of `table_count` and the advance to
  the info entry are missing from this dump; surviving tokens unchanged.
*/
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);

		/* point to the info entry */
		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);

		/* The info entry encodes the next table: low 24 bits of
		   block_size = next table's data size, bits 24-31 = next
		   table's entry count, physical_address = next table. */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* Convert the bus address back to a kernel virtual one,
		   unless we hit the end-of-chain marker. */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_bus_to_virt(sep, (unsigned long) lli_table_ptr);

	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1077 This function prepares only input DMA table for synhronic symmetric
/*
 * sep_prepare_input_dma_table - build the input-only LLI table chain in the
 * shared area for a synchronous symmetric operation.
 * @app_virt_addr: buffer address (kernel virtual if isKernelVirtualAddress,
 *	otherwise a user-space address)
 * @data_size: number of bytes in the buffer
 * @block_size: cipher block size; each table's data size is rounded down to
 *	a multiple of this
 * @lli_table_ptr: out - bus address of the first LLI table
 * @num_entries_ptr: out - number of entries in the first table
 * @table_data_size_ptr: out - data size covered by the first table
 *
 * FIX(review): the sep_build_lli_table() call below had its third argument
 * corrupted to "¤t_entry" - an HTML-entity mangling of
 * "&current_entry"; the address-of expression is restored.
 * NOTE(review): this extracted fragment is missing some original lines
 * (opening brace, result checks after the page-lock calls, the end label
 * and return); only the visible lines are reproduced.
 */
1080 static int sep_prepare_input_dma_table(struct sep_device *sep,
1081 unsigned long app_virt_addr,
1082 unsigned long data_size,
1083 unsigned long block_size,
1084 unsigned long *lli_table_ptr,
1085 unsigned long *num_entries_ptr,
1086 unsigned long *table_data_size_ptr,
1087 bool isKernelVirtualAddress)
1089 /* pointer to the info entry of the table - the last entry */
1090 struct sep_lli_entry_t *info_entry_ptr;
1091 /* array of pointers ot page */
1092 struct sep_lli_entry_t *lli_array_ptr;
1093 /* points to the first entry to be processed in the lli_in_array */
1094 unsigned long current_entry;
1095 /* num entries in the virtual buffer */
1096 unsigned long sep_lli_entries;
1097 /* lli table pointer */
1098 struct sep_lli_entry_t *in_lli_table_ptr;
1099 /* the total data in one table */
1100 unsigned long table_data_size;
1101 /* number of entries in lli table */
1102 unsigned long num_entries_in_table;
1103 /* next table address */
1104 void *lli_table_alloc_addr;
1105 unsigned long result;
1107 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1109 edbg("SEP Driver:data_size is %lu\n", data_size);
1110 edbg("SEP Driver:block_size is %lu\n", block_size);
1112 /* initialize the pages pointers */
1113 sep->in_page_array = 0;
1114 sep->in_num_pages = 0;
/* zero-length request: build a dummy 2-entry table (data entry + info entry) */
1116 if (data_size == 0) {
1117 /* special case - created 2 entries table with zero data */
1118 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1119 /* FIXME: Should the entry below not be for _bus */
1120 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1121 in_lli_table_ptr->block_size = 0;
1124 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1125 in_lli_table_ptr->block_size = 0;
1127 *lli_table_ptr = sep->shared_area_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1128 *num_entries_ptr = 2;
1129 *table_data_size_ptr = 0;
1134 /* check if the pages are in Kernel Virtual Address layout */
1135 if (isKernelVirtualAddress == true)
1136 /* lock the pages of the kernel buffer and translate them to pages */
1137 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1139 /* lock the pages of the user buffer and translate them to pages */
1140 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1145 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1149 sep_lli_entries = sep->in_num_pages;
1151 /* initiate to point after the message area */
1152 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1154 /* loop till all the entries in in array are not processed */
1155 while (current_entry < sep_lli_entries) {
1156 /* set the new input and output tables */
1157 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1159 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1161 /* calculate the maximum size of data for input table */
1162 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1164 /* now calculate the table size so that it will be module block size */
1165 table_data_size = (table_data_size / block_size) * block_size;
1167 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1169 /* construct input lli table */
1170 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
/* first table built: report it to the caller; otherwise chain via the
 * previous table's info entry (count in bits 24-31, size in low 24 bits) */
1172 if (info_entry_ptr == 0) {
1173 /* set the output parameters to physical addresses */
1174 *lli_table_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1175 *num_entries_ptr = num_entries_in_table;
1176 *table_data_size_ptr = table_data_size;
1178 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1180 /* update the info entry of the previous in table */
1181 info_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1182 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1185 /* save the pointer to the info entry of the current tables */
1186 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1189 /* print input tables */
1190 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1191 sep_shared_area_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1193 /* the array of the pages */
1194 kfree(lli_array_ptr);
1196 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1202 This function creates the input and output dma tables for
1203 symmetric operations (AES/DES) according to the block size from LLI arays
/*
 * sep_construct_dma_tables_from_lli - build matching input and output LLI
 * table chains (AES/DES) from two pre-built LLI arrays.  Each iteration
 * allocates one input and one output table in the shared area, sizes both
 * to the smaller of the two remaining data amounts rounded down to a
 * multiple of block_size, and links them to the previous tables through
 * their info entries.
 *
 * FIX(review): the two sep_build_lli_table() calls below had their third
 * arguments corrupted to "¤t_in_entry" / "¤t_out_entry" -
 * HTML-entity manglings of "&current_in_entry" / "&current_out_entry";
 * the address-of expressions are restored.
 * NOTE(review): this extracted fragment is missing some original lines
 * (opening brace, some closing braces, the return); only the visible
 * lines are reproduced.
 */
1205 static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1206 struct sep_lli_entry_t *lli_in_array,
1207 unsigned long sep_in_lli_entries,
1208 struct sep_lli_entry_t *lli_out_array,
1209 unsigned long sep_out_lli_entries,
1210 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1212 /* points to the area where next lli table can be allocated: keep void *
1213 as there is pointer scaling to fix otherwise */
1214 void *lli_table_alloc_addr;
1215 /* input lli table */
1216 struct sep_lli_entry_t *in_lli_table_ptr;
1217 /* output lli table */
1218 struct sep_lli_entry_t *out_lli_table_ptr;
1219 /* pointer to the info entry of the table - the last entry */
1220 struct sep_lli_entry_t *info_in_entry_ptr;
1221 /* pointer to the info entry of the table - the last entry */
1222 struct sep_lli_entry_t *info_out_entry_ptr;
1223 /* points to the first entry to be processed in the lli_in_array */
1224 unsigned long current_in_entry;
1225 /* points to the first entry to be processed in the lli_out_array */
1226 unsigned long current_out_entry;
1227 /* max size of the input table */
1228 unsigned long in_table_data_size;
1229 /* max size of the output table */
1230 unsigned long out_table_data_size;
1231 /* flag te signifies if this is the first tables build from the arrays */
1232 unsigned long first_table_flag;
1233 /* the data size that should be in table */
1234 unsigned long table_data_size;
1235 /* number of etnries in the input table */
1236 unsigned long num_entries_in_table;
1237 /* number of etnries in the output table */
1238 unsigned long num_entries_out_table;
1240 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1242 /* initiate to pint after the message area */
1243 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1245 current_in_entry = 0;
1246 current_out_entry = 0;
1247 first_table_flag = 1;
1248 info_in_entry_ptr = 0;
1249 info_out_entry_ptr = 0;
1251 /* loop till all the entries in in array are not processed */
1252 while (current_in_entry < sep_in_lli_entries) {
1253 /* set the new input and output tables */
1254 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1256 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1258 /* set the first output tables */
1259 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1261 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1263 /* calculate the maximum size of data for input table */
1264 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1266 /* calculate the maximum size of data for output table */
1267 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1269 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1270 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1272 /* check where the data is smallest */
1273 table_data_size = in_table_data_size;
1274 if (table_data_size > out_table_data_size)
1275 table_data_size = out_table_data_size;
1277 /* now calculate the table size so that it will be module block size */
1278 table_data_size = (table_data_size / block_size) * block_size;
1280 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1282 /* construct input lli table */
1283 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1285 /* construct output lli table */
1286 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1288 /* if info entry is null - this is the first table built */
1289 if (info_in_entry_ptr == 0) {
1290 /* set the output parameters to physical addresses */
1291 *lli_table_in_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1292 *in_num_entries_ptr = num_entries_in_table;
1293 *lli_table_out_ptr = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
1294 *out_num_entries_ptr = num_entries_out_table;
1295 *table_data_size_ptr = table_data_size;
1297 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1298 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
/* not the first table: chain the new tables through the previous tables'
 * info entries (entry count in bits 24-31, data size in the low 24 bits) */
1300 /* update the info entry of the previous in table */
1301 info_in_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1302 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1304 /* update the info entry of the previous in table */
1305 info_out_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
1306 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1309 /* save the pointer to the info entry of the current tables */
1310 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1311 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1313 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1314 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1315 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1318 /* print input tables */
1319 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1320 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1321 /* print output tables */
1322 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1323 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1324 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1330 This function builds input and output DMA tables for synhronic
1331 symmetric operations (AES, DES). It also checks that each table
1332 is of the modular block size
/*
 * sep_prepare_input_output_dma_table - build the input and output LLI table
 * chains for a synchronous symmetric operation: lock both buffers (kernel or
 * user pages depending on isKernelVirtualAddress), then hand the resulting
 * LLI arrays to sep_construct_dma_tables_from_lli().  The arrays are freed
 * on all paths via the goto-cleanup labels at the bottom.
 * NOTE(review): this extracted fragment is missing some original lines
 * (opening brace, the declaration of "result", the "if (result)" checks
 * after each lock call, and the final return).
 */
1334 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1335 unsigned long app_virt_in_addr,
1336 unsigned long app_virt_out_addr,
1337 unsigned long data_size,
1338 unsigned long block_size,
1339 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1341 /* array of pointers of page */
1342 struct sep_lli_entry_t *lli_in_array;
1343 /* array of pointers of page */
1344 struct sep_lli_entry_t *lli_out_array;
1347 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1349 /* initialize the pages pointers */
1350 sep->in_page_array = 0;
1351 sep->out_page_array = 0;
/* lock the input buffer pages first */
1353 /* check if the pages are in Kernel Virtual Address layout */
1354 if (isKernelVirtualAddress == true) {
1355 /* lock the pages of the kernel buffer and translate them to pages */
1356 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1358 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1362 /* lock the pages of the user buffer and translate them to pages */
1363 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1365 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
/* then lock the output buffer pages; on failure only the input array
 * has to be released (end_function_with_error1) */
1370 if (isKernelVirtualAddress == true) {
1371 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1373 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1374 goto end_function_with_error1;
1377 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1379 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1380 goto end_function_with_error1;
1383 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1384 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1385 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1388 /* call the fucntion that creates table from the lli arrays */
1389 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1391 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1392 goto end_function_with_error2;
1395 /* fall through - free the lli entry arrays */
1396 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1397 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1398 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1399 end_function_with_error2:
1400 kfree(lli_out_array);
1401 end_function_with_error1:
1402 kfree(lli_in_array);
1404 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1410 this function handles tha request for creation of the DMA table
1411 for the synchronic symmetric operations (AES,DES)
/*
 * sep_create_sync_dma_tables_handler - ioctl handler: copy the build-table
 * request from user space, build either an input-only table chain or an
 * input+output pair (depending on whether app_out_address is non-zero), and
 * copy the resulting table addresses/sizes back to the caller.
 * NOTE(review): this extracted fragment is missing some original lines
 * (the "unsigned long arg" parameter, the "error" declaration, the opening
 * brace, the copy_from_user error check, and the return).
 */
1413 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1417 /* command arguments */
1418 struct sep_driver_build_sync_table_t command_args;
1420 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1422 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1426 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1427 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1428 edbg("data_size is %lu\n", command_args.data_in_size);
1429 edbg("block_size is %lu\n", command_args.block_size);
1431 /* check if we need to build only input table or input/output */
1432 if (command_args.app_out_address)
1433 /* prepare input and output tables */
1434 error = sep_prepare_input_output_dma_table(sep,
1435 command_args.app_in_address,
1436 command_args.app_out_address,
1437 command_args.data_in_size,
1438 command_args.block_size,
1439 &command_args.in_table_address,
1440 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1442 /* prepare input tables */
1443 error = sep_prepare_input_dma_table(sep,
1444 command_args.app_in_address,
1445 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
/* return the filled-in table addresses/counts to user space */
1450 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1453 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1458 this function handles the request for freeing dma table for synhronic actions
/*
 * sep_free_dma_table_data_handler - release the pages locked for the
 * synchronous DMA tables: always frees the input page array, frees the
 * output array only if one was set up, then resets the bookkeeping fields.
 * The dirty flag differs (0 for input, 1 for output) since only output
 * pages may have been written by the device.
 * NOTE(review): this extracted fragment is missing the opening brace and
 * the return statement.
 */
1460 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1462 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1464 /* free input pages array */
1465 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1467 /* free output pages array if needed */
1468 if (sep->out_page_array)
1469 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1471 /* reset all the values */
1472 sep->in_page_array = 0;
1473 sep->out_page_array = 0;
1474 sep->in_num_pages = 0;
1475 sep->out_num_pages = 0;
1476 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1481 this function find a space for the new flow dma table
/*
 * sep_find_free_flow_dma_table_space - scan the flow DMA table area of the
 * shared region for a free table slot.  A slot is considered occupied when
 * its first word (masked with 0x7FFFFFFF) is non-zero; scanning advances in
 * strides of one full table (max entries + 2 header words).
 * @table_address_ptr: out - virtual address of the free slot found
 * NOTE(review): this extracted fragment is missing the opening brace, the
 * error return for a full area, and the final return.
 */
1483 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1484 unsigned long **table_address_ptr)
1487 /* pointer to the id field of the flow dma table */
1488 unsigned long *start_table_ptr;
1489 /* Do not make start_addr unsigned long * unless fixing the offset
1491 void *flow_dma_area_start_addr;
1492 unsigned long *flow_dma_area_end_addr;
1493 /* maximum table size in words */
1494 unsigned long table_size_in_words;
1496 /* find the start address of the flow DMA table area */
1497 flow_dma_area_start_addr = sep->shared_area + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1499 /* set end address of the flow table area */
1500 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
/* one table = max entries plus the 2 bookkeeping words placed before the entries */
1502 /* set table size in words */
1503 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1505 /* set the pointer to the start address of DMA area */
1506 start_table_ptr = flow_dma_area_start_addr;
1508 /* find the space for the next table */
1509 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1510 start_table_ptr += table_size_in_words;
1512 /* check if we reached the end of floa tables area */
1513 if (start_table_ptr >= flow_dma_area_end_addr)
1516 *table_address_ptr = start_table_ptr;
1522 This function creates one DMA table for flow and returns its data,
1523 and pointer to its info entry
/*
 * sep_prepare_one_flow_dma_table - build a single flow DMA table for one
 * virtual buffer: find a free slot in the flow table area, lock the buffer's
 * pages, record the page count and page-array pointer in the two words that
 * precede the entries, fill one LLI entry per page, terminate with an info
 * entry, and return the table's summary (bus address + encoded count/size)
 * in *table_data and a pointer to the info entry in *info_entry_ptr.
 * NOTE(review): this extracted fragment is missing the opening brace, the
 * "error" declaration, the error checks after the calls, and the return;
 * flow_data_ptr is unused in the visible lines.
 */
1525 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1526 unsigned long virt_buff_addr,
1527 unsigned long virt_buff_size,
1528 struct sep_lli_entry_t *table_data,
1529 struct sep_lli_entry_t **info_entry_ptr,
1530 struct sep_flow_context_t *flow_data_ptr,
1531 bool isKernelVirtualAddress)
1534 /* the range in pages */
1535 unsigned long lli_array_size;
1536 struct sep_lli_entry_t *lli_array;
1537 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1538 unsigned long *start_dma_table_ptr;
1539 /* total table data counter */
1540 unsigned long dma_table_data_count;
1541 /* pointer that will keep the pointer to the pages of the virtual buffer */
1542 struct page **page_array_ptr;
1543 unsigned long entry_count;
1545 /* find the space for the new table */
1546 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1550 /* check if the pages are in Kernel Virtual Address layout */
1551 if (isKernelVirtualAddress == true)
1552 /* lock kernel buffer in the memory */
1553 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1555 /* lock user buffer in the memory */
1556 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
/* writing a non-zero page count into the first word marks this slot taken
 * (see sep_find_free_flow_dma_table_space) */
1561 /* set the pointer to page array at the beginning of table - this table is
1562 now considered taken */
1563 *start_dma_table_ptr = lli_array_size;
1565 /* point to the place of the pages pointers of the table */
1566 start_dma_table_ptr++;
1568 /* set the pages pointer */
1569 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1571 /* set the pointer to the first entry */
1572 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1574 /* now create the entries for table */
1575 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1576 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1578 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1580 /* set the total data of a table */
1581 dma_table_data_count += lli_array[entry_count].block_size;
1583 flow_dma_table_entry_ptr++;
1586 /* set the physical address */
1587 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
/* block_size packs the entry count (including the info entry) above
 * SEP_NUM_ENTRIES_OFFSET_IN_BITS with the total byte count below it */
1589 /* set the num_entries and total data size */
1590 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1592 /* set the info entry */
1593 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1594 flow_dma_table_entry_ptr->block_size = 0;
1596 /* set the pointer to info entry */
1597 *info_entry_ptr = flow_dma_table_entry_ptr;
1599 /* the array of the lli entries */
1599 /* the array of the lli entries */
1608 This function creates a list of tables for flow and returns the data for
1609 the first and last tables of the list
/*
 * sep_prepare_flow_dma_tables - build a linked list of flow DMA tables from
 * a user-space array of (address, size) pairs.  For each of the
 * num_virtual_buffers pairs read with get_user(), one table is built via
 * sep_prepare_one_flow_dma_table(); the first table's summary is returned
 * in *first_table_data_ptr, the last in *last_table_data_ptr, and each
 * table is chained to the next through the previous table's info entry.
 * NOTE(review): this extracted fragment is missing the opening brace, the
 * loop counter / error declarations, the pointer advances between the two
 * get_user() calls, several error checks, and the return.
 */
1611 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1612 unsigned long num_virtual_buffers,
1613 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1616 unsigned long virt_buff_addr;
1617 unsigned long virt_buff_size;
1618 struct sep_lli_entry_t table_data;
1619 struct sep_lli_entry_t *info_entry_ptr;
1620 struct sep_lli_entry_t *prev_info_entry_ptr;
1625 prev_info_entry_ptr = 0;
1627 /* init the first table to default */
1628 table_data.physical_address = 0xffffffff;
1629 first_table_data_ptr->physical_address = 0xffffffff;
1630 table_data.block_size = 0;
1632 for (i = 0; i < num_virtual_buffers; i++) {
1633 /* get the virtual buffer address */
1634 error = get_user(virt_buff_addr, &first_buff_addr);
1638 /* get the virtual buffer size */
1640 error = get_user(virt_buff_size, &first_buff_addr);
1644 /* advance the address to point to the next pair of address|size */
1647 /* now prepare the one flow LLI table from the data */
1648 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1653 /* if this is the first table - save it to return to the user
1655 *first_table_data_ptr = table_data;
1657 /* set the pointer to info entry */
1658 prev_info_entry_ptr = info_entry_ptr;
/* subsequent tables: link through the previous table's info entry; the
 * interrupt flag bit is set in the encoded block_size */
1660 /* not first table - the previous table info entry should
1662 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1664 /* set the pointer to info entry */
1665 prev_info_entry_ptr = info_entry_ptr;
1669 /* set the last table data */
1670 *last_table_data_ptr = table_data;
1676 this function goes over all the flow tables connected to the given
1677 table and deallocate them
/*
 * sep_deallocated_flow_tables - walk the chain of flow DMA tables starting
 * at first_table_ptr and release the locked pages of each.  Each table's
 * entries are preceded by two bookkeeping words (page count, page-array
 * pointer - see sep_prepare_one_flow_dma_table); the info entry at the end
 * of each table points to the next table, with 0xffffffff terminating the
 * chain.
 * NOTE(review): this extracted fragment is missing the opening and closing
 * braces of the function body.
 */
1679 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1682 unsigned long *table_ptr;
1683 /* end address of the flow dma area */
1684 unsigned long num_entries;
1685 unsigned long num_pages;
1686 struct page **pages_ptr;
1687 /* maximum table size in words */
1688 struct sep_lli_entry_t *info_entry_ptr;
1690 /* set the pointer to the first table */
1691 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1693 /* set the num of entries */
1694 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1695 & SEP_NUM_ENTRIES_MASK;
1697 /* go over all the connected tables */
1698 while (*table_ptr != 0xffffffff) {
/* bookkeeping words sit just before the first entry: count at -2, pages at -1 */
1699 /* get number of pages */
1700 num_pages = *(table_ptr - 2);
1702 /* get the pointer to the pages */
1703 pages_ptr = (struct page **) (*(table_ptr - 1));
1705 /* free the pages */
1706 sep_free_dma_pages(pages_ptr, num_pages, 1);
1708 /* goto to the info entry */
1709 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
/* follow the info entry to the next table in the chain */
1711 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1712 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1719 * sep_find_flow_context - find a flow
1720 * @sep: the SEP we are working with
1721 * @flow_id: flow identifier
1723 * Returns a pointer to the matching flow, or NULL if the flow does not
1727 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1728 unsigned long flow_id)
/* NOTE(review): missing lines here presumably declared the loop counter
 * and opening brace */
1732 * always search for flow with id default first - in case we
1733 * already started working on the flow there can be no situation
1734 * when 2 flows are with default flag
1736 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1737 if (sep->flows[count].flow_id == flow_id)
1738 return &sep->flows[count];
1745 this function handles the request to create the DMA tables for flow
/*
 * sep_create_flow_dma_tables_handler - ioctl handler: allocate a free flow
 * context, build its chain of flow DMA tables from the user-supplied buffer
 * list, return the first table's address/count/size to user space, and mark
 * the flow with the temporary id.  On any error after table creation the
 * tables are torn down via sep_deallocated_flow_tables().
 * NOTE(review): this extracted fragment is missing some original lines
 * (the "unsigned long arg" parameter, the "error" declaration, the opening
 * brace, several error checks, and the returns).
 */
1747 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1751 struct sep_driver_build_flow_table_t command_args;
1752 /* first table - output */
1753 struct sep_lli_entry_t first_table_data;
1754 /* dma table data */
1755 struct sep_lli_entry_t last_table_data;
1756 /* pointer to the info entry of the previuos DMA table */
1757 struct sep_lli_entry_t *prev_info_entry_ptr;
1758 /* pointer to the flow data strucutre */
1759 struct sep_flow_context_t *flow_context_ptr;
1761 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1763 /* init variables */
1764 prev_info_entry_ptr = 0;
1765 first_table_data.physical_address = 0xffffffff;
1767 /* find the free structure for flow data */
1768 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1769 if (flow_context_ptr == NULL)
1772 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1776 /* create flow tables */
1777 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1779 goto end_function_with_error;
1781 /* check if flow is static */
1782 if (!command_args.flow_type)
1783 /* point the info entry of the last to the info entry of the first */
1784 last_table_data = first_table_data;
/* report the first table's bus address, decoded entry count and data size */
1786 /* set output params */
1787 command_args.first_table_addr = first_table_data.physical_address;
1788 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1789 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1791 /* send the parameters to user application */
1792 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1794 goto end_function_with_error;
1796 /* all the flow created - update the flow entry with temp id */
1797 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1799 /* set the processing tables data in the context */
1800 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1801 flow_context_ptr->input_tables_in_process = first_table_data;
1803 flow_context_ptr->output_tables_in_process = first_table_data;
1807 end_function_with_error:
1808 /* free the allocated tables */
1809 sep_deallocated_flow_tables(&first_table_data);
1811 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1816 this function handles add tables to flow
/*
 * sep_add_flow_tables_handler - ioctl handler: build a new chain of flow
 * DMA tables and append it to an existing flow.  If the flow already has
 * tables for the requested direction (input or output), the new chain is
 * spliced in by overwriting the info entry of the current last table;
 * otherwise the new chain becomes the flow's first chain.  The (possibly
 * updated) first table's address/count/size is copied back to user space.
 * NOTE(review): this extracted fragment is missing some original lines
 * (the "error" declaration, the opening brace, several error checks and
 * closing braces, and the return).
 */
1818 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1821 unsigned long num_entries;
1822 struct sep_driver_add_flow_table_t command_args;
1823 struct sep_flow_context_t *flow_context_ptr;
1824 /* first dma table data */
1825 struct sep_lli_entry_t first_table_data;
1826 /* last dma table data */
1827 struct sep_lli_entry_t last_table_data;
1828 /* pointer to the info entry of the current DMA table */
1829 struct sep_lli_entry_t *info_entry_ptr;
1831 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1833 /* get input parameters */
1834 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1838 /* find the flow structure for the flow id */
1839 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1840 if (flow_context_ptr == NULL)
1843 /* prepare the flow dma tables */
1844 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1846 goto end_function_with_error;
1848 /* now check if there is already an existing add table for this flow */
1849 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1850 /* this buffer was for input buffers */
1851 if (flow_context_ptr->input_tables_flag) {
1852 /* add table already exists - add the new tables to the end
1854 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/* locate the last table's info entry (last of num_entries entries) */
1856 info_entry_ptr = (struct sep_lli_entry_t *)
1857 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1859 /* connect to list of tables */
1860 *info_entry_ptr = first_table_data;
1862 /* set the first table data */
1863 first_table_data = flow_context_ptr->first_input_table;
1865 /* set the input flag */
1866 flow_context_ptr->input_tables_flag = 1;
1868 /* set the first table data */
1869 flow_context_ptr->first_input_table = first_table_data;
1871 /* set the last table data */
1872 flow_context_ptr->last_input_table = last_table_data;
1873 } else { /* this is output tables */
1875 /* this buffer was for input buffers */
1876 if (flow_context_ptr->output_tables_flag) {
1877 /* add table already exists - add the new tables to
1878 the end of the previous */
1879 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1881 info_entry_ptr = (struct sep_lli_entry_t *)
1882 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1884 /* connect to list of tables */
1885 *info_entry_ptr = first_table_data;
1887 /* set the first table data */
1888 first_table_data = flow_context_ptr->first_output_table;
1890 /* set the input flag */
1891 flow_context_ptr->output_tables_flag = 1;
1893 /* set the first table data */
1894 flow_context_ptr->first_output_table = first_table_data;
1896 /* set the last table data */
1897 flow_context_ptr->last_output_table = last_table_data;
/* report the chain's first table back to user space */
1900 /* set output params */
1901 command_args.first_table_addr = first_table_data.physical_address;
1902 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1903 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1905 /* send the parameters to user application */
1906 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1907 end_function_with_error:
1908 /* free the allocated tables */
1909 sep_deallocated_flow_tables(&first_table_data);
1911 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1916 this function add the flow add message to the specific flow
/*
 * sep_add_flow_tables_message_handler - ioctl handler: copy a user-supplied
 * message (bounded by SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) into the flow
 * context identified by command_args.flow_id.
 * NOTE(review): this extracted fragment is missing some original lines
 * (the "error" declaration, the opening brace, the error-path bodies, and
 * the return).
 */
1918 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1921 struct sep_driver_add_message_t command_args;
1922 struct sep_flow_context_t *flow_context_ptr;
1924 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1926 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* reject messages larger than the per-flow buffer */
1931 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1936 /* find the flow context */
1937 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1938 if (flow_context_ptr == NULL)
1941 /* copy the message into context */
1942 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1943 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1945 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1951 this function returns the bus and virtual addresses of the static pool
1953 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1956 struct sep_driver_static_pool_addr_t command_args;
1958 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1960 /*prepare the output parameters in the struct */
1961 command_args.physical_static_address = sep->shared_area_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1962 command_args.virtual_static_address = (unsigned long)sep->shared_area + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1964 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1966 /* send the parameters to user application */
1967 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1968 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1973 this address gets the offset of the physical address from the start
1976 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
1979 struct sep_driver_get_mapped_offset_t command_args;
1981 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
1983 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
1987 if (command_args.physical_address < sep->shared_area_bus) {
1992 /*prepare the output parameters in the struct */
1993 command_args.offset = command_args.physical_address - sep->shared_area_bus;
1995 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
1997 /* send the parameters to user application */
1998 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2000 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2008 static int sep_start_handler(struct sep_device *sep)
2010 unsigned long reg_val;
2011 unsigned long error = 0;
2013 dbg("SEP Driver:--------> sep_start_handler start\n");
2015 /* wait in polling for message from SEP */
2017 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2020 /* check the value */
2022 /* fatal error - read error status from GPRO */
2023 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2024 dbg("SEP Driver:<-------- sep_start_handler end\n");
2029 this function handles the request for SEP initialization
2031 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2033 unsigned long message_word;
2034 unsigned long *message_ptr;
2035 struct sep_driver_init_t command_args;
2036 unsigned long counter;
2037 unsigned long error;
2038 unsigned long reg_val;
2040 dbg("SEP Driver:--------> sep_init_handler start\n");
2043 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2045 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2050 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2051 /*sep_configure_dma_burst(); */
2053 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2055 message_ptr = (unsigned long *) command_args.message_addr;
2057 /* set the base address of the SRAM */
2058 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2060 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2061 get_user(message_word, message_ptr);
2062 /* write data to SRAM */
2063 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2064 edbg("SEP Driver:message_word is %lu\n", message_word);
2065 /* wait for write complete */
2066 sep_wait_sram_write(sep);
2068 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2070 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2073 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2074 while (!(reg_val & 0xFFFFFFFD));
2076 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2078 /* check the value */
2079 if (reg_val == 0x1) {
2080 edbg("SEP Driver:init failed\n");
2082 error = sep_read_reg(sep, 0x8060);
2083 edbg("SEP Driver:sw monitor is %lu\n", error);
2085 /* fatal error - read erro status from GPRO */
2086 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2087 edbg("SEP Driver:error is %lu\n", error);
2090 dbg("SEP Driver:<-------- sep_init_handler end\n");
2096 this function handles the request cache and resident reallocation
2098 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2101 struct sep_driver_realloc_cache_resident_t command_args;
2104 /* copy cache and resident to the their intended locations */
2105 error = sep_load_firmware(sep);
2109 command_args.new_base_addr = sep->shared_area_bus;
2111 /* find the new base address according to the lowest address between
2112 cache, resident and shared area */
2113 if (sep->resident_bus < command_args.new_base_addr)
2114 command_args.new_base_addr = sep->resident_bus;
2115 if (sep->cache_bus < command_args.new_base_addr)
2116 command_args.new_base_addr = sep->cache_bus;
2118 /* set the return parameters */
2119 command_args.new_cache_addr = sep->cache_bus;
2120 command_args.new_resident_addr = sep->resident_bus;
2122 /* set the new shared area */
2123 command_args.new_shared_area_addr = sep->shared_area_bus;
2125 edbg("SEP Driver:command_args.new_shared_area is %08llx\n", command_args.new_shared_area_addr);
2126 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2127 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2128 edbg("SEP Driver:command_args.new_cache_addr is %08llx\n", command_args.new_cache_addr);
2130 /* return to user */
2131 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2137 this function handles the request for get time
2139 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2142 struct sep_driver_get_time_t command_args;
2144 error = sep_set_time(sep, &command_args.time_physical_address, &command_args.time_value);
2146 error = copy_to_user((void __user *)arg,
2147 &command_args, sizeof(struct sep_driver_get_time_t));
/*
  This API handles the end transaction request; the IMR masking /
  free_irq / mutex release steps are compiled out (historical #if 0)
*/
static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
{
	dbg("SEP Driver:--------> sep_end_transaction_handler start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);

	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

	/* lock the sep mutex */
	mutex_unlock(&sep_mutex);
#endif

	dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");

	return 0;
}
2177 This function handler the set flow id command
2179 static int sep_set_flow_id_handler(struct sep_device *sep, unsigned long arg)
2182 unsigned long flow_id;
2183 struct sep_flow_context_t *flow_data_ptr;
2185 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
2187 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2191 /* find the flow data structure that was just used for creating new flow
2192 - its id should be default */
2193 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2194 if (flow_data_ptr == NULL)
2198 flow_data_ptr->flow_id = flow_id;
2201 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
2209 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2212 struct sep_device *sep = filp->private_data;
2214 dbg("------------>SEP Driver: ioctl start\n");
2216 edbg("SEP Driver: cmd is %x\n", cmd);
2219 case SEP_IOCSENDSEPCOMMAND:
2220 /* send command to SEP */
2221 sep_send_command_handler(sep);
2222 edbg("SEP Driver: after sep_send_command_handler\n");
2224 case SEP_IOCSENDSEPRPLYCOMMAND:
2225 /* send reply command to SEP */
2226 sep_send_reply_command_handler(sep);
2228 case SEP_IOCALLOCDATAPOLL:
2229 /* allocate data pool */
2230 error = sep_allocate_data_pool_memory_handler(sep, arg);
2232 case SEP_IOCWRITEDATAPOLL:
2233 /* write data into memory pool */
2234 error = sep_write_into_data_pool_handler(sep, arg);
2236 case SEP_IOCREADDATAPOLL:
2237 /* read data from data pool into application memory */
2238 error = sep_read_from_data_pool_handler(sep, arg);
2240 case SEP_IOCCREATESYMDMATABLE:
2241 /* create dma table for synhronic operation */
2242 error = sep_create_sync_dma_tables_handler(sep, arg);
2244 case SEP_IOCCREATEFLOWDMATABLE:
2245 /* create flow dma tables */
2246 error = sep_create_flow_dma_tables_handler(sep, arg);
2248 case SEP_IOCFREEDMATABLEDATA:
2249 /* free the pages */
2250 error = sep_free_dma_table_data_handler(sep);
2252 case SEP_IOCSETFLOWID:
2254 error = sep_set_flow_id_handler(sep, arg);
2256 case SEP_IOCADDFLOWTABLE:
2257 /* add tables to the dynamic flow */
2258 error = sep_add_flow_tables_handler(sep, arg);
2260 case SEP_IOCADDFLOWMESSAGE:
2261 /* add message of add tables to flow */
2262 error = sep_add_flow_tables_message_handler(sep, arg);
2264 case SEP_IOCSEPSTART:
2265 /* start command to sep */
2266 error = sep_start_handler(sep);
2268 case SEP_IOCSEPINIT:
2269 /* init command to sep */
2270 error = sep_init_handler(sep, arg);
2272 case SEP_IOCGETSTATICPOOLADDR:
2273 /* get the physical and virtual addresses of the static pool */
2274 error = sep_get_static_pool_addr_handler(sep, arg);
2276 case SEP_IOCENDTRANSACTION:
2277 error = sep_end_transaction_handler(sep, arg);
2279 case SEP_IOCREALLOCCACHERES:
2280 error = sep_realloc_cache_resident_handler(sep, arg);
2282 case SEP_IOCGETMAPPEDADDROFFSET:
2283 error = sep_get_physical_mapped_offset_handler(sep, arg);
2286 error = sep_get_time_handler(sep, arg);
2292 dbg("SEP Driver:<-------- ioctl end\n");
2298 #if !SEP_DRIVER_POLLING_MODE
2300 /* handler for flow done interrupt */
2302 static void sep_flow_done_handler(struct work_struct *work)
2304 struct sep_flow_context_t *flow_data_ptr;
2306 /* obtain the mutex */
2307 mutex_lock(&sep_mutex);
2309 /* get the pointer to context */
2310 flow_data_ptr = (struct sep_flow_context_t *) work;
2312 /* free all the current input tables in sep */
2313 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2315 /* free all the current tables output tables in SEP (if needed) */
2316 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2317 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2319 /* check if we have additional tables to be sent to SEP only input
2320 flag may be checked */
2321 if (flow_data_ptr->input_tables_flag) {
2322 /* copy the message to the shared RAM and signal SEP */
2323 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_area, flow_data_ptr->message_size_in_bytes);
2325 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2327 mutex_unlock(&sep_mutex);
2330 interrupt handler function
2332 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2334 irqreturn_t int_error;
2335 unsigned long reg_val;
2336 unsigned long flow_id;
2337 struct sep_flow_context_t *flow_context_ptr;
2338 struct sep_device *sep = dev_id;
2340 int_error = IRQ_HANDLED;
2342 /* read the IRR register to check if this is SEP interrupt */
2343 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2344 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2346 /* check if this is the flow interrupt */
2347 if (0 /*reg_val & (0x1 << 11) */ ) {
2348 /* read GPRO to find out the which flow is done */
2349 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2351 /* find the contex of the flow */
2352 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2353 if (flow_context_ptr == NULL)
2354 goto end_function_with_error;
2356 /* queue the work */
2357 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2358 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2361 /* check if this is reply interrupt from SEP */
2362 if (reg_val & (0x1 << 13)) {
2363 /* update the counter of reply messages */
2365 /* wake up the waiting process */
2366 wake_up(&sep_event);
2368 int_error = IRQ_NONE;
2372 end_function_with_error:
2373 /* clear the interrupt */
2374 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2385 static void sep_wait_busy(struct sep_device *sep)
2390 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2395 PATCH for configuring the DMA to single burst instead of multi-burst
2397 static void sep_configure_dma_burst(struct sep_device *sep)
2399 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2401 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2403 /* request access to registers from SEP */
2404 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2406 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2410 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2412 /* set the DMA burst register to single burst */
2413 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2415 /* release the sep busy */
2416 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2419 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2426 Function that is activaed on the succesful probe of the SEP device
2428 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2431 struct sep_device *sep;
2433 int size; /* size of memory for allocation */
2435 edbg("Sep pci probe starting\n");
2436 if (sep_dev != NULL) {
2437 dev_warn(&pdev->dev, "only one SEP supported.\n");
2441 /* enable the device */
2442 error = pci_enable_device(pdev);
2444 edbg("error enabling pci device\n");
2448 /* set the pci dev pointer */
2449 sep_dev = &sep_instance;
2450 sep = &sep_instance;
2452 edbg("sep->shared_area = %lx\n", (unsigned long) &sep->shared_area);
2453 /* transaction counter that coordinates the transactions between SEP
2456 /* counter for the messages from sep */
2458 /* counter for the number of bytes allocated in the pool
2459 for the current transaction */
2460 sep->data_pool_bytes_allocated = 0;
2462 /* calculate the total size for allocation */
2463 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2464 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2466 /* allocate the shared area */
2467 if (sep_map_and_alloc_shared_area(sep, size)) {
2469 /* allocation failed */
2470 goto end_function_error;
2472 /* now set the memory regions */
2473 sep->message_shared_area_addr = sep->shared_area;
2475 edbg("SEP Driver: sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
2477 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2478 /* send the new SHARED MESSAGE AREA to the SEP */
2479 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_area_bus);
2481 /* poll for SEP response */
2482 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2483 while (retval != 0xffffffff && retval != sep->shared_area_bus)
2484 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2486 /* check the return value (register) */
2487 if (retval != sep->shared_area_bus) {
2489 goto end_function_deallocate_sep_shared_area;
2492 /* init the flow contextes */
2493 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2494 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2496 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2497 if (sep->flow_wq == NULL) {
2499 edbg("sep_driver:flow queue creation failed\n");
2500 goto end_function_deallocate_sep_shared_area;
2502 edbg("SEP Driver: create flow workqueue \n");
2503 /* load the rom code */
2504 sep_load_rom_code(sep);
2506 sep->pdev = pci_dev_get(pdev);
2508 /* get the io memory start address */
2509 sep->io_bus = pci_resource_start(pdev, 0);
2511 edbg("SEP Driver error pci resource start\n");
2512 goto end_function_deallocate_sep_shared_area;
2515 /* get the io memory end address */
2516 sep->io_end_bus = pci_resource_end(pdev, 0);
2517 if (!sep->io_end_bus) {
2518 edbg("SEP Driver error pci resource end\n");
2519 goto end_function_deallocate_sep_shared_area;
2522 sep->io_memory_size = sep->io_end_bus - sep->io_bus + 1;
2524 edbg("SEP Driver:io_bus is %08lx\n", sep->io_bus);
2526 edbg("SEP Driver:io_memory_end_phyaical_address is %08lx\n", sep->io_end_bus);
2528 edbg("SEP Driver:io_memory_size is %08lx\n", sep->io_memory_size);
2530 sep->io_addr = ioremap_nocache(sep->io_bus, sep->io_memory_size);
2531 if (!sep->io_addr) {
2532 edbg("SEP Driver error ioremap of io memory\n");
2533 goto end_function_deallocate_sep_shared_area;
2536 edbg("SEP Driver:io_addr is %p\n", sep->io_addr);
2538 sep->reg_addr = (void __iomem *) sep->io_addr;
2540 /* set up system base address and shared memory location */
2542 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2543 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2544 &sep->rar_bus, GFP_KERNEL);
2546 if (!sep->rar_addr) {
2547 edbg("SEP Driver:can't allocate rar\n");
2548 goto end_function_uniomap;
2551 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2552 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2554 #if !SEP_DRIVER_POLLING_MODE
2556 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2558 /* clear ICR register */
2559 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2561 /* set the IMR register - open only GPR 2 */
2562 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2564 edbg("SEP Driver: about to call request_irq\n");
2565 /* get the interrupt line */
2566 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2568 goto end_function_free_res;
2571 edbg("SEP Driver: about to write IMR REG_ADDR");
2573 /* set the IMR register - open only GPR 2 */
2574 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2576 end_function_free_res:
2577 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2578 sep->rar_addr, sep->rar_bus);
2579 #endif /* SEP_DRIVER_POLLING_MODE */
2580 end_function_uniomap:
2581 iounmap(sep->io_addr);
2582 end_function_deallocate_sep_shared_area:
2583 /* de-allocate shared area */
2584 sep_unmap_and_free_shared_area(sep, size);
2591 static struct pci_device_id sep_pci_id_tbl[] = {
2592 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2596 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2598 /* field for registering driver to PCI device */
2599 static struct pci_driver sep_pci_driver = {
2600 .name = "sep_sec_driver",
2601 .id_table = sep_pci_id_tbl,
2603 /* FIXME: remove handler */
2606 /* major and minor device numbers */
2607 static dev_t sep_devno;
2609 /* the files operations structure of the driver */
2610 static struct file_operations sep_file_operations = {
2611 .owner = THIS_MODULE,
2615 .release = sep_release,
2620 /* cdev struct of the driver */
2621 static struct cdev sep_cdev;
2624 this function registers the driver to the file system
2626 static int sep_register_driver_to_fs(void)
2628 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2630 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2635 cdev_init(&sep_cdev, &sep_file_operations);
2636 sep_cdev.owner = THIS_MODULE;
2638 /* register the driver with the kernel */
2639 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2642 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2643 goto end_function_unregister_devnum;
2648 end_function_unregister_devnum:
2650 /* unregister dev numbers */
2651 unregister_chrdev_region(sep_devno, 1);
2658 /*--------------------------------------------------------------
2660 ----------------------------------------------------------------*/
2661 static int __init sep_init(void)
2664 dbg("SEP Driver:-------->Init start\n");
2665 /* FIXME: Probe can occur before we are ready to survive a probe */
2666 ret_val = pci_register_driver(&sep_pci_driver);
2668 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2669 goto end_function_unregister_from_fs;
2671 /* register driver to fs */
2672 ret_val = sep_register_driver_to_fs();
2674 goto end_function_unregister_pci;
2676 end_function_unregister_pci:
2677 pci_unregister_driver(&sep_pci_driver);
2678 end_function_unregister_from_fs:
2679 /* unregister from fs */
2680 cdev_del(&sep_cdev);
2681 /* unregister dev numbers */
2682 unregister_chrdev_region(sep_devno, 1);
2684 dbg("SEP Driver:<-------- Init end\n");
2689 /*-------------------------------------------------------------
2691 --------------------------------------------------------------*/
2692 static void __exit sep_exit(void)
2696 dbg("SEP Driver:--------> Exit start\n");
2698 /* unregister from fs */
2699 cdev_del(&sep_cdev);
2700 /* unregister dev numbers */
2701 unregister_chrdev_region(sep_devno, 1);
2702 /* calculate the total size for de-allocation */
2703 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2704 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2705 /* FIXME: We need to do this in the unload for the device */
2706 /* free shared area */
2708 sep_unmap_and_free_shared_area(sep_dev, size);
2709 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2710 iounmap((void *) sep_dev->reg_addr);
2711 edbg("SEP Driver: iounmap \n");
2713 edbg("SEP Driver: release_mem_region \n");
2714 dbg("SEP Driver:<-------- Exit end\n");
2718 module_init(sep_init);
2719 module_exit(sep_exit);
2721 MODULE_LICENSE("GPL");