/*
 * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

/**
 * @file ump_osk_low_level_mem.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"
#include <linux/module.h>       /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/uaccess.h>        /* to verify pointers from user space */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>

typedef struct ump_vma_usage_tracker {
	atomic_t references;
	ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;

static void ump_vma_open(struct vm_area_struct *vma);
static void ump_vma_close(struct vm_area_struct *vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address);
#endif

static struct vm_operations_struct ump_vm_ops = {
	.open = ump_vma_open,
	.close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	.fault = ump_cpu_page_fault_handler
#else
	.nopfn = ump_cpu_page_fault_handler
#endif
};

/*
 * Page fault handler for the VMA region.
 * This should never run, since the entire virtual memory range is mapped up front.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	void __user *address;
	address = vmf->virtual_address;
#endif
	MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
	MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	return VM_FAULT_SIGBUS;
#else
	return NOPFN_SIGBUS;
#endif
}

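/* VMA open callback: the kernel invokes this when the mapping is duplicated
 * (for example on fork() or a VMA split), so take an extra reference on the
 * usage tracker to keep the UMP mapping alive until every copy is closed. */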
static void ump_vma_open(struct vm_area_struct *vma)
{
	ump_vma_usage_tracker *vma_usage_tracker;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_inc_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}

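/* VMA close callback: drop one reference on the usage tracker. When the last
 * reference goes away, ask the UMP core to unmap the memory; the tracker
 * itself is freed later in _ump_osk_mem_mapregion_term(). */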
static void ump_vma_close(struct vm_area_struct *vma)
{
	ump_vma_usage_tracker *vma_usage_tracker;
	_ump_uk_unmap_mem_s args;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_dec_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

	if (0 == new_val) {
		ump_memory_allocation *descriptor;

		descriptor = vma_usage_tracker->descriptor;

		args.ctx = descriptor->ump_session;
		args.cookie = descriptor->cookie;
		args.mapping = descriptor->mapping;
		args.size = descriptor->size;

		args._ukk_private = NULL; /** @note unused */

		DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
		_ump_ukk_unmap_mem(&args);

		/* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
	}
}

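/* Prepare a user-space VMA for a UMP mapping: allocate a usage tracker, flag
 * the VMA as an I/O mapping (VM_RESERVED on pre-3.7 kernels, otherwise
 * VM_DONTDUMP | VM_DONTEXPAND | VM_PFNMAP), select write-combined page
 * protection for uncached allocations, and install the ump_vm_ops callbacks. */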
_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor)
{
	ump_vma_usage_tracker *vma_usage_tracker;
	struct vm_area_struct *vma;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
	if (NULL == vma_usage_tracker) {
		DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	vma = (struct vm_area_struct *)descriptor->process_mapping_info;
	if (NULL == vma) {
		kfree(vma_usage_tracker);
		return _MALI_OSK_ERR_FAULT;
	}

	vma->vm_private_data = vma_usage_tracker;
	vma->vm_flags |= VM_IO;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_flags |= VM_PFNMAP;
#endif

	if (0 == descriptor->is_cached) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}
	DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", (unsigned int)pgprot_val(vma->vm_page_prot)));

	/* Setup the functions which handle further VMA handling */
	vma->vm_ops = &ump_vm_ops;

	/* The virtual address range was already allocated by the caller, so just record it */
	descriptor->mapping = (void __user *)vma->vm_start;

	atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if the process is forked, see ump_vma_open() */
	vma_usage_tracker->descriptor = descriptor;

	return _MALI_OSK_ERR_OK;
}

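/* Undo _ump_osk_mem_mapregion_init(): munmap() has already torn down the
 * actual mapping, so all that is left is to free the usage tracker. */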
void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor)
{
	struct vm_area_struct *vma;
	ump_vma_usage_tracker *vma_usage_tracker;

	if (NULL == descriptor) return;

	/* Linux does the right thing as part of munmap to remove the mapping.
	 * All that remains is to free the vma_usage_tracker set up in init(). */
	vma = (struct vm_area_struct *)descriptor->process_mapping_info;

	vma_usage_tracker = vma->vm_private_data;

	/* We only get called if mem_mapregion_init succeeded */
	kfree(vma_usage_tracker);
	return;
}

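/* Map 'size' bytes of physical memory starting at *phys_addr into the user
 * VMA at the given byte offset, using remap_pfn_range() with the page
 * protection chosen in _ump_osk_mem_mapregion_init(). */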
_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size)
{
	struct vm_area_struct *vma;
	_mali_osk_errcode_t retval;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma = (struct vm_area_struct *)descriptor->process_mapping_info;

	if (NULL == vma) return _MALI_OSK_ERR_FAULT;

	retval = remap_pfn_range(vma, ((u32)descriptor->mapping) + offset,
	                         (*phys_addr) >> PAGE_SHIFT, size,
	                         vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

	DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr: 0x%08lx, physical addr: 0x%08lx, size: %lu, prot: 0x%x, vm_flags: 0x%lx, RETVAL: 0x%x\n",
	            ump_dd_secure_id_get(descriptor->handle),
	            (unsigned long)vma,
	            (unsigned long)(vma->vm_start + offset),
	            (unsigned long)*phys_addr,
	            size,
	            (unsigned int)pgprot_val(vma->vm_page_prot), vma->vm_flags, retval));

	return retval;
}

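/* Flush the whole L1 cache of the current CPU; used as a fallback when a
 * flush by virtual address is not possible. */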
static void level1_cache_flush_all(void)
{
	DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
	__cpuc_flush_kern_all();
}

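/* Cache maintenance for a UMP allocation. The L1 cache is flushed by virtual
 * address when the caller holds a valid writable mapping; otherwise the whole
 * L1 is flushed, either immediately or deferred while a batch of cache
 * operations is ongoing. The L2 (outer) cache is then cleaned/invalidated
 * block by block using physical addresses. */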
void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data)
{
	int i;

	/* Flush L1 using the virtual address, the entire range in one go.
	 * Only flush if the user space process has a valid write mapping on the given address. */
	if ((mem) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size))) {
		__cpuc_flush_dcache_area(virt, size);
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %p, size: %x\n", mem->secure_id, virt, size));
	} else {
		if (session_data) {
			if (op == _UMP_UK_MSYNC_FLUSH_L1) {
				DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
				session_data->has_pending_level1_cache_flush = 0;
				level1_cache_flush_all();
				return;
			} else {
				if (session_data->cache_operations_ongoing) {
					session_data->has_pending_level1_cache_flush++;
					DBG_MSG(4, ("UMP[%02u] Deferring the L1 flush. Nr pending: %d\n", mem->secure_id, session_data->has_pending_level1_cache_flush));
				} else {
					/* Flush the L1 cache for each switch_user() if ump_cache_operations_control(START) has not been called */
					level1_cache_flush_all();
				}
			}
		} else {
			DBG_MSG(4, ("Unknown state %s %d\n", __FUNCTION__, __LINE__));
			level1_cache_flush_all();
		}
	}

	if (NULL == mem) return;

	if (mem->size_bytes == size) {
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
	} else {
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
		            mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
	}

	/* Flush L2 using physical addresses, block by block. */
	for (i = 0; i < mem->nr_blocks; i++) {
		u32 start_p, end_p;
		ump_dd_physical_block *block;
		block = &mem->block_array[i];

		/* Skip blocks that lie entirely before the requested offset. */
		if (offset >= block->size) {
			offset -= block->size;
			continue;
		}

		if (offset) {
			start_p = (u32)block->addr + offset;
			/* The offset is zeroed further down, after it has been used to calculate end_p. */
		} else {
			start_p = (u32)block->addr;
		}

		if (size < block->size - offset) {
			/* The requested range ends inside this block. */
			end_p = start_p + size;
			size = 0;
		} else {
			if (offset) {
				end_p = start_p + (block->size - offset);
				size -= block->size - offset;
				offset = 0;
			} else {
				end_p = start_p + block->size;
				size -= block->size;
			}
		}

		switch (op) {
		case _UMP_UK_MSYNC_CLEAN:
			outer_clean_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
			outer_flush_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_INVALIDATE:
			outer_inv_range(start_p, end_p);
			break;
		default:
			break;
		}

		if (0 == size) {
			/* Nothing left to flush. */
			break;
		}
	}

	return;
}