/*
 * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/idr.h>

#include "mali_osk.h"
#include "mali_executor.h"

#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_memory_block_alloc.h"
#include "mali_memory_util.h"
#include "mali_memory_virtual.h"
#include "mali_memory_manager.h"
#include "mali_memory_cow.h"
#include "mali_memory_swap_alloc.h"
#include "mali_memory_defer_bind.h"
#if defined(CONFIG_DMA_SHARED_BUFFER)
#include "mali_memory_secure.h"
#endif

extern unsigned int mali_dedicated_mem_size;
extern unsigned int mali_shared_mem_size;

/* Number of pages to map around the faulting page in the fault handler. */
#define MALI_VM_NUM_FAULT_PREFETCH (0x8)
static void mali_mem_vma_open(struct vm_area_struct *vma)
{
	mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
	MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));

	/* If the allocation is shared (e.g. the VMA is duplicated), take a reference here. */
	mali_allocation_ref(alloc);
}

static void mali_mem_vma_close(struct vm_area_struct *vma)
{
	mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;

	/* Drop the reference taken when the VMA was created or duplicated. */
	mali_allocation_unref(&alloc);
	vma->vm_private_data = NULL;
}

static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
	mali_mem_backend *mem_bkend = NULL;
	int ret;
	int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;

	unsigned long address = (unsigned long)vmf->virtual_address;
	MALI_DEBUG_ASSERT(alloc->backend_handle);
	MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);

	/* Get the backend memory and map it on the CPU. */
	mutex_lock(&mali_idr_mutex);
	if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
		MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
		mutex_unlock(&mali_idr_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&mali_idr_mutex);
	MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);

	if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED !=
			(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) &&
	    (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) {
		/* The mapping was made read-only, so this fault performs the COW on demand. */
		MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%lx\n", address));
		mutex_lock(&mem_bkend->mutex);
		ret = mali_mem_cow_allocate_on_demand(mem_bkend,
						      (address - vma->vm_start) / PAGE_SIZE);
		mutex_unlock(&mem_bkend->mutex);

		if (ret != _MALI_OSK_ERR_OK) {
			return VM_FAULT_OOM;
		}
		prefetch_num = 1;

		/* Handle the CPU mapping of the COW-modified range. The mapping was
		 * zapped in cow_modify_range, which triggers a page fault when the
		 * CPU accesses the range, so map it back to the CPU here. */
		mutex_lock(&mem_bkend->mutex);
		ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num);
		mutex_unlock(&mem_bkend->mutex);

		if (unlikely(ret != _MALI_OSK_ERR_OK)) {
			return VM_FAULT_SIGBUS;
		}
	} else if ((mem_bkend->type == MALI_MEM_SWAP) ||
		   (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
		u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE;
		int ret = _MALI_OSK_ERR_OK;

		mutex_lock(&mem_bkend->mutex);
		if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
			ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
		} else {
			ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
		}
		mutex_unlock(&mem_bkend->mutex);

		if (ret != _MALI_OSK_ERR_OK) {
			MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%lx\n", address));
			return VM_FAULT_OOM;
		} else {
			return VM_FAULT_LOCKED;
		}
	} else {
		MALI_PRINT_ERROR(("Mali vma fault! This should never happen; it indicates a logic error in the caller.\n"));
		/* Not supported yet, or OOM. */
		return VM_FAULT_OOM;
	}
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct mali_kernel_vm_ops = {
	.open = mali_mem_vma_open,
	.close = mali_mem_vma_close,
	.fault = mali_mem_vma_fault,
};
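
/*
 * Note: the (vma, vmf) fault signature above matches this 4.4-based kernel.
 * On mainline kernels >= 4.10 the faulting address moved from
 * vmf->virtual_address to vmf->address, and from 4.11 onwards .fault takes
 * only a struct vm_fault * (with the VMA reachable as vmf->vma), so porting
 * this handler forward would need a LINUX_VERSION_CODE guard.
 */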

/**
 * Map a Mali allocation into the CPU address space.
 *
 * Supported backend types:
 *  - MALI_MEM_OS
 *  - need to add COW?
 * Not supported backend types:
 *  - _MALI_MEMORY_BIND_BACKEND_UMP
 *  - _MALI_MEMORY_BIND_BACKEND_DMA_BUF
 *  - _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
 */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mali_session_data *session;
	mali_mem_allocation *mali_alloc = NULL;
	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
	struct mali_vma_node *mali_vma_node = NULL;
	mali_mem_backend *mem_bkend = NULL;
	int ret = -EFAULT;

	session = (struct mali_session_data *)filp->private_data;
	if (NULL == session) {
		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
			     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
			     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

	/* Operations used on any memory system. Nothing needs to be done in the
	 * vm open/close hooks beyond reference counting for now. */

	/* Find the mali allocation structure by its Mali virtual address. */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
	if (likely(mali_vma_node)) {
		mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
		MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
		if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
			/* Only the start address of an allocation may be used for mmap. */
			MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n"));
			return -EFAULT;
		}
	} else {
		MALI_DEBUG_ASSERT(NULL == mali_vma_node);
		return -EFAULT;
	}

	mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;

	if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		MALI_DEBUG_PRINT(1, ("ERROR: trying to access varying memory from the CPU!\n"));
		return -EFAULT;
	}

	/* Get the backend memory and map it on the CPU. */
	mutex_lock(&mali_idr_mutex);
	if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
		MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
		mutex_unlock(&mali_idr_mutex);
		return -EFAULT;
	}
	mutex_unlock(&mali_idr_mutex);

	if (!(MALI_MEM_SWAP == mali_alloc->type ||
	      (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
		/* Mark the memory as IO memory, meaning that no paging is to be
		 * performed and that it should not be included in crash dumps, and
		 * as reserved, meaning that it is always present and can never be
		 * paged out (see also the previous entry). */
		vma->vm_flags |= VM_IO;
		vma->vm_flags |= VM_DONTCOPY;
		vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
		vma->vm_flags |= VM_RESERVED;
#else
		vma->vm_flags |= VM_DONTDUMP;
		vma->vm_flags |= VM_DONTEXPAND;
#endif
	} else if (MALI_MEM_SWAP == mali_alloc->type) {
		vma->vm_pgoff = mem_bkend->start_idx;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mali_kernel_vm_ops;

	/* If it's a copy-on-write mapping, map it read-only. */
	if (!(vma->vm_flags & VM_WRITE)) {
		MALI_DEBUG_PRINT(4, ("mmap allocation with read only!\n"));
		/* Add VM_WRITE so that do_page_fault() will route write faults to
		 * our fault handler instead of failing them outright. */
		vma->vm_flags |= VM_WRITE | VM_READ;
		vma->vm_page_prot = PAGE_READONLY;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
		goto out;
	}

	if (mem_bkend->type == MALI_MEM_OS) {
		ret = mali_mem_os_cpu_map(mem_bkend, vma);
	} else if (mem_bkend->type == MALI_MEM_COW &&
		   (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
		ret = mali_mem_cow_cpu_map(mem_bkend, vma);
	} else if (mem_bkend->type == MALI_MEM_BLOCK) {
		ret = mali_mem_block_cpu_map(mem_bkend, vma);
	} else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW &&
			(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
		/* For swappable memory, the CPU page table is populated by the page fault handler. */
		ret = 0;
	} else if (mem_bkend->type == MALI_MEM_SECURE) {
#if defined(CONFIG_DMA_SHARED_BUFFER)
		ret = mali_mem_secure_cpu_map(mem_bkend, vma);
#else
		MALI_DEBUG_PRINT(1, ("DMA shared buffer support is not enabled; cannot map mali secure memory\n"));
		return -EFAULT;
#endif
	} else {
		/* Not supported yet. */
		MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory!\n"));
		return -EFAULT;
	}

	if (ret != 0) {
		MALI_DEBUG_PRINT(1, ("CPU mapping of backend memory failed\n"));
		return -EFAULT;
	}
out:
	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);

	vma->vm_private_data = (void *)mali_alloc;
	mali_alloc->cpu_mapping.vma = vma;

	mali_allocation_ref(mali_alloc);

	return 0;
}
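
/*
 * Illustrative userspace usage (a sketch, not from this driver; the device
 * node name and the allocation step are assumptions -- real clients go
 * through the Mali UK ioctl interface, which hands back the Mali VA):
 *
 *	int fd = open("/dev/mali", O_RDWR);	// hypothetical node name
 *	// The Mali VA is passed through mmap's offset argument and is
 *	// recovered above as vma->vm_pgoff << PAGE_SHIFT.
 *	void *cpu_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, (off_t)mali_addr);
 */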

_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
{
	u32 size = descriptor->psize;
	struct mali_session_data *session = descriptor->session;

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	/* Map the allocation into this session's Mali page tables. */

	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		size += MALI_MMU_PAGE_SIZE;
	}

	return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
}
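
/*
 * Example: for an allocation with psize == 3 * MALI_MMU_PAGE_SIZE and
 * MALI_MEM_FLAG_MALI_GUARD_PAGE set, page directory entries are prepared
 * for 4 pages starting at mali_vma_node.vm_node.start, the extra page
 * serving as the guard page at the end of the range.
 */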

_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size)
{
	u32 old_size = descriptor->psize;
	struct mali_session_data *session = descriptor->session;

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		new_size += MALI_MMU_PAGE_SIZE;
	}

	if (new_size > old_size) {
		MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size);
		return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size);
	}
	return _MALI_OSK_ERR_OK;
}
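
/*
 * Note that only growth maps new pages: growing a 2-page allocation to
 * 5 pages maps just the 3 new pages at start + old_size, while a shrink
 * request falls through and returns _MALI_OSK_ERR_OK without unmapping
 * anything.
 */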

void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
{
	if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		size += MALI_MMU_PAGE_SIZE;
	}

	/* Unmap and flush the L2 cache. */
	mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
	mali_executor_zap_all_active(session);
}

u32 _mali_ukk_report_memory_usage(void)
{
	u32 sum = 0;

	if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		sum += mali_mem_block_allocator_stat();
	}

	sum += mali_mem_os_stat();

	return sum;
}

u32 _mali_ukk_report_total_memory_size(void)
{
	return mali_dedicated_mem_size + mali_shared_mem_size;
}


/**
 * Per-session memory descriptor mapping table sizes.
 */
#define MALI_MEM_DESCRIPTORS_INIT 64
#define MALI_MEM_DESCRIPTORS_MAX 65536

_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
{
	MALI_DEBUG_PRINT(5, ("Memory session begin\n"));

	session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
				    _MALI_OSK_LOCK_ORDER_MEM_SESSION);

	if (NULL == session_data->memory_lock) {
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
	if (NULL == session_data->cow_lock) {
		_mali_osk_mutex_term(session_data->memory_lock);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	mali_memory_manager_init(&session_data->allocation_mgr);

	MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
	MALI_SUCCESS;
}

void mali_memory_session_end(struct mali_session_data *session)
{
	MALI_DEBUG_PRINT(3, ("MMU session end\n"));

	if (NULL == session) {
		MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
		return;
	}
	/* Free all allocations owned by this session. */
	mali_free_session_allocations(session);
	/* Run the manager's sanity checks during uninit. */
	mali_memory_manager_uninit(&session->allocation_mgr);

	/* Free the locks. */
	_mali_osk_mutex_term(session->memory_lock);
	_mali_osk_mutex_term(session->cow_lock);
}

_mali_osk_errcode_t mali_memory_initialize(void)
{
	_mali_osk_errcode_t err;

	idr_init(&mali_backend_idr);
	mutex_init(&mali_idr_mutex);

	err = mali_mem_swap_init();
	if (err != _MALI_OSK_ERR_OK) {
		return err;
	}
	err = mali_mem_os_init();
	if (_MALI_OSK_ERR_OK == err) {
		err = mali_mem_defer_bind_manager_init();
	}

	return err;
}
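
/*
 * mali_backend_idr, initialized above, maps the backend_handle stored in
 * each mali_mem_allocation to its mali_mem_backend; mali_mmap() and
 * mali_mem_vma_fault() resolve handles through it with idr_find() under
 * mali_idr_mutex.
 */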

void mali_memory_terminate(void)
{
	mali_mem_swap_term();
	mali_mem_defer_bind_manager_destory();
	mali_mem_os_term();
	if (mali_memory_have_dedicated_memory()) {
		mali_mem_block_allocator_destroy();
	}
}


struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
{
	mali_page_node *page_node = NULL;

	page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
	MALI_DEBUG_ASSERT(NULL != page_node);

	if (page_node) {
		page_node->type = type;
		INIT_LIST_HEAD(&page_node->list);
	}

	return page_node;
}

void _mali_page_node_ref(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		/* Take a reference on this page. */
		get_page(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		mali_mem_block_add_ref(node);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		atomic_inc(&node->swap_it->ref_count);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
}

void _mali_page_node_unref(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		/* Drop the reference on this page. */
		put_page(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		mali_mem_block_dec_ref(node);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
}
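
/*
 * Note the asymmetry with _mali_page_node_ref(): MALI_PAGE_NODE_SWAP nodes
 * are not handled here and would hit the error branch, implying callers
 * never unref swap nodes through this helper; their ref_count appears to be
 * dropped by the swap allocator instead (see mali_memory_swap_alloc.h).
 */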

void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
{
	MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
	node->page = page;
}

void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item)
{
	MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type);
	node->swap_it = item;
}

void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
{
	MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
	node->blk_it = item;
}

int _mali_page_node_get_ref_count(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		/* Get the reference count of this page. */
		return page_count(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		return mali_mem_block_get_ref_count(node);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return atomic_read(&node->swap_it->ref_count);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
	return -1;
}

dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		/* For OS pages the DMA address is stashed in page_private() when
		 * the page is mapped for the GPU. */
		return page_private(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		return _mali_blk_item_get_phy_addr(node->blk_it);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return node->swap_it->dma_addr;
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
	return 0;
}

unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		return page_to_pfn(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		/* Derive the PFN from the block page's physical address. */
		return _mali_blk_item_get_pfn(node->blk_it);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return page_to_pfn(node->swap_it->page);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
	return 0;
}
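
/*
 * The value returned above is a page frame number, not an address; where a
 * physical address is needed, pfn << PAGE_SHIFT recovers it (compare
 * _mali_blk_item_get_phy_addr() with _mali_blk_item_get_pfn() above).
 */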