drivers/gpu/arm/mali400/mali/linux/mali_memory.c
/*
 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/idr.h>

#include "mali_osk.h"
#include "mali_executor.h"

#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_memory_block_alloc.h"
#include "mali_memory_util.h"
#include "mali_memory_virtual.h"
#include "mali_memory_manager.h"
#include "mali_memory_cow.h"
#include "mali_memory_swap_alloc.h"
#include "mali_memory_defer_bind.h"

extern unsigned int mali_dedicated_mem_size;
extern unsigned int mali_shared_mem_size;

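/* Number of pages the CPU page-fault handler maps per fault when
 * populating COW pages on demand (the faulting page plus read-ahead). */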
#define MALI_VM_NUM_FAULT_PREFETCH (0x8)

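/* vm_ops->open: called when the kernel duplicates the VMA (e.g. on fork
 * or VMA split); take an extra reference so the allocation outlives
 * every copy of the mapping. */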
static void mali_mem_vma_open(struct vm_area_struct *vma)
{
        mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
        MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));

        /* The allocation is now shared with the new mapping, so take a reference */
        mali_allocation_ref(alloc);
}
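/* vm_ops->close: drop the reference taken in mali_mem_vma_open() or
 * mali_mmap() and detach the allocation from this VMA. */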
static void mali_mem_vma_close(struct vm_area_struct *vma)
{
        /* The mapping goes away, so drop the allocation's reference */
        mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;

        mali_allocation_unref(&alloc);
        vma->vm_private_data = NULL;
}

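/* vm_ops->fault: demand paging for COW and swappable backends. OS and
 * block backends are fully mapped in mali_mmap() and should never
 * fault here. */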
static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
        mali_mem_backend *mem_bkend = NULL;
        int ret;
        int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;

        unsigned long address = (unsigned long)vmf->virtual_address;
        MALI_DEBUG_ASSERT(alloc->backend_handle);
        MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);

        /* Get backend memory & map it on the CPU */
        mutex_lock(&mali_idr_mutex);
        if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
                MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
                mutex_unlock(&mali_idr_mutex);
                return VM_FAULT_SIGBUS;
        }
        mutex_unlock(&mali_idr_mutex);
        MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);

        if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED !=
                        (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) &&
            (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) {
                /* Non-swappable COW backend: use the page fault to do COW */
                MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%lx\n", address));
                mutex_lock(&mem_bkend->mutex);
                ret = mali_mem_cow_allocate_on_demand(mem_bkend,
                                                      (address - vma->vm_start) / PAGE_SIZE);
                mutex_unlock(&mem_bkend->mutex);

                if (ret != _MALI_OSK_ERR_OK) {
                        return VM_FAULT_OOM;
                }
                prefetch_num = 1;

                /* Handle the CPU mapping of a COW-modified range: the mapping was
                 * zapped in cow_modify_range(), which triggers a page fault on the
                 * next CPU access, so map the page back to the CPU here */
                mutex_lock(&mem_bkend->mutex);
                ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num);
                mutex_unlock(&mem_bkend->mutex);

                if (unlikely(ret != _MALI_OSK_ERR_OK)) {
                        return VM_FAULT_SIGBUS;
                }
        } else if ((mem_bkend->type == MALI_MEM_SWAP) ||
                   (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
                u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE;
                int ret = _MALI_OSK_ERR_OK;

                mutex_lock(&mem_bkend->mutex);
                if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
                        ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
                } else {
                        ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
                }
                mutex_unlock(&mem_bkend->mutex);

                if (ret != _MALI_OSK_ERR_OK) {
                        MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%lx\n", address));
                        return VM_FAULT_OOM;
                } else {
                        return VM_FAULT_LOCKED;
                }
        } else {
                MALI_PRINT_ERROR(("Mali vma fault! This should never happen; it indicates a logic error in the caller.\n"));
        }
        return VM_FAULT_NOPAGE;
}

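/* CPU mapping callbacks installed on every Mali VMA by mali_mmap(). */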
static struct vm_operations_struct mali_kernel_vm_ops = {
        .open = mali_mem_vma_open,
        .close = mali_mem_vma_close,
        .fault = mali_mem_vma_fault,
};

/**
 * Map a Mali allocation into the CPU address space.
 *
 * Supported backend types:
 * - MALI_MEM_OS
 * - MALI_MEM_COW
 * - MALI_MEM_BLOCK
 * - MALI_MEM_SWAP
 *
 * Not supported backend types:
 * - _MALI_MEMORY_BIND_BACKEND_UMP
 * - _MALI_MEMORY_BIND_BACKEND_DMA_BUF
 * - _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
 */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct mali_session_data *session;
        mali_mem_allocation *mali_alloc = NULL;
        u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
        struct mali_vma_node *mali_vma_node = NULL;
        mali_mem_backend *mem_bkend = NULL;
        int ret = -EFAULT;

        session = (struct mali_session_data *)filp->private_data;
        if (NULL == session) {
                MALI_PRINT_ERROR(("mmap called without any session data available\n"));
                return -EFAULT;
        }

        MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
                             (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
                             (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

        /* Operations used on any memory system */
        /* Nothing extra to do in vm open/close for now */

        /* Find the mali allocation structure by Mali virtual address */
        mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
        if (likely(mali_vma_node)) {
                mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
                MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
                if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
                        /* Only the start address of an allocation may be used for mmap */
                        MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n"));
                        return -EFAULT;
                }
        } else {
                MALI_DEBUG_ASSERT(NULL == mali_vma_node);
                return -EFAULT;
        }

        if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
                MALI_DEBUG_PRINT(1, ("ERROR : trying to access varying memory by CPU!\n"));
                return -EFAULT;
        }

        /* Get backend memory & map it on the CPU */
        mutex_lock(&mali_idr_mutex);
        if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
                MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
                mutex_unlock(&mali_idr_mutex);
                return -EFAULT;
        }
        mutex_unlock(&mali_idr_mutex);

        if (!(MALI_MEM_SWAP == mali_alloc->type ||
              (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
                /* Set some bits which indicate that the memory is IO memory, meaning
                 * that no paging is to be performed and the memory should not be
                 * included in crash dumps, and that the memory is reserved, meaning
                 * that it is present and can never be paged out (see also previous
                 * entry)
                 */
                vma->vm_flags |= VM_IO;
                vma->vm_flags |= VM_DONTCOPY;
                vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
                vma->vm_flags |= VM_RESERVED;
#else
                vma->vm_flags |= VM_DONTDUMP;
                vma->vm_flags |= VM_DONTEXPAND;
#endif
        } else if (MALI_MEM_SWAP == mali_alloc->type) {
                vma->vm_pgoff = mem_bkend->start_idx;
        }

        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        vma->vm_ops = &mali_kernel_vm_ops;

        mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;

        /* For read-only mappings, map read-only and let write faults drive COW */
        if (!(vma->vm_flags & VM_WRITE)) {
                MALI_DEBUG_PRINT(4, ("mmap allocation with read only !\n"));
                /* Add VM_WRITE so do_page_fault() passes write faults on to us,
                 * while the PTEs stay read-only via PAGE_READONLY */
                vma->vm_flags |= VM_WRITE | VM_READ;
                vma->vm_page_prot = PAGE_READONLY;
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
                goto out;
        }

        if (mem_bkend->type == MALI_MEM_OS) {
                ret = mali_mem_os_cpu_map(mem_bkend, vma);
        } else if (mem_bkend->type == MALI_MEM_COW &&
                   (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
                ret = mali_mem_cow_cpu_map(mem_bkend, vma);
        } else if (mem_bkend->type == MALI_MEM_BLOCK) {
                ret = mali_mem_block_cpu_map(mem_bkend, vma);
        } else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW &&
                        (MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
                /* For swappable memory, the CPU page tables are created by the page fault handler */
                ret = 0;
        } else {
                /* Not supported yet */
                MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory! \n"));
                return -EFAULT;
        }

        if (ret != 0) {
                MALI_DEBUG_PRINT(1, ("mali_mmap: backend CPU map failed, ret=%d\n", ret));
                return -EFAULT;
        }
out:
        MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);

        vma->vm_private_data = (void *)mali_alloc;
        mali_alloc->cpu_mapping.vma = vma;

        mali_allocation_ref(mali_alloc);

        return 0;
}

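/* Create Mali MMU page table entries covering the whole allocation,
 * including the optional guard page. */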
_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
{
        u32 size = descriptor->psize;
        struct mali_session_data *session = descriptor->session;

        MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

        /* Map the allocation's Mali virtual range into this session's page tables */

        if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
                size += MALI_MMU_PAGE_SIZE;
        }

        return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
}

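/* Grow an existing Mali MMU mapping: map only the delta between the old
 * size and new_size. A smaller new_size is a no-op here. */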
_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size)
{
        u32 old_size = descriptor->psize;
        struct mali_session_data *session = descriptor->session;

        MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

        if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
                new_size += MALI_MMU_PAGE_SIZE;
        }

        if (new_size > old_size) {
                MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size);
                return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size);
        }
        return _MALI_OSK_ERR_OK;
}

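/* Tear down a Mali MMU mapping and zap it on all active MMUs so the GPU
 * can no longer reach the freed range. */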
void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
{
        if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
                size += MALI_MMU_PAGE_SIZE;
        }

        /* Unmap and flush L2 */
        mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
        mali_executor_zap_all_active(session);
}

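/* Report current memory usage: dedicated (block) allocator usage plus
 * OS allocator usage, as accounted by the underlying allocators. */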
u32 _mali_ukk_report_memory_usage(void)
{
        u32 sum = 0;

        if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
                sum += mali_mem_block_allocator_stat();
        }

        sum += mali_mem_os_stat();

        return sum;
}

u32 _mali_ukk_report_total_memory_size(void)
{
        return mali_dedicated_mem_size + mali_shared_mem_size;
}

/**
 * Per-session memory descriptor mapping table sizes
 */
#define MALI_MEM_DESCRIPTORS_INIT 64
#define MALI_MEM_DESCRIPTORS_MAX 65536

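/* Per-session memory setup: create the session's memory and COW locks
 * and initialise its allocation manager. */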
_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
{
        MALI_DEBUG_PRINT(5, ("Memory session begin\n"));

        session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
                                    _MALI_OSK_LOCK_ORDER_MEM_SESSION);

        if (NULL == session_data->memory_lock) {
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }

        session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
        if (NULL == session_data->cow_lock) {
                _mali_osk_mutex_term(session_data->memory_lock);
                _mali_osk_free(session_data);
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }

        mali_memory_manager_init(&session_data->allocation_mgr);

        MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
        MALI_SUCCESS;
}

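/* Per-session memory teardown: free all of the session's allocations,
 * then release the allocation manager and the session locks. */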
void mali_memory_session_end(struct mali_session_data *session)
{
        MALI_DEBUG_PRINT(3, ("MMU session end\n"));

        if (NULL == session) {
                MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
                return;
        }
        /* Free all allocations of the session */
        mali_free_session_allocations(session);
        /* Sanity checks are done during uninit */
        mali_memory_manager_uninit(&session->allocation_mgr);

        /* Free the locks */
        _mali_osk_mutex_term(session->memory_lock);
        _mali_osk_mutex_term(session->cow_lock);
}

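/* Driver-wide memory setup: backend IDR, swap allocator, OS allocator
 * and the deferred-bind manager. */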
_mali_osk_errcode_t mali_memory_initialize(void)
{
        _mali_osk_errcode_t err;

        idr_init(&mali_backend_idr);
        mutex_init(&mali_idr_mutex);

        err = mali_mem_swap_init();
        if (err != _MALI_OSK_ERR_OK) {
                return err;
        }
        err = mali_mem_os_init();
        if (_MALI_OSK_ERR_OK == err) {
                err = mali_mem_defer_bind_manager_init();
        }

        return err;
}

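/* Driver-wide memory teardown; the counterpart of mali_memory_initialize(). */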
void mali_memory_terminate(void)
{
        mali_mem_swap_term();
        mali_mem_defer_bind_manager_destory();
        mali_mem_os_term();
        if (mali_memory_have_dedicated_memory()) {
                mali_mem_block_allocator_destroy();
        }
}

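/* Allocate and initialise an empty page node of the given type.
 * Returns NULL if the kernel allocation fails. */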
struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
{
        mali_page_node *page_node = NULL;

        page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
        MALI_DEBUG_ASSERT(NULL != page_node);

        if (page_node) {
                page_node->type = type;
                INIT_LIST_HEAD(&page_node->list);
        }

        return page_node;
}

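/* Take a reference on the object backing a page node; the mechanism
 * depends on the node type (struct page refcount, block item refcount
 * or swap item refcount). */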
void _mali_page_node_ref(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                /* add ref to this page */
                get_page(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                mali_mem_block_add_ref(node);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                atomic_inc(&node->swap_it->ref_count);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
        }
}

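/* Drop a reference on the object backing a page node. MALI_PAGE_NODE_SWAP
 * is not handled here; swap item references are presumably dropped by the
 * swap allocator itself. */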
void _mali_page_node_unref(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                /* drop the ref to this page */
                put_page(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                mali_mem_block_dec_ref(node);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
        }
}

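/* The setters below attach a backing object to a page node; each asserts
 * that the node type matches the object being attached. */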
void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
{
        MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
        node->page = page;
}

void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item)
{
        MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type);
        node->swap_it = item;
}

void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
{
        MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
        node->blk_it = item;
}

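/* Return the current reference count of the node's backing object, or
 * -1 for an unknown node type. */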
int _mali_page_node_get_ref_count(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                /* get ref count of this page */
                return page_count(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                return mali_mem_block_get_ref_count(node);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                return atomic_read(&node->swap_it->ref_count);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
        }
        return -1;
}

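/* Return the DMA address of the node's backing memory. For OS pages the
 * driver stores the DMA address in page_private() when the page is
 * mapped for DMA. */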
dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                return page_private(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                return _mali_blk_item_get_phy_addr(node->blk_it);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                return node->swap_it->dma_addr;
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
        }
        return 0;
}

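/* Return the page frame number of the node's backing page; for block
 * nodes the PFN is derived from the block's physical address. */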
unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                return page_to_pfn(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                /* get the PFN of a BLOCK page */
                return _mali_blk_item_get_pfn(node->blk_it);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                return page_to_pfn(node->swap_it->page);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
        }
        return 0;
}