/*
 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/sched.h>

#include <linux/platform_device.h>
#if defined(CONFIG_DMA_SHARED_BUFFER)
#include <linux/dma-buf.h>
#endif
#include <linux/idr.h>

#include "mali_osk.h"
#include "mali_osk_mali.h"
#include "mali_kernel_linux.h"
#include "mali_scheduler.h"
#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#if defined(CONFIG_DMA_SHARED_BUFFER)
#include "mali_memory_dma_buf.h"
#include "mali_memory_secure.h"
#endif
#if defined(CONFIG_MALI400_UMP)
#include "mali_memory_ump.h"
#endif
#include "mali_memory_manager.h"
#include "mali_memory_virtual.h"
#include "mali_memory_util.h"
#include "mali_memory_external.h"
#include "mali_memory_cow.h"
#include "mali_memory_block_alloc.h"
#include "mali_ukk.h"
#include "mali_memory_swap_alloc.h"

/*
* New memory system interface
*/

/* init idr for backend memory */
struct idr mali_backend_idr;
struct mutex mali_idr_mutex;

/* init allocation manager */
int mali_memory_manager_init(struct mali_allocation_manager *mgr)
{
        /* init locks */
        rwlock_init(&mgr->vm_lock);
        mutex_init(&mgr->list_mutex);

        /* init link */
        INIT_LIST_HEAD(&mgr->head);

        /* init RB tree */
        mgr->allocation_mgr_rb = RB_ROOT;
        mgr->mali_allocation_num = 0;
        return 0;
}

/* Deinit allocation manager.
* Do some sanity checks for debug builds.
*/
void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
{
        /* check that the RB tree is empty */
        MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
        /* check the allocation list */
        MALI_DEBUG_ASSERT(list_empty(&mgr->head));
}

/* Prepare memory descriptor */
static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
{
        mali_mem_allocation *mali_allocation;

        /* Allocate memory */
        mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
        if (NULL == mali_allocation) {
                MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n"));
                return NULL;
        }

        MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);

        /* do init */
        mali_allocation->flags = 0;
        mali_allocation->session = session;

        INIT_LIST_HEAD(&mali_allocation->list);
        _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);

        /* add to session list */
        mutex_lock(&session->allocation_mgr.list_mutex);
        list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
        session->allocation_mgr.mali_allocation_num++;
        mutex_unlock(&session->allocation_mgr.list_mutex);

        return mali_allocation;
}

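/* Unlink an allocation descriptor from its session list, update the
* session's allocation count and free the descriptor.
*/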
void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
{
        MALI_DEBUG_ASSERT_POINTER(alloc);
        MALI_DEBUG_ASSERT_POINTER(alloc->session);
        mutex_lock(&alloc->session->allocation_mgr.list_mutex);
        list_del(&alloc->list);
        alloc->session->allocation_mgr.mali_allocation_num--;
        mutex_unlock(&alloc->session->allocation_mgr.list_mutex);

        kfree(alloc);
}

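/* Allocate a memory backend of size psize and register it in the global
* backend idr. On success *backend points at the new backend and the idr
* index (>= 1) used as the backend handle is returned; on failure a
* negative value is returned.
*/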
int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
{
        mali_mem_backend *mem_backend = NULL;
        s32 ret = -ENOSPC;
        s32 index = -1;
        *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
        if (NULL == *backend) {
                MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
                return -1;
        }
        mem_backend = *backend;
        mem_backend->size = psize;
        mutex_init(&mem_backend->mutex);
        INIT_LIST_HEAD(&mem_backend->list);
        mem_backend->using_count = 0;


        /* link backend with id */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
again:
        if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
                kfree(mem_backend);
                return -ENOMEM;
        }
        mutex_lock(&mali_idr_mutex);
        ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
        mutex_unlock(&mali_idr_mutex);

        if (-ENOSPC == ret) {
                kfree(mem_backend);
                return -ENOSPC;
        }
        if (-EAGAIN == ret)
                goto again;
#else
        mutex_lock(&mali_idr_mutex);
        ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
        mutex_unlock(&mali_idr_mutex);
        index = ret;
        if (ret < 0) {
                MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
                kfree(mem_backend);
                return -ENOSPC;
        }
#endif
        return index;
}


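/* Remove a backend from the idr by its handle and free it; the caller's
* pointer is cleared so the backend cannot be used after it is gone.
*/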
static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
{
        mali_mem_backend *mem_backend = *backend;

        mutex_lock(&mali_idr_mutex);
        idr_remove(&mali_backend_idr, backend_handle);
        mutex_unlock(&mali_idr_mutex);
        kfree(mem_backend);
        *backend = NULL;
}

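/* Look up the backend that covers a GPU virtual address: find the
* allocation through the session's vma offset tree, then resolve its
* backend_handle in the global backend idr.
*/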
mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address)
{
        struct mali_vma_node *mali_vma_node = NULL;
        mali_mem_backend *mem_bkend = NULL;
        mali_mem_allocation *mali_alloc = NULL;
        MALI_DEBUG_ASSERT_POINTER(session);
        mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
        if (NULL == mali_vma_node)  {
                MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search: vma node was NULL\n"));
                return NULL;
        }
        mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
        /* Get backend memory & Map on CPU */
        mutex_lock(&mali_idr_mutex);
        mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
        mutex_unlock(&mali_idr_mutex);
        MALI_DEBUG_ASSERT(NULL != mem_bkend);
        return mem_bkend;
}

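/* Resize an OS-memory backend to the page-aligned physical_size.
* Growing allocates new OS pages and extends the CPU and Mali mappings;
* shrinking unmaps the tail on the Mali side, zaps the CPU PTEs and frees
* the surplus pages. Session memory accounting is updated with the page
* delta at the end.
*/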
static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
{
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
        int retval = 0;
        mali_mem_allocation *mali_allocation = NULL;
        mali_mem_os_mem tmp_os_mem;
        s32 change_page_count;

        MALI_DEBUG_ASSERT_POINTER(session);
        MALI_DEBUG_ASSERT_POINTER(mem_backend);
        MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
        MALI_DEBUG_ASSERT(0 == physical_size % MALI_MMU_PAGE_SIZE);

        mali_allocation = mem_backend->mali_allocation;
        MALI_DEBUG_ASSERT_POINTER(mali_allocation);

        MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
        MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);

        mutex_lock(&mem_backend->mutex);

        /* Do resize */
        if (physical_size > mem_backend->size) {
                u32 add_size = physical_size - mem_backend->size;

                MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);

                /* Allocate new pages from os mem */
                retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);

                if (retval) {
                        if (-ENOMEM == retval) {
                                ret = _MALI_OSK_ERR_NOMEM;
                        } else {
                                ret = _MALI_OSK_ERR_FAULT;
                        }
                        MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
                        goto failed_alloc_memory;
                }

                MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);

                /* Resize the memory of the backend */
                ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);

                if (ret) {
                        MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n"));
                        goto failed_resize_pages;
                }

                /* Resize cpu mapping */
                if (NULL != mali_allocation->cpu_mapping.vma) {
                        ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size);
                        if (unlikely(ret != _MALI_OSK_ERR_OK)) {
                                MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
                                goto failed_cpu_map;
                        }
                }

                /* Resize mali mapping */
                _mali_osk_mutex_wait(session->memory_lock);
                ret = mali_mem_mali_map_resize(mali_allocation, physical_size);

                if (ret) {
                        MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
                        goto failed_gpu_map;
                }

                ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
                                           mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
                if (ret) {
                        MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
                        goto failed_gpu_map;
                }

                _mali_osk_mutex_signal(session->memory_lock);
        } else {
                u32 dec_size, page_count;
                u32 vaddr = 0;
                INIT_LIST_HEAD(&tmp_os_mem.pages);
                tmp_os_mem.count = 0;

                dec_size = mem_backend->size - physical_size;
                MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE);

                page_count = dec_size / MALI_MMU_PAGE_SIZE;
                vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;

                /* Resize the memory of the backend */
                ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);

                if (ret) {
                        MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
                        goto failed_resize_pages;
                }

                /* Resize mali map */
                _mali_osk_mutex_wait(session->memory_lock);
                mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
                _mali_osk_mutex_signal(session->memory_lock);

                /* Zap cpu mapping */
                if (0 != mali_allocation->cpu_mapping.addr) {
                        MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
                        zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
                }

                /* Free those extra pages */
                mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
        }

        /* Resize memory allocation and memory backend */
        change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
        mali_allocation->psize = physical_size;
        mem_backend->size = physical_size;
        mutex_unlock(&mem_backend->mutex);

        if (change_page_count > 0) {
                atomic_add(change_page_count, &session->mali_mem_allocated_pages);
                if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
                        session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
                }

        } else {
                atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
        }

        return _MALI_OSK_ERR_OK;

failed_gpu_map:
        _mali_osk_mutex_signal(session->memory_lock);
failed_cpu_map:
        if (physical_size > mem_backend->size) {
                mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
                                         (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
        } else {
                mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
        }
failed_resize_pages:
        if (0 != tmp_os_mem.count)
                mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
failed_alloc_memory:

        mutex_unlock(&mem_backend->mutex);
        return ret;
}


/* Set GPU MMU properties */
static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
{
        if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
                *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
        } else {
                *properties = MALI_MMU_FLAGS_DEFAULT;
        }
}

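/* Grow the backend that backs mali_addr by add_size bytes (page aligned).
* The new physical size must still fit inside the virtual range reserved
* for the allocation; the actual work is done by mali_mem_resize().
*/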
_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size)
{
        mali_mem_backend *mem_backend = NULL;
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
        mali_mem_allocation *mali_allocation = NULL;
        u32 new_physical_size;
        MALI_DEBUG_ASSERT_POINTER(session);
        MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);

        /* Get the memory backend that needs to be resized. */
        mem_backend = mali_mem_backend_struct_search(session, mali_addr);

        if (NULL == mem_backend)  {
                MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
                return ret;
        }

        mali_allocation = mem_backend->mali_allocation;

        MALI_DEBUG_ASSERT_POINTER(mali_allocation);

        new_physical_size = add_size + mem_backend->size;

        if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size))
                return ret;

        MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size);

        ret = mali_mem_resize(session, mem_backend, new_physical_size);

        return ret;
}

/**
* _mali_ukk_mem_allocate - allocate mali memory
*/
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
        struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
        mali_mem_backend *mem_backend = NULL;
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
        int retval = 0;
        mali_mem_allocation *mali_allocation = NULL;
        struct mali_vma_node *mali_vma_node = NULL;

        MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

        /* Check if the address is already allocated */
        mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);

        if (unlikely(mali_vma_node)) {
                MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
                return _MALI_OSK_ERR_FAULT;
        }

        /* create mali memory allocation */
        mali_allocation = mali_mem_allocation_struct_create(session);

        if (mali_allocation == NULL) {
                MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
                return _MALI_OSK_ERR_NOMEM;
        }
        mali_allocation->psize = args->psize;
        mali_allocation->vsize = args->vsize;

        /* Select the backend type:
         * MALI_MEM_SWAP for swappable allocations,
         * MALI_MEM_OS (resizable) if resize support is requested,
         * MALI_MEM_SECURE for secure allocations,
         * MALI_MEM_BLOCK if dedicated memory is available,
         * otherwise MALI_MEM_OS.
         */
        if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
                mali_allocation->type = MALI_MEM_SWAP;
        } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
                mali_allocation->type = MALI_MEM_OS;
                mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
        } else if (args->flags & _MALI_MEMORY_ALLOCATE_SECURE) {
                mali_allocation->type = MALI_MEM_SECURE;
        } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
                mali_allocation->type = MALI_MEM_BLOCK;
        } else {
                mali_allocation->type = MALI_MEM_OS;
        }

        /* add allocation node to RB tree for index */
        mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
        mali_allocation->mali_vma_node.vm_node.size = args->vsize;

        mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

        mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
        if (mali_allocation->backend_handle < 0) {
                ret = _MALI_OSK_ERR_NOMEM;
                MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
                goto failed_alloc_backend;
        }


        mem_backend->mali_allocation = mali_allocation;
        mem_backend->type = mali_allocation->type;

        mali_allocation->mali_mapping.addr = args->gpu_vaddr;

        /* set gpu mmu property */
        _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
        /* prepare for MALI mapping */
        if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
                _mali_osk_mutex_wait(session->memory_lock);

                ret = mali_mem_mali_map_prepare(mali_allocation);
                if (0 != ret) {
                        _mali_osk_mutex_signal(session->memory_lock);
                        goto failed_prepare_map;
                }
                _mali_osk_mutex_signal(session->memory_lock);
        }

        if (mali_allocation->psize == 0) {
                mem_backend->os_mem.count = 0;
                INIT_LIST_HEAD(&mem_backend->os_mem.pages);
                goto done;
        }

        if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
                mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
                mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
                /* init for defer bind backend */
                mem_backend->os_mem.count = 0;
                INIT_LIST_HEAD(&mem_backend->os_mem.pages);

                goto done;
        }

        if (likely(mali_allocation->psize > 0)) {

                if (MALI_MEM_SECURE == mem_backend->type) {
#if defined(CONFIG_DMA_SHARED_BUFFER)
                        ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd);
                        if (_MALI_OSK_ERR_OK != ret) {
                                MALI_DEBUG_PRINT(1, ("Failed to attach dma buf for secure memory! \n"));
                                goto failed_alloc_pages;
                        }
#else
                        ret = _MALI_OSK_ERR_UNSUPPORTED;
                        MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory! \n"));
                        goto failed_alloc_pages;
#endif
                } else {

                        /* allocate physical memory */
                        if (mem_backend->type == MALI_MEM_OS) {
                                retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
                        } else if (mem_backend->type == MALI_MEM_BLOCK) {
                                /* try to allocate from BLOCK memory first, then fall back to OS memory if that fails. */
                                if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
                                        retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
                                        mem_backend->type = MALI_MEM_OS;
                                        mali_allocation->type = MALI_MEM_OS;
                                }
                        } else if (MALI_MEM_SWAP == mem_backend->type) {
                                retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
                        } else {
                                /* unsupported memory type */
                                MALI_DEBUG_ASSERT(0);
                        }

                        if (retval) {
                                ret = _MALI_OSK_ERR_NOMEM;
                                MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
                                goto failed_alloc_pages;
                        }
                }
        }

        /* map to GPU side */
        if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
                _mali_osk_mutex_wait(session->memory_lock);
                /* Map on Mali */

                if (mem_backend->type == MALI_MEM_OS) {
                        ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
                                                   mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);

                } else if (mem_backend->type == MALI_MEM_BLOCK) {
                        mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
                                                mali_allocation->mali_mapping.properties);
                } else if (mem_backend->type == MALI_MEM_SWAP) {
                        ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
                                                     mali_allocation->mali_mapping.properties);
                } else if (mem_backend->type == MALI_MEM_SECURE) {
#if defined(CONFIG_DMA_SHARED_BUFFER)
                        ret = mali_mem_secure_mali_map(&mem_backend->secure_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties);
#endif
                } else { /* unsupported type */
                        MALI_DEBUG_ASSERT(0);
                }

                _mali_osk_mutex_signal(session->memory_lock);
        }
done:
        if (MALI_MEM_OS == mem_backend->type) {
                atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
        } else if (MALI_MEM_BLOCK == mem_backend->type) {
                atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
        } else if (MALI_MEM_SECURE == mem_backend->type) {
                atomic_add(mem_backend->secure_mem.count, &session->mali_mem_allocated_pages);
        } else {
                MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
                atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
                atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
        }

        if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
                session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
        }
        return _MALI_OSK_ERR_OK;

failed_alloc_pages:
        mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
failed_prepare_map:
        mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

        mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
        mali_mem_allocation_struct_destory(mali_allocation);

        return ret;
}


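/**
* _mali_ukk_mem_free - free a mali memory allocation
* Looks up the allocation by its GPU virtual address and drops one
* reference; the number of pages actually freed is reported back in
* args->free_pages_nr.
*/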
_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
{
        struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
        u32 vaddr = args->gpu_vaddr;
        mali_mem_allocation *mali_alloc = NULL;
        struct mali_vma_node *mali_vma_node = NULL;

        /* find the mali allocation structure by virtual address */
        mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
        if (NULL == mali_vma_node) {
                MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
                return _MALI_OSK_ERR_INVALID_ARGS;
        }
        MALI_DEBUG_ASSERT(NULL != mali_vma_node);
        mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);

        if (mali_alloc)
                /* check ref_count */
                args->free_pages_nr = mali_allocation_unref(&mali_alloc);

        return _MALI_OSK_ERR_OK;
}


/**
* Function _mali_ukk_mem_bind -- bind external memory to a new GPU address
* It will allocate a new mem allocation and bind external memory to it.
* Supported backend types are:
* _MALI_MEMORY_BIND_BACKEND_UMP
* _MALI_MEMORY_BIND_BACKEND_DMA_BUF
* _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
* CPU access is not supported yet.
*/
_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
{
        struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
        mali_mem_backend *mem_backend = NULL;
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
        mali_mem_allocation *mali_allocation = NULL;
        MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));

        /* allocate mali allocation. */
        mali_allocation = mali_mem_allocation_struct_create(session);

        if (mali_allocation == NULL) {
                return _MALI_OSK_ERR_NOMEM;
        }
        mali_allocation->psize = args->size;
        mali_allocation->vsize = args->size;
        mali_allocation->mali_mapping.addr = args->vaddr;

        /* add allocation node to RB tree for index */
        mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
        mali_allocation->mali_vma_node.vm_node.size = args->size;
        mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

        /* allocate backend */
        if (mali_allocation->psize > 0) {
                mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
                if (mali_allocation->backend_handle < 0) {
                        goto Failed_alloc_backend;
                }

        } else {
                goto Failed_alloc_backend;
        }

        mem_backend->size = mali_allocation->psize;
        mem_backend->mali_allocation = mali_allocation;

        switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
        case  _MALI_MEMORY_BIND_BACKEND_UMP:
#if defined(CONFIG_MALI400_UMP)
                mali_allocation->type = MALI_MEM_UMP;
                mem_backend->type = MALI_MEM_UMP;
                ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
                                            args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
                if (_MALI_OSK_ERR_OK != ret) {
                        MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
                        goto Failed_bind_backend;
                }
#else
                MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
                goto Failed_bind_backend;
#endif
                break;
        case  _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
#if defined(CONFIG_DMA_SHARED_BUFFER)
                mali_allocation->type = MALI_MEM_DMA_BUF;
                mem_backend->type = MALI_MEM_DMA_BUF;
                ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
                                            args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
                if (_MALI_OSK_ERR_OK != ret) {
                        MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
                        goto Failed_bind_backend;
                }
#else
                MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
                goto Failed_bind_backend;
#endif
                break;
        case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
                /* not allowed */
                MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n"));
                goto Failed_bind_backend;
                break;

        case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
                mali_allocation->type = MALI_MEM_EXTERNAL;
                mem_backend->type = MALI_MEM_EXTERNAL;
                ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
                                            args->mem_union.bind_ext_memory.flags);
                if (_MALI_OSK_ERR_OK != ret) {
                        MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
                        goto Failed_bind_backend;
                }
                break;

        case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
                /* not allowed */
                MALI_DEBUG_PRINT_ERROR(("External cow memory type not supported !\n"));
                goto Failed_bind_backend;
                break;

        default:
                MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported !\n"));
                goto Failed_bind_backend;
                break;
        }
        MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
        atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
        return _MALI_OSK_ERR_OK;

Failed_bind_backend:
        mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);

Failed_alloc_backend:
        mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
        mali_mem_allocation_struct_destory(mali_allocation);

        MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
        return ret;
}


/*
* Function _mali_ukk_mem_unbind -- unbind external memory from a GPU address
* This function unbinds the backend memory and frees the allocation;
* there is no ref_count for this type of memory.
*/
_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
{
        struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
        mali_mem_allocation *mali_allocation = NULL;
        struct mali_vma_node *mali_vma_node = NULL;
        u32 mali_addr = args->vaddr;
        MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));

        /* find the allocation by vaddr */
        mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
        if (likely(mali_vma_node)) {
                MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
                mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
        } else {
                MALI_DEBUG_ASSERT(NULL != mali_vma_node);
                return _MALI_OSK_ERR_INVALID_ARGS;
        }

        if (NULL != mali_allocation)
                /* check ref_count */
                mali_allocation_unref(&mali_allocation);
        return _MALI_OSK_ERR_OK;
}

/*
* Function _mali_ukk_mem_cow -- COW for an allocation
* This function allocates new pages for a range (range_start, range_start + range_size)
* of the target allocation and maps the result to a new GPU vaddr, reusing the target
* allocation's pages outside that range.
*/
_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
{
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
        mali_mem_backend *target_backend = NULL;
        mali_mem_backend *mem_backend = NULL;
        struct mali_vma_node *mali_vma_node = NULL;
        mali_mem_allocation *mali_allocation = NULL;

        struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
        /* Get the target backend for cow */
        target_backend = mali_mem_backend_struct_search(session, args->target_handle);

        if (NULL == target_backend || 0 == target_backend->size) {
                MALI_DEBUG_ASSERT_POINTER(target_backend);
                MALI_DEBUG_ASSERT(0 != target_backend->size);
                return ret;
        }

        /* COW does not support resizable memory */
        MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags));

        /* Check if the new mali address is allocated */
        mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);

        if (unlikely(mali_vma_node)) {
                MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
                return ret;
        }

        /* create new allocation for COW */
        mali_allocation = mali_mem_allocation_struct_create(session);
        if (mali_allocation == NULL) {
                MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
                return _MALI_OSK_ERR_NOMEM;
        }
        mali_allocation->psize = args->target_size;
        mali_allocation->vsize = args->target_size;
        mali_allocation->type = MALI_MEM_COW;

        /* add allocation node to RB tree for index */
        mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
        mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
        mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

        /* create new backend for COW memory */
        mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
        if (mali_allocation->backend_handle < 0) {
                ret = _MALI_OSK_ERR_NOMEM;
                MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
                goto failed_alloc_backend;
        }
        mem_backend->mali_allocation = mali_allocation;
        mem_backend->type = mali_allocation->type;

        if (target_backend->type == MALI_MEM_SWAP ||
            (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) {
                mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED;
                /**
                 *     CoWed swap backends cannot be mapped as a non-linear vma: when a vma
                 * carries the VM_NONLINEAR flag the kernel uses vma->vm_private_data itself,
                 * while the mali driver uses that field to store the mali_allocation pointer,
                 * so the two uses conflict.
                 *     To work around this, about 64MB of index space starting at 0 is reserved;
                 * no real page index is ever assigned in the range 0 to (64MB >> PAGE_SHIFT_NUM),
                 * and every CoWed swap memory backend gets start_idx = 0. These backends are
                 * mapped as linear and added to the priority tree of the global swap file, but
                 * their vmas can never be found through a normal page->index lookup, so the
                 * pages in those vmas can never be swapped out.
                 */
                mem_backend->start_idx = 0;
        }

        /* Increase the target backend's cow count, allocate new pages from os mem
        * for the modified range of the COW backend, and keep (and take a reference on)
        * the pages outside the modified range.
        */
        MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x;  cow_addr: 0x%x,  size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
                             mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));

        ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
        if (_MALI_OSK_ERR_OK != ret) {
                MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
                goto failed_do_cow;
        }

        /* map to GPU side */
        mali_allocation->mali_mapping.addr = args->vaddr;
        /* set gpu mmu property */
        _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);

        _mali_osk_mutex_wait(session->memory_lock);
        /* Map on Mali */
        ret = mali_mem_mali_map_prepare(mali_allocation);
        if (0 != ret) {
                MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
                goto failed_gpu_map;
        }

        if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
                mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
        }

        _mali_osk_mutex_signal(session->memory_lock);

        mutex_lock(&target_backend->mutex);
        target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED;
        mutex_unlock(&target_backend->mutex);

        atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
        if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
                session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
        }
        return _MALI_OSK_ERR_OK;

failed_gpu_map:
        _mali_osk_mutex_signal(session->memory_lock);
        mali_mem_cow_release(mem_backend, MALI_FALSE);
        mem_backend->cow_mem.count = 0;
failed_do_cow:
        mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:
        mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
        mali_mem_allocation_struct_destory(mali_allocation);

        return ret;
}

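/*
* Function _mali_ukk_mem_cow_modify_range -- allocate fresh pages for a
* sub-range of an existing COW backend and remap that range on the Mali side.
* The number of pages actually replaced is reported back in
* args->change_pages_nr and added to the session's memory accounting.
*/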
_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
{
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
        mali_mem_backend *mem_backend = NULL;
        struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;

        MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
        /* Get the backend that needs to be modified. */
        mem_backend = mali_mem_backend_struct_search(session, args->vaddr);

        if (NULL == mem_backend || 0 == mem_backend->size) {
                MALI_DEBUG_ASSERT_POINTER(mem_backend);
                MALI_DEBUG_ASSERT(0 != mem_backend->size);
                return ret;
        }

        MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type);

        ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
        args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
        if (_MALI_OSK_ERR_OK != ret)
                return ret;
        _mali_osk_mutex_wait(session->memory_lock);
        if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
                mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
        }
        _mali_osk_mutex_signal(session->memory_lock);

        atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
        if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
                session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
        }

        return _MALI_OSK_ERR_OK;
}


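/*
* Function _mali_ukk_mem_resize -- resize an existing resizable allocation
* to the page-aligned physical size given in args->psize, using
* mali_mem_resize() on the backend found at args->vaddr.
*/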
_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args)
{
        mali_mem_backend *mem_backend = NULL;
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;

        struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;

        MALI_DEBUG_ASSERT_POINTER(session);
        MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
        MALI_DEBUG_ASSERT(0 == args->psize % MALI_MMU_PAGE_SIZE);

        /* Get the memory backend that needs to be resized. */
        mem_backend = mali_mem_backend_struct_search(session, args->vaddr);

        if (NULL == mem_backend)  {
                MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
                return ret;
        }

        MALI_DEBUG_ASSERT(args->psize != mem_backend->size);

        ret = mali_mem_resize(session, mem_backend, args->psize);

        return ret;
}

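/*
* Function _mali_ukk_mem_usage_get -- report overall Mali memory usage.
* When a vaddr is supplied and it refers to a COW backend, also report the
* number of pages changed by the last COW modification in args->change_pages_nr.
*/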
_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
{
        args->memory_usage = _mali_ukk_report_memory_usage();
        if (0 != args->vaddr) {
                mali_mem_backend *mem_backend = NULL;
                struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
                /* Get the backend for the given address. */
                mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
                if (NULL == mem_backend) {
                        MALI_DEBUG_ASSERT_POINTER(mem_backend);
                        return _MALI_OSK_ERR_FAULT;
                }

                if (MALI_MEM_COW == mem_backend->type)
                        args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
        }
        return _MALI_OSK_ERR_OK;
}