2 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
11 #include <linux/list.h>
13 #include <linux/mm_types.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/slab.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
20 #include <linux/platform_device.h>
21 #if defined(CONFIG_DMA_SHARED_BUFFER)
22 #include <linux/dma-buf.h>
24 #include <linux/idr.h>
27 #include "mali_osk_mali.h"
28 #include "mali_kernel_linux.h"
29 #include "mali_scheduler.h"
30 #include "mali_memory.h"
31 #include "mali_memory_os_alloc.h"
32 #if defined(CONFIG_DMA_SHARED_BUFFER)
33 #include "mali_memory_dma_buf.h"
34 #include "mali_memory_secure.h"
36 #if defined(CONFIG_MALI400_UMP)
37 #include "mali_memory_ump.h"
39 #include "mali_memory_manager.h"
40 #include "mali_memory_virtual.h"
41 #include "mali_memory_util.h"
42 #include "mali_memory_external.h"
43 #include "mali_memory_cow.h"
44 #include "mali_memory_block_alloc.h"
46 #include "mali_memory_swap_alloc.h"
49 * New memory system interface
52 /* init idr for backend memory */
53 struct idr mali_backend_idr;
54 struct mutex mali_idr_mutex;
56 /* init allocation manager */
/* Initialize a session's allocation manager: the rwlock protecting the
 * virtual-address index, the mutex protecting the allocation list, the list
 * head itself, the RB-tree root used to index allocations by GPU virtual
 * address, and the allocation counter.
 * NOTE(review): this chunk is truncated -- the function's braces and return
 * statement are not visible here; confirm against the full file. */
57 int mali_memory_manager_init(struct mali_allocation_manager *mgr)
60 rwlock_init(&mgr->vm_lock);
61 mutex_init(&mgr->list_mutex);
64 INIT_LIST_HEAD(&mgr->head);
67 mgr->allocation_mgr_rb = RB_ROOT;
68 mgr->mali_allocation_num = 0;
72 /* Deinit allocation manager
73 * Perform some sanity checks for debug builds
/* Deinitialize a session's allocation manager. In debug builds, assert that
 * no allocations remain: the RB tree must be empty (root equals rb_last of an
 * empty tree, i.e. both NULL) and the allocation list must be empty. */
75 void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
77 /* check RB tree is empty */
78 MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
79 /* check allocation List */
80 MALI_DEBUG_ASSERT(list_empty(&mgr->head));
83 /* Prepare memory descriptor */
/* Allocate and initialize a mali_mem_allocation descriptor for @session.
 * The descriptor starts with refcount 1 and is appended to the session's
 * allocation list under list_mutex; the session's allocation count is bumped.
 * Returns the new descriptor on success.
 * NOTE(review): the failure-path return after the NULL check is not visible
 * in this truncated chunk -- presumably returns NULL; confirm in full file. */
84 static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
86 mali_mem_allocation *mali_allocation;
89 mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
90 if (NULL == mali_allocation) {
91 MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n"));
/* Debug-build-only magic value for descriptor validation. */
95 MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
98 mali_allocation->flags = 0;
99 mali_allocation->session = session;
101 INIT_LIST_HEAD(&mali_allocation->list);
102 _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);
/* Register the new allocation on the session's list (list_mutex held). */
107 mutex_lock(&session->allocation_mgr.list_mutex);
108 list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
109 session->allocation_mgr.mali_allocation_num++;
110 mutex_unlock(&session->allocation_mgr.list_mutex);
112 return mali_allocation;
/* Unlink @alloc from its owning session's allocation list and decrement the
 * session's allocation count, all under the session's list_mutex.
 * NOTE(review): the kfree of the descriptor is not visible in this truncated
 * chunk -- presumably freed right after the unlock; confirm in full file. */
115 void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
117 MALI_DEBUG_ASSERT_POINTER(alloc);
118 MALI_DEBUG_ASSERT_POINTER(alloc->session);
119 mutex_lock(&alloc->session->allocation_mgr.list_mutex);
120 list_del(&alloc->list);
121 alloc->session->allocation_mgr.mali_allocation_num--;
122 mutex_unlock(&alloc->session->allocation_mgr.list_mutex);
/* Allocate a zeroed mali_mem_backend of physical size @psize, initialize its
 * mutex/list/use-count, and register it in the global backend idr (handles
 * start at 1) under mali_idr_mutex. Two idr code paths are kept: the legacy
 * idr_pre_get + idr_get_new_above pair for kernels older than 3.9, and
 * idr_alloc for newer kernels.
 * NOTE(review): the return statements (idr handle on success, error code on
 * failure) are not visible in this truncated chunk; confirm in full file. */
127 int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
129 mali_mem_backend *mem_backend = NULL;
132 *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
133 if (NULL == *backend) {
134 MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
137 mem_backend = *backend;
138 mem_backend->size = psize;
139 mutex_init(&mem_backend->mutex);
140 INIT_LIST_HEAD(&mem_backend->list);
141 mem_backend->using_count = 0;
144 /* link backend with id */
145 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
/* Legacy idr API: preload, then allocate an id above 0 under the mutex. */
147 if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
151 mutex_lock(&mali_idr_mutex);
152 ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
153 mutex_unlock(&mali_idr_mutex);
155 if (-ENOSPC == ret) {
/* Modern idr API (>= 3.9): single-call allocation in [1, MALI_S32_MAX). */
162 mutex_lock(&mali_idr_mutex);
163 ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
164 mutex_unlock(&mali_idr_mutex);
167 MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
/* Remove a backend's handle from the global backend idr under mali_idr_mutex.
 * NOTE(review): the kfree of *backend and the NULL-ing of the caller's
 * pointer are not visible in this truncated chunk; confirm in full file. */
176 static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
178 mali_mem_backend *mem_backend = *backend;
180 mutex_lock(&mali_idr_mutex);
181 idr_remove(&mali_backend_idr, backend_handle);
182 mutex_unlock(&mali_idr_mutex);
/* Resolve the memory backend covering the GPU virtual address @mali_address:
 * look up the vma node in the session's allocation manager, recover the
 * owning mali_mem_allocation via container_of, then resolve the allocation's
 * backend handle through the global idr (under mali_idr_mutex).
 * NOTE(review): the NULL-return on lookup failure and the final return of
 * mem_bkend are not visible in this truncated chunk; confirm in full file. */
189 struct mali_vma_node *mali_vma_node = NULL;
190 mali_mem_backend *mem_bkend = NULL;
191 mali_mem_allocation *mali_alloc = NULL;
192 MALI_DEBUG_ASSERT_POINTER(session);
193 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
194 if (NULL == mali_vma_node) {
195 MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n"));
198 mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
199 /* Get backend memory & Map on CPU */
200 mutex_lock(&mali_idr_mutex);
201 mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
202 mutex_unlock(&mali_idr_mutex);
203 MALI_DEBUG_ASSERT(NULL != mem_bkend);
/* Resize an OS-memory backend to @physical_size (must be page aligned).
 * Growing: allocate the extra OS pages into a temp list, splice them into
 * the backend, extend the CPU mapping (if one exists) and the mali MMU
 * mapping. Shrinking: move the tail pages to a temp list, free the mali MMU
 * range, zap the corresponding CPU PTEs (if mapped) and free the pages.
 * Finally update psize/size and the session's allocated-page accounting.
 * Only valid for MALI_MEM_OS allocations flagged MALI_MEM_FLAG_CAN_RESIZE;
 * backend->mutex is held across the resize.
 * NOTE(review): this chunk is truncated -- several braces, error labels and
 * the final error return are missing; confirm against the full file. */
207 static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
209 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
211 mali_mem_allocation *mali_allocation = NULL;
212 mali_mem_os_mem tmp_os_mem;
213 s32 change_page_count;
215 MALI_DEBUG_ASSERT_POINTER(session);
216 MALI_DEBUG_ASSERT_POINTER(mem_backend);
217 MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
218 MALI_DEBUG_ASSERT(0 == physical_size % MALI_MMU_PAGE_SIZE);
220 mali_allocation = mem_backend->mali_allocation;
221 MALI_DEBUG_ASSERT_POINTER(mali_allocation);
223 MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
224 MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);
226 mutex_lock(&mem_backend->mutex);
/* Grow path: physical_size larger than the current backend size. */
229 if (physical_size > mem_backend->size) {
230 u32 add_size = physical_size - mem_backend->size;
232 MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
234 /* Allocate new pages from os mem */
235 retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);
238 if (-ENOMEM == retval) {
239 ret = _MALI_OSK_ERR_NOMEM;
241 ret = _MALI_OSK_ERR_FAULT;
243 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
244 goto failed_alloc_memory;
247 MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);
249 /* Resize the memory of the backend */
250 ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
253 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n"));
254 goto failed_resize_pages;
257 /*Resize cpu mapping */
258 if (NULL != mali_allocation->cpu_mapping.vma) {
259 ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size);
260 if (unlikely(ret != _MALI_OSK_ERR_OK)) {
261 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
266 /* Resize mali mapping */
267 _mali_osk_mutex_wait(session->memory_lock);
268 ret = mali_mem_mali_map_resize(mali_allocation, physical_size);
271 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
/* Map only the newly added page range into the mali MMU. */
275 ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
276 mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
278 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
282 _mali_osk_mutex_signal(session->memory_lock);
/* Shrink path: release the pages beyond physical_size. */
284 u32 dec_size, page_count;
286 INIT_LIST_HEAD(&tmp_os_mem.pages);
287 tmp_os_mem.count = 0;
289 dec_size = mem_backend->size - physical_size;
290 MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE);
292 page_count = dec_size / MALI_MMU_PAGE_SIZE;
293 vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;
295 /* Resize the memory of the backend */
296 ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);
299 MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
300 goto failed_resize_pages;
303 /* Resize mali map */
304 _mali_osk_mutex_wait(session->memory_lock);
305 mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
306 _mali_osk_mutex_signal(session->memory_lock);
308 /* Zap cpu mapping */
309 if (0 != mali_allocation->cpu_mapping.addr) {
310 MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
311 zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
314 /* Free those extra pages */
315 mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
318 /* Resize memory allocation and memory backend */
319 change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
320 mali_allocation->psize = physical_size;
321 mem_backend->size = physical_size;
322 mutex_unlock(&mem_backend->mutex);
/* Account the delta against the session, tracking the high-water mark. */
324 if (change_page_count > 0) {
325 atomic_add(change_page_count, &session->mali_mem_allocated_pages);
326 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
327 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
331 atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
334 return _MALI_OSK_ERR_OK;
/* Error path: undo partial work -- return grown pages to the temp list (or
 * restore moved pages on shrink), free temp pages, release the mutex. */
337 _mali_osk_mutex_signal(session->memory_lock);
339 if (physical_size > mem_backend->size) {
340 mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
341 (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
343 mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
346 if (0 != tmp_os_mem.count)
347 mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
350 mutex_unlock(&mem_backend->mutex);
355 /* Set GPU MMU properties */
/* Translate allocation flags into GPU MMU mapping properties: force GP
 * read-allocate when _MALI_MEMORY_GPU_READ_ALLOCATE is requested, otherwise
 * use the default MMU flags.
 * NOTE(review): the else branch's braces are not visible in this chunk. */
356 static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
358 if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
359 *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
361 *properties = MALI_MMU_FLAGS_DEFAULT;
/* Grow the backend at GPU address @mali_addr by @add_size bytes (page
 * aligned): locate the backend, compute the new physical size, reject growth
 * past the allocation's virtual span, then delegate to mali_mem_resize().
 * NOTE(review): the early-return error paths and final return of ret are not
 * visible in this truncated chunk; confirm in full file. */
365 _mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size)
367 mali_mem_backend *mem_backend = NULL;
368 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
369 mali_mem_allocation *mali_allocation = NULL;
370 u32 new_physical_size;
371 MALI_DEBUG_ASSERT_POINTER(session);
372 MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
374 /* Get the memory backend that need to be resize. */
375 mem_backend = mali_mem_backend_struct_search(session, mali_addr);
377 if (NULL == mem_backend) {
378 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
382 mali_allocation = mem_backend->mali_allocation;
384 MALI_DEBUG_ASSERT_POINTER(mali_allocation);
386 new_physical_size = add_size + mem_backend->size;
/* Physical size may never exceed the reserved virtual span. */
388 if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size))
391 MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size);
393 ret = mali_mem_resize(session, mem_backend, new_physical_size);
399 * Function _mali_ukk_mem_allocate - allocate mali memory
/* UK (user-kernel) entry: allocate mali memory at args->gpu_vaddr.
 * Flow: reject an already-used GPU address; create the allocation struct;
 * pick a backend type from args->flags (SWAP / resizable OS / SECURE / BLOCK
 * if dedicated memory exists / OS default); index the allocation in the RB
 * tree; create the backend; prepare the mali MMU mapping; allocate physical
 * pages per backend type; map them on the GPU; update session page
 * accounting and its high-water mark.
 * NOTE(review): this chunk is truncated -- several braces, #else/#endif
 * lines, error labels and returns are missing; confirm in the full file. */
401 _mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
403 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
404 mali_mem_backend *mem_backend = NULL;
405 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
407 mali_mem_allocation *mali_allocation = NULL;
408 struct mali_vma_node *mali_vma_node = NULL;
410 MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));
412 /* Check if the address is allocated
414 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
416 if (unlikely(mali_vma_node)) {
417 MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
418 return _MALI_OSK_ERR_FAULT;
421 *create mali memory allocation
424 mali_allocation = mali_mem_allocation_struct_create(session);
426 if (mali_allocation == NULL) {
427 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
428 return _MALI_OSK_ERR_NOMEM;
430 mali_allocation->psize = args->psize;
431 mali_allocation->vsize = args->vsize;
433 /* MALI_MEM_OS if need to support mem resize,
434 * or MALI_MEM_BLOCK if have dedicated memory,
/* Backend type selection by flag priority: SWAP > RESIZEABLE(OS) > SECURE >
 * dedicated BLOCK > plain OS. */
438 if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
439 mali_allocation->type = MALI_MEM_SWAP;
440 } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
441 mali_allocation->type = MALI_MEM_OS;
442 mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
443 } else if (args->flags & _MALI_MEMORY_ALLOCATE_SECURE) {
444 mali_allocation->type = MALI_MEM_SECURE;
445 } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
446 mali_allocation->type = MALI_MEM_BLOCK;
448 mali_allocation->type = MALI_MEM_OS;
452 *add allocation node to RB tree for index
454 mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
455 mali_allocation->mali_vma_node.vm_node.size = args->vsize;
457 mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
459 mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
460 if (mali_allocation->backend_handle < 0) {
461 ret = _MALI_OSK_ERR_NOMEM;
462 MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
463 goto failed_alloc_backend;
467 mem_backend->mali_allocation = mali_allocation;
468 mem_backend->type = mali_allocation->type;
470 mali_allocation->mali_mapping.addr = args->gpu_vaddr;
472 /* set gpu mmu propery */
473 _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
474 /* do prepare for MALI mapping */
475 if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
476 _mali_osk_mutex_wait(session->memory_lock);
478 ret = mali_mem_mali_map_prepare(mali_allocation);
480 _mali_osk_mutex_signal(session->memory_lock);
481 goto failed_prepare_map;
483 _mali_osk_mutex_signal(session->memory_lock);
/* Zero-sized allocation: nothing to back yet, just init an empty page list. */
486 if (mali_allocation->psize == 0) {
487 mem_backend->os_mem.count = 0;
488 INIT_LIST_HEAD(&mem_backend->os_mem.pages);
/* Defer-bind: mark the backend not-yet-bound; pages come later. */
492 if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
493 mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
494 mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
495 /* init for defer bind backend*/
496 mem_backend->os_mem.count = 0;
497 INIT_LIST_HEAD(&mem_backend->os_mem.pages);
502 if (likely(mali_allocation->psize > 0)) {
504 if (MALI_MEM_SECURE == mem_backend->type) {
505 #if defined(CONFIG_DMA_SHARED_BUFFER)
506 ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd);
507 if (_MALI_OSK_ERR_OK != ret) {
508 MALI_DEBUG_PRINT(1, ("Failed to attach dma buf for secure memory! \n"));
509 goto failed_alloc_pages;
512 ret = _MALI_OSK_ERR_UNSUPPORTED;
513 MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory! \n"));
514 goto failed_alloc_pages;
519 *allocate physical memory
521 if (mem_backend->type == MALI_MEM_OS) {
522 retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
523 } else if (mem_backend->type == MALI_MEM_BLOCK) {
524 /* try to allocated from BLOCK memory first, then try OS memory if failed.*/
525 if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
526 retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
527 mem_backend->type = MALI_MEM_OS;
528 mali_allocation->type = MALI_MEM_OS;
530 } else if (MALI_MEM_SWAP == mem_backend->type) {
531 retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
533 /* ONLY support mem_os type */
534 MALI_DEBUG_ASSERT(0);
538 ret = _MALI_OSK_ERR_NOMEM;
539 MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
540 goto failed_alloc_pages;
/* Map the freshly allocated pages into the GPU MMU, per backend type. */
548 if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
549 _mali_osk_mutex_wait(session->memory_lock);
552 if (mem_backend->type == MALI_MEM_OS) {
553 ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
554 mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
556 } else if (mem_backend->type == MALI_MEM_BLOCK) {
557 mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
558 mali_allocation->mali_mapping.properties);
559 } else if (mem_backend->type == MALI_MEM_SWAP) {
560 ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
561 mali_allocation->mali_mapping.properties);
562 } else if (mem_backend->type == MALI_MEM_SECURE) {
563 #if defined(CONFIG_DMA_SHARED_BUFFER)
564 ret = mali_mem_secure_mali_map(&mem_backend->secure_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties);
566 } else { /* unsupport type */
567 MALI_DEBUG_ASSERT(0);
570 _mali_osk_mutex_signal(session->memory_lock);
/* Session accounting: add the backend's page count, track high-water mark. */
573 if (MALI_MEM_OS == mem_backend->type) {
574 atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
575 } else if (MALI_MEM_BLOCK == mem_backend->type) {
576 atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
577 } else if (MALI_MEM_SECURE == mem_backend->type) {
578 atomic_add(mem_backend->secure_mem.count, &session->mali_mem_allocated_pages);
580 MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
581 atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
582 atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
585 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
586 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
588 return _MALI_OSK_ERR_OK;
/* Error unwind: free mali mapping, destroy backend, remove from RB tree,
 * destroy the allocation struct. */
591 mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
593 mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
594 failed_alloc_backend:
596 mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
597 mali_mem_allocation_struct_destory(mali_allocation);
/* UK entry: free the mali memory at args->gpu_vaddr. Look up the allocation
 * by GPU virtual address; if found, drop one reference (the allocation is
 * destroyed when the refcount reaches zero inside mali_allocation_unref)
 * and report the number of freed pages back to user space. */
603 _mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
605 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
606 u32 vaddr = args->gpu_vaddr;
607 mali_mem_allocation *mali_alloc = NULL;
608 struct mali_vma_node *mali_vma_node = NULL;
610 /* find mali allocation structure by vaddress*/
611 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
612 if (NULL == mali_vma_node) {
613 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
614 return _MALI_OSK_ERR_INVALID_ARGS;
616 MALI_DEBUG_ASSERT(NULL != mali_vma_node);
617 mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
620 /* check ref_count */
621 args->free_pages_nr = mali_allocation_unref(&mali_alloc);
623 return _MALI_OSK_ERR_OK;
628 * Function _mali_ukk_mem_bind -- bind an external memory region to a new GPU address
629 * It will allocate a new mem allocation and bind external memory to it.
630 * Supported backend type are:
631 * _MALI_MEMORY_BIND_BACKEND_UMP
632 * _MALI_MEMORY_BIND_BACKEND_DMA_BUF
633 * _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
634 * CPU access is not supported yet
/* UK entry: bind external memory (UMP / dma-buf / physical range) to a new
 * GPU address. Creates an allocation plus backend, indexes the allocation by
 * GPU address, dispatches on the bind-backend flag to attach the external
 * memory, then accounts the bound pages in the session's per-type array.
 * UMP and dma-buf paths are compiled out (and fail) when their configs are
 * disabled; MALI internal and EXT_COW types are rejected as unsupported.
 * NOTE(review): this chunk is truncated -- #else/#endif lines, break
 * statements, error labels and the final error return are missing. */
636 _mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
638 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
639 mali_mem_backend *mem_backend = NULL;
640 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
641 mali_mem_allocation *mali_allocation = NULL;
642 MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));
645 * allocate mali allocation.
647 mali_allocation = mali_mem_allocation_struct_create(session);
649 if (mali_allocation == NULL) {
650 return _MALI_OSK_ERR_NOMEM;
652 mali_allocation->psize = args->size;
653 mali_allocation->vsize = args->size;
654 mali_allocation->mali_mapping.addr = args->vaddr;
656 /* add allocation node to RB tree for index */
657 mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
658 mali_allocation->mali_vma_node.vm_node.size = args->size;
659 mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
661 /* allocate backend*/
662 if (mali_allocation->psize > 0) {
663 mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
664 if (mali_allocation->backend_handle < 0) {
665 goto Failed_alloc_backend;
/* Zero-sized bind is rejected. */
669 goto Failed_alloc_backend;
672 mem_backend->size = mali_allocation->psize;
673 mem_backend->mali_allocation = mali_allocation;
/* Dispatch on the requested external-backend type. */
675 switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
676 case _MALI_MEMORY_BIND_BACKEND_UMP:
677 #if defined(CONFIG_MALI400_UMP)
678 mali_allocation->type = MALI_MEM_UMP;
679 mem_backend->type = MALI_MEM_UMP;
680 ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
681 args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
682 if (_MALI_OSK_ERR_OK != ret) {
683 MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
684 goto Failed_bind_backend;
687 MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
688 goto Failed_bind_backend;
691 case _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
692 #if defined(CONFIG_DMA_SHARED_BUFFER)
693 mali_allocation->type = MALI_MEM_DMA_BUF;
694 mem_backend->type = MALI_MEM_DMA_BUF;
695 ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
696 args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
697 if (_MALI_OSK_ERR_OK != ret) {
698 MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
699 goto Failed_bind_backend;
702 MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
703 goto Failed_bind_backend;
706 case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
708 MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n"));
709 goto Failed_bind_backend;
712 case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
713 mali_allocation->type = MALI_MEM_EXTERNAL;
714 mem_backend->type = MALI_MEM_EXTERNAL;
715 ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
716 args->mem_union.bind_ext_memory.flags);
717 if (_MALI_OSK_ERR_OK != ret) {
718 MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
719 goto Failed_bind_backend;
723 case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
725 MALI_DEBUG_PRINT_ERROR(("External cow memory type not supported !\n"));
726 goto Failed_bind_backend;
730 MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported !\n"));
731 goto Failed_bind_backend;
/* Success: account the bound pages in the session's per-type counter. */
734 MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
735 atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
736 return _MALI_OSK_ERR_OK;
/* Error unwind: destroy backend, remove from RB index, free allocation. */
739 mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
741 Failed_alloc_backend:
742 mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
743 mali_mem_allocation_struct_destory(mali_allocation);
745 MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
751 * Function _mali_ukk_mem_unbind -- unbind an external memory region from a GPU address
752 * This function unbind the backend memory and free the allocation
753 * no ref_count for this type of memory
/* UK entry: unbind external memory at args->vaddr. Looks up the allocation
 * by GPU virtual address and drops its reference (bound external memory
 * carries no extra ref_count, so this releases it). Returns INVALID_ARGS if
 * no allocation covers the address. */
755 _mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
758 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
759 mali_mem_allocation *mali_allocation = NULL;
760 struct mali_vma_node *mali_vma_node = NULL;
761 u32 mali_addr = args->vaddr;
762 MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));
764 /* find the allocation by vaddr */
765 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
766 if (likely(mali_vma_node)) {
767 MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
768 mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
770 MALI_DEBUG_ASSERT(NULL != mali_vma_node);
771 return _MALI_OSK_ERR_INVALID_ARGS;
774 if (NULL != mali_allocation)
775 /* check ref_count */
776 mali_allocation_unref(&mali_allocation);
777 return _MALI_OSK_ERR_OK;
781 * Function _mali_ukk_mem_cow -- COW for an allocation
782 * This function allocate new pages for a range (range, range+size) of allocation
783 * And Map it(keep use the not in range pages from target allocation ) to an GPU vaddr
/* UK entry: copy-on-write a range of an existing allocation into a new GPU
 * address. Finds the target backend, rejects resizable targets and in-use
 * addresses, creates a new MALI_MEM_COW allocation + backend, performs the
 * COW (new pages for the modified range, refs on the untouched pages), maps
 * the result on the GPU (non-swap-cowed only), marks the target as COWED,
 * and accounts the newly allocated range against the session.
 * NOTE(review): truncated chunk -- several braces, error labels/returns are
 * missing; confirm against the full file. */
785 _mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
787 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
788 mali_mem_backend *target_backend = NULL;
789 mali_mem_backend *mem_backend = NULL;
790 struct mali_vma_node *mali_vma_node = NULL;
791 mali_mem_allocation *mali_allocation = NULL;
793 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
794 /* Get the target backend for cow */
795 target_backend = mali_mem_backend_struct_search(session, args->target_handle);
797 if (NULL == target_backend || 0 == target_backend->size) {
798 MALI_DEBUG_ASSERT_POINTER(target_backend);
799 MALI_DEBUG_ASSERT(0 != target_backend->size);
803 /*Cow not support resized mem */
804 MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags));
806 /* Check if the new mali address is allocated */
807 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);
809 if (unlikely(mali_vma_node)) {
810 MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
814 /* create new alloction for COW*/
815 mali_allocation = mali_mem_allocation_struct_create(session);
816 if (mali_allocation == NULL) {
817 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
818 return _MALI_OSK_ERR_NOMEM;
820 mali_allocation->psize = args->target_size;
821 mali_allocation->vsize = args->target_size;
822 mali_allocation->type = MALI_MEM_COW;
824 /*add allocation node to RB tree for index*/
825 mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
826 mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
827 mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
829 /* create new backend for COW memory */
830 mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
831 if (mali_allocation->backend_handle < 0) {
832 ret = _MALI_OSK_ERR_NOMEM;
833 MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
834 goto failed_alloc_backend;
836 mem_backend->mali_allocation = mali_allocation;
837 mem_backend->type = mali_allocation->type;
/* COW of a swap (or swap-cowed) target inherits the SWAP_COWED flag. */
839 if (target_backend->type == MALI_MEM_SWAP ||
840 (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) {
841 mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED;
843 * CoWed swap backends couldn't be mapped as non-linear vma, because if one
844 * vma is set with flag VM_NONLINEAR, the vma->vm_private_data will be used by kernel,
845 * while in mali driver, we use this variable to store the pointer of mali_allocation, so there
847 * To resolve this problem, we have to do some fake things, we reserved about 64MB
848 * space from index 0, there isn't really page's index will be set from 0 to (64MB>>PAGE_SHIFT_NUM),
849 * and all of CoWed swap memory backends' start_idx will be assigned with 0, and these
850 * backends will be mapped as linear and will add to priority tree of global swap file, while
851 * these vmas will never be found by using normal page->index, these pages in those vma
852 * also couldn't be swapped out.
854 mem_backend->start_idx = 0;
857 /* Add the target backend's cow count, also allocate new pages for COW backend from os mem
858 *for a modified range and keep the page which not in the modified range and Add ref to it
860 MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x; cow_addr: 0x%x, size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
861 mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));
863 ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
864 if (_MALI_OSK_ERR_OK != ret) {
865 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
/* Set up the GPU mapping for the new COW allocation. */
872 mali_allocation->mali_mapping.addr = args->vaddr;
873 /* set gpu mmu propery */
874 _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
876 _mali_osk_mutex_wait(session->memory_lock);
878 ret = mali_mem_mali_map_prepare(mali_allocation);
880 MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
/* Swap-cowed backends are mapped lazily; map now only otherwise. */
884 if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
885 mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
888 _mali_osk_mutex_signal(session->memory_lock);
890 mutex_lock(&target_backend->mutex);
891 target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED;
892 mutex_unlock(&target_backend->mutex);
/* Account only the freshly copied range; shared pages were already counted. */
894 atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
895 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
896 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
898 return _MALI_OSK_ERR_OK;
/* Error unwind: release cowed pages, destroy backend, remove from index,
 * destroy the allocation struct. */
901 _mali_osk_mutex_signal(session->memory_lock);
902 mali_mem_cow_release(mem_backend, MALI_FALSE);
903 mem_backend->cow_mem.count = 0;
905 mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
906 failed_alloc_backend:
907 mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
908 mali_mem_allocation_struct_destory(mali_allocation);
/* UK entry: re-COW a sub-range of an existing COW allocation. Finds the
 * backend at args->vaddr (must be MALI_MEM_COW and non-empty), performs the
 * range modification, reports the number of changed pages back to user
 * space, remaps the range on the GPU (non-swap-cowed only), and adds the
 * changed pages to the session's accounting with high-water-mark tracking.
 * NOTE(review): truncated chunk -- error returns/braces not all visible. */
913 _mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
915 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
916 mali_mem_backend *mem_backend = NULL;
917 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
919 MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
920 /* Get the backend that need to be modified. */
921 mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
923 if (NULL == mem_backend || 0 == mem_backend->size) {
924 MALI_DEBUG_ASSERT_POINTER(mem_backend);
925 MALI_DEBUG_ASSERT(0 != mem_backend->size);
929 MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type);
931 ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
932 args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
933 if (_MALI_OSK_ERR_OK != ret)
935 _mali_osk_mutex_wait(session->memory_lock);
936 if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
937 mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
939 _mali_osk_mutex_signal(session->memory_lock);
941 atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
942 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
943 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
946 return _MALI_OSK_ERR_OK;
/* UK entry: resize the allocation at args->vaddr to args->psize (page
 * aligned). Finds the backend by GPU virtual address and delegates the
 * actual work to mali_mem_resize().
 * NOTE(review): the error return for the NULL-backend case and the final
 * return of ret are not visible in this truncated chunk. */
950 _mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args)
952 mali_mem_backend *mem_backend = NULL;
953 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
955 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
957 MALI_DEBUG_ASSERT_POINTER(session);
958 MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
959 MALI_DEBUG_ASSERT(0 == args->psize % MALI_MMU_PAGE_SIZE);
961 /* Get the memory backend that need to be resize. */
962 mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
964 if (NULL == mem_backend) {
965 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
969 MALI_DEBUG_ASSERT(args->psize != mem_backend->size);
971 ret = mali_mem_resize(session, mem_backend, args->psize);
/* UK entry: report overall memory usage. Always fills args->memory_usage
 * from the global usage counter; when a vaddr is supplied, additionally look
 * up that backend and, for COW backends, report its changed-page count. */
976 _mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
978 args->memory_usage = _mali_ukk_report_memory_usage();
979 if (0 != args->vaddr) {
980 mali_mem_backend *mem_backend = NULL;
981 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
982 /* Get the backend that need to be modified. */
983 mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
984 if (NULL == mem_backend) {
985 MALI_DEBUG_ASSERT_POINTER(mem_backend);
986 return _MALI_OSK_ERR_FAULT;
989 if (MALI_MEM_COW == mem_backend->type)
990 args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
992 return _MALI_OSK_ERR_OK;