2 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
11 #include <linux/list.h>
13 #include <linux/mm_types.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/slab.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
20 #include <linux/platform_device.h>
21 #if defined(CONFIG_DMA_SHARED_BUFFER)
22 #include <linux/dma-buf.h>
24 #include <linux/idr.h>
27 #include "mali_osk_mali.h"
28 #include "mali_kernel_linux.h"
29 #include "mali_scheduler.h"
30 #include "mali_memory.h"
31 #include "mali_memory_os_alloc.h"
32 #if defined(CONFIG_DMA_SHARED_BUFFER)
33 #include "mali_memory_dma_buf.h"
35 #if defined(CONFIG_MALI400_UMP)
36 #include "mali_memory_ump.h"
38 #include "mali_memory_manager.h"
39 #include "mali_memory_virtual.h"
40 #include "mali_memory_util.h"
41 #include "mali_memory_external.h"
42 #include "mali_memory_cow.h"
43 #include "mali_memory_block_alloc.h"
45 #include "mali_memory_swap_alloc.h"
48 * New memory system interface
/* init idr for backend memory */
52 struct idr mali_backend_idr;
53 struct mutex mali_idr_mutex;
55 /* init allocation manager */
56 int mali_memory_manager_init(struct mali_allocation_manager *mgr)
59 rwlock_init(&mgr->vm_lock);
60 mutex_init(&mgr->list_mutex);
63 INIT_LIST_HEAD(&mgr->head);
66 mgr->allocation_mgr_rb = RB_ROOT;
67 mgr->mali_allocation_num = 0;
/* Deinit the allocation manager.
 * In debug builds, verify that every allocation has been released.
 */
74 void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
76 /* check RB tree is empty */
77 MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
78 /* check allocation List */
79 MALI_DEBUG_ASSERT(list_empty(&mgr->head));
82 /* Prepare memory descriptor */
83 static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
85 mali_mem_allocation *mali_allocation;
88 mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
89 if (NULL == mali_allocation) {
90 MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n"));
94 MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
97 mali_allocation->flags = 0;
98 mali_allocation->session = session;
100 INIT_LIST_HEAD(&mali_allocation->list);
101 _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);
106 mutex_lock(&session->allocation_mgr.list_mutex);
107 list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
108 session->allocation_mgr.mali_allocation_num++;
109 mutex_unlock(&session->allocation_mgr.list_mutex);
111 return mali_allocation;
114 void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
116 MALI_DEBUG_ASSERT_POINTER(alloc);
117 MALI_DEBUG_ASSERT_POINTER(alloc->session);
118 mutex_lock(&alloc->session->allocation_mgr.list_mutex);
119 list_del(&alloc->list);
120 alloc->session->allocation_mgr.mali_allocation_num--;
121 mutex_unlock(&alloc->session->allocation_mgr.list_mutex);
126 int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
128 mali_mem_backend *mem_backend = NULL;
131 *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
132 if (NULL == *backend) {
133 MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
136 mem_backend = *backend;
137 mem_backend->size = psize;
138 mutex_init(&mem_backend->mutex);
139 INIT_LIST_HEAD(&mem_backend->list);
140 mem_backend->using_count = 0;
143 /* link backend with id */
144 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
146 if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
150 mutex_lock(&mali_idr_mutex);
151 ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
152 mutex_unlock(&mali_idr_mutex);
154 if (-ENOSPC == ret) {
161 mutex_lock(&mali_idr_mutex);
162 ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
163 mutex_unlock(&mali_idr_mutex);
166 MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
175 static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
177 mali_mem_backend *mem_backend = *backend;
179 mutex_lock(&mali_idr_mutex);
180 idr_remove(&mali_backend_idr, backend_handle);
181 mutex_unlock(&mali_idr_mutex);
186 mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address)
188 struct mali_vma_node *mali_vma_node = NULL;
189 mali_mem_backend *mem_bkend = NULL;
190 mali_mem_allocation *mali_alloc = NULL;
191 MALI_DEBUG_ASSERT_POINTER(session);
192 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
193 if (NULL == mali_vma_node) {
194 MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n"));
197 mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
198 /* Get backend memory & Map on CPU */
199 mutex_lock(&mali_idr_mutex);
200 mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
201 mutex_unlock(&mali_idr_mutex);
202 MALI_DEBUG_ASSERT(NULL != mem_bkend);
206 static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
208 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
210 mali_mem_allocation *mali_allocation = NULL;
211 mali_mem_os_mem tmp_os_mem;
212 s32 change_page_count;
214 MALI_DEBUG_ASSERT_POINTER(session);
215 MALI_DEBUG_ASSERT_POINTER(mem_backend);
216 MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
217 MALI_DEBUG_ASSERT(0 == physical_size % MALI_MMU_PAGE_SIZE);
219 mali_allocation = mem_backend->mali_allocation;
220 MALI_DEBUG_ASSERT_POINTER(mali_allocation);
222 MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
223 MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);
225 mutex_lock(&mem_backend->mutex);
228 if (physical_size > mem_backend->size) {
229 u32 add_size = physical_size - mem_backend->size;
231 MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
233 /* Allocate new pages from os mem */
234 retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);
237 if (-ENOMEM == retval) {
238 ret = _MALI_OSK_ERR_NOMEM;
240 ret = _MALI_OSK_ERR_FAULT;
242 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
243 goto failed_alloc_memory;
246 MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);
248 /* Resize the memory of the backend */
249 ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
252 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n"));
253 goto failed_resize_pages;
256 /*Resize cpu mapping */
257 if (NULL != mali_allocation->cpu_mapping.vma) {
258 ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size);
259 if (unlikely(ret != _MALI_OSK_ERR_OK)) {
260 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
265 /* Resize mali mapping */
266 _mali_osk_mutex_wait(session->memory_lock);
267 ret = mali_mem_mali_map_resize(mali_allocation, physical_size);
270 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
274 ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
275 mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
277 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
281 _mali_osk_mutex_signal(session->memory_lock);
283 u32 dec_size, page_count;
285 INIT_LIST_HEAD(&tmp_os_mem.pages);
286 tmp_os_mem.count = 0;
288 dec_size = mem_backend->size - physical_size;
289 MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE);
291 page_count = dec_size / MALI_MMU_PAGE_SIZE;
292 vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;
294 /* Resize the memory of the backend */
295 ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);
298 MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
299 goto failed_resize_pages;
302 /* Resize mali map */
303 _mali_osk_mutex_wait(session->memory_lock);
304 mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
305 _mali_osk_mutex_signal(session->memory_lock);
307 /* Zap cpu mapping */
308 if (0 != mali_allocation->cpu_mapping.addr) {
309 MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
310 zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
313 /* Free those extra pages */
314 mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
317 /* Resize memory allocation and memory backend */
318 change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
319 mali_allocation->psize = physical_size;
320 mem_backend->size = physical_size;
321 mutex_unlock(&mem_backend->mutex);
323 if (change_page_count > 0) {
324 atomic_add(change_page_count, &session->mali_mem_allocated_pages);
325 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
326 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
330 atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
333 return _MALI_OSK_ERR_OK;
336 _mali_osk_mutex_signal(session->memory_lock);
338 if (physical_size > mem_backend->size) {
339 mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
340 (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
342 mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
345 if (0 != tmp_os_mem.count)
346 mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
349 mutex_unlock(&mem_backend->mutex);
354 /* Set GPU MMU properties */
355 static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
357 if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
358 *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
360 *properties = MALI_MMU_FLAGS_DEFAULT;
364 _mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size)
366 mali_mem_backend *mem_backend = NULL;
367 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
368 mali_mem_allocation *mali_allocation = NULL;
369 u32 new_physical_size;
370 MALI_DEBUG_ASSERT_POINTER(session);
371 MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
373 /* Get the memory backend that need to be resize. */
374 mem_backend = mali_mem_backend_struct_search(session, mali_addr);
376 if (NULL == mem_backend) {
377 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
381 mali_allocation = mem_backend->mali_allocation;
383 MALI_DEBUG_ASSERT_POINTER(mali_allocation);
385 new_physical_size = add_size + mem_backend->size;
387 if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size))
390 MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size);
392 ret = mali_mem_resize(session, mem_backend, new_physical_size);
/**
 * @function _mali_ukk_mem_allocate - allocate mali memory
 */
400 _mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
402 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
403 mali_mem_backend *mem_backend = NULL;
404 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
406 mali_mem_allocation *mali_allocation = NULL;
407 struct mali_vma_node *mali_vma_node = NULL;
409 MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));
411 /* Check if the address is allocated
413 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
415 if (unlikely(mali_vma_node)) {
416 MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
417 return _MALI_OSK_ERR_FAULT;
420 *create mali memory allocation
423 mali_allocation = mali_mem_allocation_struct_create(session);
425 if (mali_allocation == NULL) {
426 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
427 return _MALI_OSK_ERR_NOMEM;
429 mali_allocation->psize = args->psize;
430 mali_allocation->vsize = args->vsize;
432 /* MALI_MEM_OS if need to support mem resize,
433 * or MALI_MEM_BLOCK if have dedicated memory,
437 if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
438 mali_allocation->type = MALI_MEM_SWAP;
439 } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
440 mali_allocation->type = MALI_MEM_OS;
441 mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
442 } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
443 mali_allocation->type = MALI_MEM_BLOCK;
445 mali_allocation->type = MALI_MEM_OS;
449 *add allocation node to RB tree for index
451 mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
452 mali_allocation->mali_vma_node.vm_node.size = args->vsize;
454 mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
456 mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
457 if (mali_allocation->backend_handle < 0) {
458 ret = _MALI_OSK_ERR_NOMEM;
459 MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
460 goto failed_alloc_backend;
464 mem_backend->mali_allocation = mali_allocation;
465 mem_backend->type = mali_allocation->type;
467 mali_allocation->mali_mapping.addr = args->gpu_vaddr;
469 /* set gpu mmu propery */
470 _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
471 /* do prepare for MALI mapping */
472 if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
473 _mali_osk_mutex_wait(session->memory_lock);
475 ret = mali_mem_mali_map_prepare(mali_allocation);
477 _mali_osk_mutex_signal(session->memory_lock);
478 goto failed_prepare_map;
480 _mali_osk_mutex_signal(session->memory_lock);
483 if (mali_allocation->psize == 0) {
484 mem_backend->os_mem.count = 0;
485 INIT_LIST_HEAD(&mem_backend->os_mem.pages);
489 if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
490 mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
491 mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
492 /* init for defer bind backend*/
493 mem_backend->os_mem.count = 0;
494 INIT_LIST_HEAD(&mem_backend->os_mem.pages);
499 *allocate physical memory
501 if (likely(mali_allocation->psize > 0)) {
503 if (mem_backend->type == MALI_MEM_OS) {
504 retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
505 } else if (mem_backend->type == MALI_MEM_BLOCK) {
506 /* try to allocated from BLOCK memory first, then try OS memory if failed.*/
507 if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
508 retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
509 mem_backend->type = MALI_MEM_OS;
510 mali_allocation->type = MALI_MEM_OS;
512 } else if (MALI_MEM_SWAP == mem_backend->type) {
513 retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
515 /* ONLY support mem_os type */
516 MALI_DEBUG_ASSERT(0);
520 ret = _MALI_OSK_ERR_NOMEM;
521 MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
522 goto failed_alloc_pages;
529 if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
530 _mali_osk_mutex_wait(session->memory_lock);
533 if (mem_backend->type == MALI_MEM_OS) {
534 ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
535 mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
537 } else if (mem_backend->type == MALI_MEM_BLOCK) {
538 mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
539 mali_allocation->mali_mapping.properties);
540 } else if (mem_backend->type == MALI_MEM_SWAP) {
541 ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
542 mali_allocation->mali_mapping.properties);
543 } else { /* unsupport type */
544 MALI_DEBUG_ASSERT(0);
547 _mali_osk_mutex_signal(session->memory_lock);
550 if (MALI_MEM_OS == mem_backend->type) {
551 atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
552 } else if (MALI_MEM_BLOCK == mem_backend->type) {
553 atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
555 MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
556 atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
557 atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
560 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
561 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
563 return _MALI_OSK_ERR_OK;
566 mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
568 mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
569 failed_alloc_backend:
571 mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
572 mali_mem_allocation_struct_destory(mali_allocation);
578 _mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
580 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
581 u32 vaddr = args->gpu_vaddr;
582 mali_mem_allocation *mali_alloc = NULL;
583 struct mali_vma_node *mali_vma_node = NULL;
585 /* find mali allocation structure by vaddress*/
586 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
587 if (NULL == mali_vma_node) {
588 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
589 return _MALI_OSK_ERR_INVALID_ARGS;
591 MALI_DEBUG_ASSERT(NULL != mali_vma_node);
592 mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
595 /* check ref_count */
596 args->free_pages_nr = mali_allocation_unref(&mali_alloc);
598 return _MALI_OSK_ERR_OK;
/**
 * Function _mali_ukk_mem_bind -- bind an external memory to a new GPU address
 * It will allocate a new mem allocation and bind external memory to it.
 * Supported backend types are:
 * _MALI_MEMORY_BIND_BACKEND_UMP
 * _MALI_MEMORY_BIND_BACKEND_DMA_BUF
 * _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
 * CPU access is not supported yet
 */
611 _mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
613 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
614 mali_mem_backend *mem_backend = NULL;
615 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
616 mali_mem_allocation *mali_allocation = NULL;
617 MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));
620 * allocate mali allocation.
622 mali_allocation = mali_mem_allocation_struct_create(session);
624 if (mali_allocation == NULL) {
625 return _MALI_OSK_ERR_NOMEM;
627 mali_allocation->psize = args->size;
628 mali_allocation->vsize = args->size;
629 mali_allocation->mali_mapping.addr = args->vaddr;
631 /* add allocation node to RB tree for index */
632 mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
633 mali_allocation->mali_vma_node.vm_node.size = args->size;
634 mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
636 /* allocate backend*/
637 if (mali_allocation->psize > 0) {
638 mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
639 if (mali_allocation->backend_handle < 0) {
640 goto Failed_alloc_backend;
644 goto Failed_alloc_backend;
647 mem_backend->size = mali_allocation->psize;
648 mem_backend->mali_allocation = mali_allocation;
650 switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
651 case _MALI_MEMORY_BIND_BACKEND_UMP:
652 #if defined(CONFIG_MALI400_UMP)
653 mali_allocation->type = MALI_MEM_UMP;
654 mem_backend->type = MALI_MEM_UMP;
655 ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
656 args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
657 if (_MALI_OSK_ERR_OK != ret) {
658 MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
659 goto Failed_bind_backend;
662 MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
663 goto Failed_bind_backend;
666 case _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
667 #if defined(CONFIG_DMA_SHARED_BUFFER)
668 mali_allocation->type = MALI_MEM_DMA_BUF;
669 mem_backend->type = MALI_MEM_DMA_BUF;
670 ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
671 args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
672 if (_MALI_OSK_ERR_OK != ret) {
673 MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
674 goto Failed_bind_backend;
677 MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
678 goto Failed_bind_backend;
681 case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
683 MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n"));
684 goto Failed_bind_backend;
687 case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
688 mali_allocation->type = MALI_MEM_EXTERNAL;
689 mem_backend->type = MALI_MEM_EXTERNAL;
690 ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
691 args->mem_union.bind_ext_memory.flags);
692 if (_MALI_OSK_ERR_OK != ret) {
693 MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
694 goto Failed_bind_backend;
698 case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
700 MALI_DEBUG_PRINT_ERROR(("External cow memory type not supported !\n"));
701 goto Failed_bind_backend;
705 MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported !\n"));
706 goto Failed_bind_backend;
709 MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
710 atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
711 return _MALI_OSK_ERR_OK;
714 mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
716 Failed_alloc_backend:
717 mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
718 mali_mem_allocation_struct_destory(mali_allocation);
720 MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
/**
 * Function _mali_ukk_mem_unbind -- unbind external memory from a GPU address
 * This function unbinds the backend memory and frees the allocation.
 * No ref_count is kept for this type of memory.
 */
730 _mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
733 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
734 mali_mem_allocation *mali_allocation = NULL;
735 struct mali_vma_node *mali_vma_node = NULL;
736 u32 mali_addr = args->vaddr;
737 MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));
739 /* find the allocation by vaddr */
740 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
741 if (likely(mali_vma_node)) {
742 MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
743 mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
745 MALI_DEBUG_ASSERT(NULL != mali_vma_node);
746 return _MALI_OSK_ERR_INVALID_ARGS;
749 if (NULL != mali_allocation)
750 /* check ref_count */
751 mali_allocation_unref(&mali_allocation);
752 return _MALI_OSK_ERR_OK;
756 * Function _mali_ukk_mem_cow -- COW for an allocation
757 * This function allocate new pages for a range (range, range+size) of allocation
758 * And Map it(keep use the not in range pages from target allocation ) to an GPU vaddr
760 _mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
762 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
763 mali_mem_backend *target_backend = NULL;
764 mali_mem_backend *mem_backend = NULL;
765 struct mali_vma_node *mali_vma_node = NULL;
766 mali_mem_allocation *mali_allocation = NULL;
768 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
769 /* Get the target backend for cow */
770 target_backend = mali_mem_backend_struct_search(session, args->target_handle);
772 if (NULL == target_backend || 0 == target_backend->size) {
773 MALI_DEBUG_ASSERT_POINTER(target_backend);
774 MALI_DEBUG_ASSERT(0 != target_backend->size);
778 /*Cow not support resized mem */
779 MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags));
781 /* Check if the new mali address is allocated */
782 mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);
784 if (unlikely(mali_vma_node)) {
785 MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
789 /* create new alloction for COW*/
790 mali_allocation = mali_mem_allocation_struct_create(session);
791 if (mali_allocation == NULL) {
792 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
793 return _MALI_OSK_ERR_NOMEM;
795 mali_allocation->psize = args->target_size;
796 mali_allocation->vsize = args->target_size;
797 mali_allocation->type = MALI_MEM_COW;
799 /*add allocation node to RB tree for index*/
800 mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
801 mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
802 mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
804 /* create new backend for COW memory */
805 mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
806 if (mali_allocation->backend_handle < 0) {
807 ret = _MALI_OSK_ERR_NOMEM;
808 MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
809 goto failed_alloc_backend;
811 mem_backend->mali_allocation = mali_allocation;
812 mem_backend->type = mali_allocation->type;
814 if (target_backend->type == MALI_MEM_SWAP ||
815 (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) {
816 mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED;
818 * CoWed swap backends couldn't be mapped as non-linear vma, because if one
819 * vma is set with flag VM_NONLINEAR, the vma->vm_private_data will be used by kernel,
820 * while in mali driver, we use this variable to store the pointer of mali_allocation, so there
822 * To resolve this problem, we have to do some fake things, we reserved about 64MB
823 * space from index 0, there isn't really page's index will be set from 0 to (64MB>>PAGE_SHIFT_NUM),
824 * and all of CoWed swap memory backends' start_idx will be assigned with 0, and these
825 * backends will be mapped as linear and will add to priority tree of global swap file, while
826 * these vmas will never be found by using normal page->index, these pages in those vma
827 * also couldn't be swapped out.
829 mem_backend->start_idx = 0;
832 /* Add the target backend's cow count, also allocate new pages for COW backend from os mem
833 *for a modified range and keep the page which not in the modified range and Add ref to it
835 MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x; cow_addr: 0x%x, size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
836 mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));
838 ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
839 if (_MALI_OSK_ERR_OK != ret) {
840 MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
847 mali_allocation->mali_mapping.addr = args->vaddr;
848 /* set gpu mmu propery */
849 _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
851 _mali_osk_mutex_wait(session->memory_lock);
853 ret = mali_mem_mali_map_prepare(mali_allocation);
855 MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
859 if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
860 mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
863 _mali_osk_mutex_signal(session->memory_lock);
865 mutex_lock(&target_backend->mutex);
866 target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED;
867 mutex_unlock(&target_backend->mutex);
869 atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
870 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
871 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
873 return _MALI_OSK_ERR_OK;
876 _mali_osk_mutex_signal(session->memory_lock);
877 mali_mem_cow_release(mem_backend, MALI_FALSE);
878 mem_backend->cow_mem.count = 0;
880 mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
881 failed_alloc_backend:
882 mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
883 mali_mem_allocation_struct_destory(mali_allocation);
888 _mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
890 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
891 mali_mem_backend *mem_backend = NULL;
892 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
894 MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
895 /* Get the backend that need to be modified. */
896 mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
898 if (NULL == mem_backend || 0 == mem_backend->size) {
899 MALI_DEBUG_ASSERT_POINTER(mem_backend);
900 MALI_DEBUG_ASSERT(0 != mem_backend->size);
904 MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type);
906 ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
907 args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
908 if (_MALI_OSK_ERR_OK != ret)
910 _mali_osk_mutex_wait(session->memory_lock);
911 if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
912 mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
914 _mali_osk_mutex_signal(session->memory_lock);
916 atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
917 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
918 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
921 return _MALI_OSK_ERR_OK;
925 _mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args)
927 mali_mem_backend *mem_backend = NULL;
928 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
930 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
932 MALI_DEBUG_ASSERT_POINTER(session);
933 MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
934 MALI_DEBUG_ASSERT(0 == args->psize % MALI_MMU_PAGE_SIZE);
936 /* Get the memory backend that need to be resize. */
937 mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
939 if (NULL == mem_backend) {
940 MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
944 MALI_DEBUG_ASSERT(args->psize != mem_backend->size);
946 ret = mali_mem_resize(session, mem_backend, args->psize);
951 _mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
953 args->memory_usage = _mali_ukk_report_memory_usage();
954 if (0 != args->vaddr) {
955 mali_mem_backend *mem_backend = NULL;
956 struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
957 /* Get the backend that need to be modified. */
958 mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
959 if (NULL == mem_backend) {
960 MALI_DEBUG_ASSERT_POINTER(mem_backend);
961 return _MALI_OSK_ERR_FAULT;
964 if (MALI_MEM_COW == mem_backend->type)
965 args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
967 return _MALI_OSK_ERR_OK;