/*
 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>

#include <asm/outercache.h>

#include <asm/dma-mapping.h>

#include "mali_memory.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
#include "mali_osk.h"
#include "mali_kernel_linux.h"
#include "mali_memory_defer_bind.h"
#include "mali_executor.h"
#include "mali_scheduler.h"
#include "mali_gp_job.h"
34 mali_defer_bind_manager *mali_dmem_man = NULL;
\r
36 static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job)
\r
38 return gp_job->required_varying_memsize / _MALI_OSK_MALI_PAGE_SIZE;
\r
41 _mali_osk_errcode_t mali_mem_defer_bind_manager_init(void)
\r
43 mali_dmem_man = _mali_osk_calloc(1, sizeof(struct mali_defer_bind_manager));
\r
45 return _MALI_OSK_ERR_NOMEM;
\r
47 atomic_set(&mali_dmem_man->num_used_pages, 0);
\r
48 atomic_set(&mali_dmem_man->num_dmem, 0);
\r
50 return _MALI_OSK_ERR_OK;
\r
54 void mali_mem_defer_bind_manager_destory(void)
\r
56 if (mali_dmem_man) {
\r
57 MALI_DEBUG_ASSERT(0 == atomic_read(&mali_dmem_man->num_dmem));
\r
58 kfree(mali_dmem_man);
\r
60 mali_dmem_man = NULL;
\r
64 /*allocate pages from OS memory*/
\r
65 _mali_osk_errcode_t mali_mem_defer_alloc_mem(u32 require, struct mali_session_data *session, mali_defer_mem_block *dblock)
\r
68 u32 num_pages = require;
\r
69 mali_mem_os_mem os_mem;
\r
71 retval = mali_mem_os_alloc_pages(&os_mem, num_pages * _MALI_OSK_MALI_PAGE_SIZE);
\r
73 /* add to free pages list */
\r
75 MALI_DEBUG_PRINT(4, ("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages));
\r
76 list_splice(&os_mem.pages, &dblock->free_pages);
\r
77 atomic_add(os_mem.count, &dblock->num_free_pages);
\r
78 atomic_add(os_mem.count, &session->mali_mem_allocated_pages);
\r
79 if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
\r
80 session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
\r
82 return _MALI_OSK_ERR_OK;
\r
84 return _MALI_OSK_ERR_FAULT;
\r
87 _mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock)
\r
92 return _MALI_OSK_ERR_FAULT;
\r
94 require_page = mali_dmem_get_gp_varying_size(next_gp_job);
\r
96 MALI_DEBUG_PRINT(4, ("mali_mem_defer_prepare_mem_work, require alloc page 0x%x\n",
\r
98 /* allocate more pages from OS */
\r
99 if (_MALI_OSK_ERR_OK != mali_mem_defer_alloc_mem(require_page, next_gp_job->session, dblock)) {
\r
100 MALI_DEBUG_PRINT(1, ("ERROR##mali_mem_defer_prepare_mem_work, allocate page failed!!"));
\r
101 return _MALI_OSK_ERR_NOMEM;
\r
104 next_gp_job->bind_flag = MALI_DEFER_BIND_MEMORY_PREPARED;
\r
106 return _MALI_OSK_ERR_OK;
\r
110 /* do preparetion for allocation before defer bind */
\r
111 _mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize)
\r
113 mali_mem_backend *mem_bkend = NULL;
\r
114 struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list));
\r
115 if (NULL == bk_list)
\r
116 return _MALI_OSK_ERR_FAULT;
\r
118 INIT_LIST_HEAD(&bk_list->node);
\r
119 /* Get backend memory */
\r
120 mutex_lock(&mali_idr_mutex);
\r
121 if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
\r
122 MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n"));
\r
123 mutex_unlock(&mali_idr_mutex);
\r
124 _mali_osk_free(bk_list);
\r
125 return _MALI_OSK_ERR_FAULT;
\r
127 mutex_unlock(&mali_idr_mutex);
\r
129 /* If the mem backend has already been bound, no need to bind again.*/
\r
130 if (mem_bkend->os_mem.count > 0) {
\r
131 _mali_osk_free(bk_list);
\r
132 return _MALI_OSK_ERR_OK;
\r
135 MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%x vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start));
\r
137 INIT_LIST_HEAD(&mem_bkend->os_mem.pages);
\r
139 bk_list->bkend = mem_bkend;
\r
140 bk_list->vaddr = alloc->mali_vma_node.vm_node.start;
\r
141 bk_list->session = alloc->session;
\r
142 bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE;
\r
143 *required_varying_memsize += mem_bkend->size;
\r
144 MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
\r
146 /* add to job to do list */
\r
147 list_add(&bk_list->node, list);
\r
149 return _MALI_OSK_ERR_OK;
\r
154 /* bind phyiscal memory to allocation
\r
155 This function will be called in IRQ handler*/
\r
156 static _mali_osk_errcode_t mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node,
\r
157 struct list_head *pages)
\r
159 struct mali_session_data *session = bk_node->session;
\r
160 mali_mem_backend *mem_bkend = bk_node->bkend;
\r
161 MALI_DEBUG_PRINT(4, ("mali_mem_defer_bind_allocation, bind bkend = %x page num=0x%x vaddr=%x session=%x\n", mem_bkend, bk_node->page_num, bk_node->vaddr, session));
\r
163 MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
\r
164 list_splice(pages, &mem_bkend->os_mem.pages);
\r
165 mem_bkend->os_mem.count = bk_node->page_num;
\r
167 if (mem_bkend->type == MALI_MEM_OS) {
\r
168 mali_mem_os_mali_map(&mem_bkend->os_mem, session, bk_node->vaddr, 0,
\r
169 mem_bkend->os_mem.count, MALI_MMU_FLAGS_DEFAULT);
\r
172 bk_node->flag = MALI_DEFER_BIND_MEMORY_BINDED;
\r
173 mem_bkend->flags &= ~MALI_MEM_BACKEND_FLAG_NOT_BINDED;
\r
174 mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_BINDED;
\r
175 return _MALI_OSK_ERR_OK;
\r
179 static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_block *dblock)
\r
182 struct mali_page_node *m_page, *m_tmp;
\r
184 if (atomic_read(&dblock->num_free_pages) < count) {
\r
187 list_for_each_entry_safe(m_page, m_tmp, &dblock->free_pages, list) {
\r
189 list_move_tail(&m_page->list, pages);
\r
195 MALI_DEBUG_ASSERT(i == count);
\r
196 atomic_sub(count, &dblock->num_free_pages);
\r
202 /* called in job start IOCTL to bind physical memory for each allocations
\r
203 @ bk_list backend list to do defer bind
\r
204 @ pages page list to do this bind
\r
205 @ count number of pages
\r
207 _mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp,
\r
208 struct mali_defer_mem_block *dmem_block)
\r
210 struct mali_defer_mem *dmem = NULL;
\r
211 struct mali_backend_bind_list *bkn, *bkn_tmp;
\r
214 if (gp->required_varying_memsize != (atomic_read(&dmem_block->num_free_pages) * _MALI_OSK_MALI_PAGE_SIZE)) {
\r
215 MALI_DEBUG_PRINT_ERROR(("#BIND: The memsize of varying buffer not match to the pagesize of the dmem_block!!## \n"));
\r
216 return _MALI_OSK_ERR_FAULT;
\r
219 MALI_DEBUG_PRINT(4, ("#BIND: GP job=%x## \n", gp));
\r
220 dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem));
\r
222 INIT_LIST_HEAD(&dmem->node);
\r
225 return _MALI_OSK_ERR_NOMEM;
\r
228 atomic_add(1, &mali_dmem_man->num_dmem);
\r
229 /* for each bk_list backend, do bind */
\r
230 list_for_each_entry_safe(bkn, bkn_tmp , &gp->vary_todo, node) {
\r
231 INIT_LIST_HEAD(&pages);
\r
232 if (likely(mali_mem_defer_get_free_page_list(bkn->page_num, &pages, dmem_block))) {
\r
233 list_del(&bkn->node);
\r
234 mali_mem_defer_bind_allocation(bkn, &pages);
\r
235 _mali_osk_free(bkn);
\r
237 /* not enough memory will not happen */
\r
238 MALI_DEBUG_PRINT_ERROR(("#BIND: NOT enough memory when binded !!## \n"));
\r
239 _mali_osk_free(gp->dmem);
\r
240 return _MALI_OSK_ERR_NOMEM;
\r
244 if (!list_empty(&gp->vary_todo)) {
\r
245 MALI_DEBUG_PRINT_ERROR(("#BIND: The deferbind backend list isn't empty !!## \n"));
\r
246 _mali_osk_free(gp->dmem);
\r
247 return _MALI_OSK_ERR_FAULT;
\r
250 dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED;
\r
252 return _MALI_OSK_ERR_OK;
\r
255 void mali_mem_defer_dmem_free(struct mali_gp_job *gp)
\r
258 atomic_dec(&mali_dmem_man->num_dmem);
\r
259 _mali_osk_free(gp->dmem);
\r