/*
 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/idr.h>
#include "mali_osk.h"
#include "mali_executor.h"

#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_memory_block_alloc.h"
#include "mali_memory_util.h"
#include "mali_memory_virtual.h"
#include "mali_memory_manager.h"
#include "mali_memory_cow.h"
#include "mali_memory_swap_alloc.h"
#include "mali_memory_defer_bind.h"
extern unsigned int mali_dedicated_mem_size;
extern unsigned int mali_shared_mem_size;

#define MALI_VM_NUM_FAULT_PREFETCH (0x8)
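
/*
 * VMA open/close callbacks. Each additional CPU mapping of an allocation
 * takes a reference on the mali_mem_allocation stored in vm_private_data,
 * and drops that reference again when the mapping goes away.
 */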
static void mali_mem_vma_open(struct vm_area_struct *vma)
{
        mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
        MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));

        /* If the allocation needs to be shared, take a reference here */
        mali_allocation_ref(alloc);
}
static void mali_mem_vma_close(struct vm_area_struct *vma)
{
        /* If the allocation was shared, drop the reference taken in open */
        mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;

        mali_allocation_unref(&alloc);
        vma->vm_private_data = NULL;
}
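
/*
 * Page fault handler for Mali CPU mappings. It looks the memory backend up
 * through the global IDR, then either performs copy-on-write allocation on
 * demand (MALI_MEM_COW) or swaps the faulting page in (MALI_MEM_SWAP and
 * swap-backed COW). Any other backend type reaching this handler indicates
 * a bug in the caller.
 */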
static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
        mali_mem_backend *mem_bkend = NULL;
        int ret;
        int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;

        unsigned long address = (unsigned long)vmf->virtual_address;
        MALI_DEBUG_ASSERT(alloc->backend_handle);
        MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);

        /* Get backend memory & map on CPU */
        mutex_lock(&mali_idr_mutex);
        if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
                MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
                mutex_unlock(&mali_idr_mutex);
                return VM_FAULT_SIGBUS;
        }
        mutex_unlock(&mali_idr_mutex);
        MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);
        if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED !=
                        (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) &&
            (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) {
                /* Check if we should use the page fault to do COW */
                MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%lx\n", address));
                mutex_lock(&mem_bkend->mutex);
                ret = mali_mem_cow_allocate_on_demand(mem_bkend,
                                                      (address - vma->vm_start) / PAGE_SIZE);
                mutex_unlock(&mem_bkend->mutex);

                if (ret != _MALI_OSK_ERR_OK) {
                        return VM_FAULT_OOM;
                }
                prefetch_num = 1;

                /* Handle the CPU mapping of a COW-modified range: the mapping is
                 * zapped in cow_modify_range, so CPU access triggers a page fault,
                 * and here we map the pages back for the CPU. */
                mutex_lock(&mem_bkend->mutex);
                ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num);
                mutex_unlock(&mem_bkend->mutex);

                if (unlikely(ret != _MALI_OSK_ERR_OK)) {
                        return VM_FAULT_SIGBUS;
                }
        } else if ((mem_bkend->type == MALI_MEM_SWAP) ||
                   (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
                u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE;
                int ret = _MALI_OSK_ERR_OK;

                mutex_lock(&mem_bkend->mutex);
                if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
                        ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
                } else {
                        ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
                }
                mutex_unlock(&mem_bkend->mutex);

                if (ret != _MALI_OSK_ERR_OK) {
                        MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%lx\n", address));
                        return VM_FAULT_OOM;
                } else {
                        return VM_FAULT_LOCKED;
                }
        } else {
                MALI_PRINT_ERROR(("Mali vma fault! Should never happen; indicates a logic error in the caller.\n"));
        }
        return VM_FAULT_NOPAGE;
}
static struct vm_operations_struct mali_kernel_vm_ops = {
        .open = mali_mem_vma_open,
        .close = mali_mem_vma_close,
        .fault = mali_mem_vma_fault,
};
/**
 * Map a Mali allocation into CPU address space.
 *
 * Supported backend types:
 * -- MALI_MEM_OS
 * -- need to add COW?
 * Not supported backend types:
 * -- _MALI_MEMORY_BIND_BACKEND_UMP
 * -- _MALI_MEMORY_BIND_BACKEND_DMA_BUF
 * -- _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
 */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct mali_session_data *session;
        mali_mem_allocation *mali_alloc = NULL;
        u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
        struct mali_vma_node *mali_vma_node = NULL;
        mali_mem_backend *mem_bkend = NULL;
        int ret = -EFAULT;

        session = (struct mali_session_data *)filp->private_data;
        if (NULL == session) {
                MALI_PRINT_ERROR(("mmap called without any session data available\n"));
                return -EFAULT;
        }

        MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
                             (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
                             (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
        /* Operations used on any memory system */
        /* Nothing needs to be done in vm open/close for now */

        /* Find the mali allocation structure by virtual address */
        mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
        if (likely(mali_vma_node)) {
                mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
                MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
                if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
                        /* Only the start address of an allocation may be used for mmap */
                        MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n"));
                        return -EFAULT;
                }
        } else {
                MALI_DEBUG_ASSERT(NULL == mali_vma_node);
                return -EFAULT;
        }

        mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;

        if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
                MALI_DEBUG_PRINT(1, ("ERROR: trying to access varying memory by CPU!\n"));
                return -EFAULT;
        }
        /* Get backend memory & map on CPU */
        mutex_lock(&mali_idr_mutex);
        if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
                MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
                mutex_unlock(&mali_idr_mutex);
                return -EFAULT;
        }
        mutex_unlock(&mali_idr_mutex);
        if (!(MALI_MEM_SWAP == mali_alloc->type ||
              (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
                /* Set some bits which indicate that the memory is IO memory, meaning
                 * that no paging is to be performed and the memory should not be
                 * included in crash dumps, and that the memory is reserved, meaning
                 * that it is present and can never be paged out (see also the
                 * previous entry). */
                vma->vm_flags |= VM_IO;
                vma->vm_flags |= VM_DONTCOPY;
                vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
                vma->vm_flags |= VM_RESERVED;
#else
                vma->vm_flags |= VM_DONTDUMP;
                vma->vm_flags |= VM_DONTEXPAND;
#endif
        } else if (MALI_MEM_SWAP == mali_alloc->type) {
                vma->vm_pgoff = mem_bkend->start_idx;
        }
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        vma->vm_ops = &mali_kernel_vm_ops;

        mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;

        /* If it's a copy-on-write mapping, map it read-only */
        if (!(vma->vm_flags & VM_WRITE)) {
                MALI_DEBUG_PRINT(4, ("mmap allocation with read only!\n"));
                /* Add VM_WRITE to vm_flags so do_page_fault accepts a later write
                 * fault; the read-only PTEs then trigger the COW path above. */
                vma->vm_flags |= VM_WRITE | VM_READ;
                vma->vm_page_prot = PAGE_READONLY;
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
        }
        if (mem_bkend->type == MALI_MEM_OS) {
                ret = mali_mem_os_cpu_map(mem_bkend, vma);
        } else if (mem_bkend->type == MALI_MEM_COW &&
                   (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
                ret = mali_mem_cow_cpu_map(mem_bkend, vma);
        } else if (mem_bkend->type == MALI_MEM_BLOCK) {
                ret = mali_mem_block_cpu_map(mem_bkend, vma);
        } else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW &&
                        (MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
                /* For swappable memory, the CPU page table will be created by the page fault handler. */
                ret = 0;
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory!\n"));
                return -EFAULT;
        }

        if (ret != 0) {
                MALI_DEBUG_PRINT(1, ("ret != 0\n"));
                return -EFAULT;
        }
        MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);

        vma->vm_private_data = (void *)mali_alloc;
        mali_alloc->cpu_mapping.vma = vma;

        mali_allocation_ref(mali_alloc);

        return 0;
}
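
/*
 * Map an allocation into the session's Mali (GPU) page tables. If the
 * allocation was created with a guard page, one extra MMU page is included
 * in the mapped size.
 */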
_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
{
        u32 size = descriptor->psize;
        struct mali_session_data *session = descriptor->session;

        MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

        /* Map into this session's page tables */

        if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
                size += MALI_MMU_PAGE_SIZE;
        }

        return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
}
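
/*
 * Grow the Mali page table mapping of an existing allocation. Only growing
 * is handled here: if new_size does not exceed the current size, the call
 * is a no-op that returns _MALI_OSK_ERR_OK.
 */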
_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size)
{
        u32 old_size = descriptor->psize;
        struct mali_session_data *session = descriptor->session;

        MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

        if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
                new_size += MALI_MMU_PAGE_SIZE;
        }

        if (new_size > old_size) {
                MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size);
                return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size);
        }
        return _MALI_OSK_ERR_OK;
}
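
/*
 * Remove a range from the session's Mali page tables. The unmap is followed
 * by a zap of all active groups so no stale MMU/L2 entries survive.
 */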
void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
{
        if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
                size += MALI_MMU_PAGE_SIZE;
        }

        /* Unmap and flush L2 */
        mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
        mali_executor_zap_all_active(session);
}
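
/*
 * Report current memory usage: the dedicated block allocator's usage (when
 * dedicated memory is configured) plus the OS allocator's usage.
 */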
u32 _mali_ukk_report_memory_usage(void)
{
        u32 sum = 0;

        if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
                sum += mali_mem_block_allocator_stat();
        }

        sum += mali_mem_os_stat();

        return sum;
}
u32 _mali_ukk_report_total_memory_size(void)
{
        return mali_dedicated_mem_size + mali_shared_mem_size;
}
/**
 * Per-session memory descriptor mapping table sizes
 */
#define MALI_MEM_DESCRIPTORS_INIT 64
#define MALI_MEM_DESCRIPTORS_MAX 65536
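
/*
 * Set up per-session memory state: the ordered memory lock, the COW lock,
 * and the allocation manager. If the COW lock cannot be created, the memory
 * lock is torn down and the session data freed before returning the error.
 */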
_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
{
        MALI_DEBUG_PRINT(5, ("Memory session begin\n"));

        session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
                                                         _MALI_OSK_LOCK_ORDER_MEM_SESSION);

        if (NULL == session_data->memory_lock) {
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }

        session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
        if (NULL == session_data->cow_lock) {
                _mali_osk_mutex_term(session_data->memory_lock);
                _mali_osk_free(session_data);
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }

        mali_memory_manager_init(&session_data->allocation_mgr);

        MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
        MALI_SUCCESS;
}
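
/*
 * Tear down per-session memory state: free all outstanding allocations,
 * uninitialize the allocation manager, then release the session locks.
 */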
void mali_memory_session_end(struct mali_session_data *session)
{
        MALI_DEBUG_PRINT(3, ("MMU session end\n"));

        if (NULL == session) {
                MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
                return;
        }
        /* Free all allocations owned by the session */
        mali_free_session_allocations(session);
        /* Run consistency checks in uninit */
        mali_memory_manager_uninit(&session->allocation_mgr);

        /* Free the locks */
        _mali_osk_mutex_term(session->memory_lock);
        _mali_osk_mutex_term(session->cow_lock);
}
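
/*
 * Driver-wide memory subsystem init: the backend IDR and its lock, then the
 * swap, OS and defer-bind managers. Errors from the sub-inits are propagated
 * to the caller.
 */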
_mali_osk_errcode_t mali_memory_initialize(void)
{
        _mali_osk_errcode_t err;

        idr_init(&mali_backend_idr);
        mutex_init(&mali_idr_mutex);

        err = mali_mem_swap_init();
        if (err != _MALI_OSK_ERR_OK) {
                return err;
        }
        err = mali_mem_os_init();
        if (_MALI_OSK_ERR_OK == err) {
                err = mali_mem_defer_bind_manager_init();
        }

        return err;
}
void mali_memory_terminate(void)
{
        mali_mem_swap_term();
        mali_mem_defer_bind_manager_destory();
        mali_mem_os_term();
        if (mali_memory_have_dedicated_memory()) {
                mali_mem_block_allocator_destroy();
        }
}
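
/*
 * mali_page_node helpers. A page node wraps one page of backing store and
 * records which allocator it came from: an OS page (MALI_PAGE_NODE_OS), a
 * block from the dedicated allocator (MALI_PAGE_NODE_BLOCK), or a swap item
 * (MALI_PAGE_NODE_SWAP). The accessors below dispatch on that type.
 */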
struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
{
        mali_page_node *page_node = NULL;

        page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
        MALI_DEBUG_ASSERT(NULL != page_node);

        if (page_node) {
                page_node->type = type;
                INIT_LIST_HEAD(&page_node->list);
        }

        return page_node;
}
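
/*
 * Reference counting is delegated to the underlying allocator:
 * get_page()/put_page() for OS pages, the block allocator's own counters
 * for block pages, and an atomic counter in the swap item for swap pages.
 */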
void _mali_page_node_ref(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                /* Take a reference on this page */
                get_page(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                mali_mem_block_add_ref(node);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                atomic_inc(&node->swap_it->ref_count);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
        }
}
void _mali_page_node_unref(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                /* Drop the reference on this page */
                put_page(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                mali_mem_block_dec_ref(node);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
        }
}
void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
{
        MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
        node->page = page;
}
void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item)
{
        MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type);
        node->swap_it = item;
}
void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
{
        MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
        node->blk_it = item;
}
int _mali_page_node_get_ref_count(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                /* Get the ref count of this page */
                return page_count(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                return mali_mem_block_get_ref_count(node);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                return atomic_read(&node->swap_it->ref_count);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
        }
        return -1;
}
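
/*
 * For OS pages the DMA address is assumed to have been stashed in the
 * page's private field (set_page_private()) when the page was mapped for
 * the device, so page_private() returns it directly.
 */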
dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                return page_private(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                return _mali_blk_item_get_phy_addr(node->blk_it);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                return node->swap_it->dma_addr;
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
        }
        return 0;
}
unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
{
        if (node->type == MALI_PAGE_NODE_OS) {
                return page_to_pfn(node->page);
        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
                /* Get the PFN for a BLOCK page */
                return _mali_blk_item_get_pfn(node->blk_it);
        } else if (node->type == MALI_PAGE_NODE_SWAP) {
                return page_to_pfn(node->swap_it->page);
        } else {
                MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
        }
        return 0;
}