/*
 * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
#ifdef CONFIG_ARM
#include <asm/outercache.h>
#endif
#include <asm/dma-mapping.h>

#include "mali_memory.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
#include "mali_osk.h"
#include "mali_kernel_linux.h"
#include "mali_memory_cow.h"
#include "mali_memory_block_alloc.h"
#include "mali_memory_swap_alloc.h"

/**
* allocate a page for the COW backend and flush the cache
*/
static struct page *mali_mem_cow_alloc_page(void)
{
	int ret = 0;
	mali_mem_os_mem os_mem;
	struct mali_page_node *node;
	struct page *new_page;

	INIT_LIST_HEAD(&os_mem.pages);
	os_mem.count = 0;

	/* allocate pages from os mem */
	ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);

	if (ret) {
		return NULL;
	}

	MALI_DEBUG_ASSERT(1 == os_mem.count);

	/* take the single page out of the os_mem list and free its wrapper node */
	node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);
	new_page = node->page;
	node->page = NULL;

	list_del(&node->list);
	kfree(node);

	return new_page;
}
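
/*
 * Illustrative sketch (not part of the driver): mali_mem_cow_alloc_page()
 * hands out exactly one flushed OS page per call, so a caller that needs
 * several pages has to loop and clean up on failure. The helper name,
 * "pages" and "nr" below are assumptions for the example only.
 *
 *	static int example_alloc_cow_pages(struct page **pages, int nr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nr; i++) {
 *			pages[i] = mali_mem_cow_alloc_page();
 *			if (NULL == pages[i]) {
 *				while (--i >= 0)
 *					__free_page(pages[i]);
 *				return -ENOMEM;
 *			}
 *		}
 *		return 0;
 *	}
 */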

static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size)
{
	MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||
			  MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type);

	if (MALI_MEM_OS == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->os_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);
		return &target_bk->os_mem.pages;
	} else if (MALI_MEM_COW == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->cow_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);
		return &target_bk->cow_mem.pages;
	} else if (MALI_MEM_BLOCK == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->block_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);
		return &target_bk->block_mem.pfns;
	} else if (MALI_MEM_SWAP == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->swap_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count);
		return &target_bk->swap_mem.pages;
	}

	return NULL;
}
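
/*
 * Worked example for the bounds asserts above, assuming 4K pages:
 * target_offset = 0x3000 and target_size = 0x2000 describe page indices
 * 3 and 4 of the target backend, so (0x2000 + 0x3000) / 0x1000 = 5 must
 * not exceed the backend's page count.
 */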

/**
* Do COW for os memory - also supports COW of memory from the block (bank) allocator.
* range_start/range_size may both be zero, meaning the caller will invoke
* cow_modify_range() later to copy the pages that actually change.
* This function allocates new OS pages for the COW backend for the modified range;
* pages outside the modified range are kept and a reference is added to each of them.
*
* @target_bk - target allocation's backend (the allocation to be COWed)
* @target_offset - offset in the target allocation to COW (to support COW of memory allocated from a memory bank, 4K aligned)
* @target_size - size of the target allocation to COW (to support memory bank)
* @backend - COW backend
* @range_start - offset of the modified range (4K aligned)
* @range_size - size of the modified range
*/
_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size,
		mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp, *page_node;
	int target_page = 0;
	struct page *new_page;
	struct list_head *pages = NULL;

	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);

	if (NULL == pages) {
		MALI_DEBUG_PRINT_ERROR(("No memory pages need to be COWed!\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	MALI_DEBUG_ASSERT(0 == cow->count);

	INIT_LIST_HEAD(&cow->pages);
	mutex_lock(&target_bk->mutex);
	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
		/* add pages in [target_offset, target_offset + target_size) to the COW backend */
		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {

			/* allocate a new page node, always use OS memory for COW */
			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);

			if (NULL == page_node) {
				mutex_unlock(&target_bk->mutex);
				goto error;
			}

			INIT_LIST_HEAD(&page_node->list);

			/* check if in the modified range */
			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
				/* need to allocate a new page;
				 * to simplify the case, all COW memory is allocated from OS memory */
				new_page = mali_mem_cow_alloc_page();

				if (NULL == new_page) {
					kfree(page_node);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				_mali_page_node_add_page(page_node, new_page);
			} else {
				/* outside the modified range: share the original page (block memory case included) */
				if (m_page->type != MALI_PAGE_NODE_BLOCK) {
					_mali_page_node_add_page(page_node, m_page->page);
				} else {
					page_node->type = MALI_PAGE_NODE_BLOCK;
					_mali_page_node_add_block_item(page_node, m_page->blk_it);
				}

				/* add ref to this page */
				_mali_page_node_ref(m_page);
			}

			/* add it to the COW backend page list */
			list_add_tail(&page_node->list, &cow->pages);
			cow->count++;
		}
		target_page++;
	}
	mutex_unlock(&target_bk->mutex);
	return _MALI_OSK_ERR_OK;
error:
	mali_mem_cow_release(backend, MALI_FALSE);
	return _MALI_OSK_ERR_FAULT;
}
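
/*
 * Illustrative sketch (not part of the driver): COWing a whole 16-page
 * OS-backed target in one call. With range_start == 0 and range_size equal
 * to the full size, every page is copied into a fresh OS page; with
 * range_size == 0 every page is shared (ref'ed) instead and the copy is
 * deferred to mali_memory_cow_modify_range(). "target" and "cow_bk" are
 * assumed to be backends prepared by the caller.
 *
 *	u32 size = 16 * _MALI_OSK_MALI_PAGE_SIZE;
 *
 *	if (_MALI_OSK_ERR_OK != mali_memory_cow_os_memory(target, 0, size,
 *							  cow_bk, 0, size))
 *		return _MALI_OSK_ERR_FAULT;
 */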

_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size,
		mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp, *page_node;
	int target_page = 0;
	struct mali_swap_item *swap_item;
	struct list_head *pages = NULL;

	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
	if (NULL == pages) {
		MALI_DEBUG_PRINT_ERROR(("No swap memory pages need to be COWed!\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	MALI_DEBUG_ASSERT(0 == cow->count);

	INIT_LIST_HEAD(&cow->pages);
	mutex_lock(&target_bk->mutex);

	backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;

	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
		/* add pages in [target_offset, target_offset + target_size) to the COW backend */
		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {

			/* allocate a new page node; use swap memory for COW memory with the swap-cowed flag. */
			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);

			if (NULL == page_node) {
				mutex_unlock(&target_bk->mutex);
				goto error;
			}

			/* check if in the modified range */
			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
				/* need to allocate a new swap item for the copied page */
				swap_item = mali_mem_swap_alloc_swap_item();

				if (NULL == swap_item) {
					kfree(page_node);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				swap_item->idx = mali_mem_swap_idx_alloc();

				if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
					MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n"));
					kfree(page_node);
					kfree(swap_item);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				_mali_page_node_add_swap_item(page_node, swap_item);
			} else {
				_mali_page_node_add_swap_item(page_node, m_page->swap_it);

				/* add ref to this page */
				_mali_page_node_ref(m_page);
			}

			list_add_tail(&page_node->list, &cow->pages);
			cow->count++;
		}
		target_page++;
	}
	mutex_unlock(&target_bk->mutex);

	return _MALI_OSK_ERR_OK;
error:
	mali_mem_swap_release(backend, MALI_FALSE);
	return _MALI_OSK_ERR_FAULT;
}
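
/*
 * Note on the flag set above, with a hedged usage sketch (not part of the
 * driver): a swap-backed COW leaves MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN set,
 * so its pages are not yet resident; "cow_bk" below is an assumption.
 *
 *	if (_MALI_OSK_ERR_OK == mali_memory_cow_swap_memory(target, 0, size,
 *							    cow_bk, 0, size) &&
 *	    (cow_bk->flags & MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN)) {
 *		// pages will be swapped in later, e.g. by the fault path
 *	}
 */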

_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		return mali_mem_os_put_page(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		return mali_mem_block_unref_node(node);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return _mali_mem_swap_put_page_node(node);
	} else
		MALI_DEBUG_ASSERT(0);
	return _MALI_OSK_ERR_FAULT;
}

/**
* Modify a range of an existing COW backend
* @backend - COW backend
* @range_start - offset of the modified range (4K aligned)
* @range_size - size of the modified range (in bytes)
*/
_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_allocation *alloc = NULL;
	struct mali_session_data *session;
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp;
	u32 count = 0;
	struct page *new_page;
	s32 change_pages_nr = 0;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;

	if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	alloc = backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
	MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);

	mutex_lock(&backend->mutex);

	/* replace the pages of the modified range with freshly allocated ones */
	list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {

		/* check if in the modified range */
		if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
			if (MALI_PAGE_NODE_SWAP != m_page->type) {
				new_page = mali_mem_cow_alloc_page();

				if (NULL == new_page) {
					ret = _MALI_OSK_ERR_NOMEM;
					goto error;
				}

				if (1 != _mali_page_node_get_ref_count(m_page))
					change_pages_nr++;

				/* unref the old page */
				_mali_osk_mutex_wait(session->cow_lock);
				if (_mali_mem_put_page_node(m_page)) {
					__free_page(new_page);
					_mali_osk_mutex_signal(session->cow_lock);
					ret = _MALI_OSK_ERR_FAULT;
					goto error;
				}
				_mali_osk_mutex_signal(session->cow_lock);

				/* add the new page; always use OS memory for COW */
				m_page->type = MALI_PAGE_NODE_OS;
				_mali_page_node_add_page(m_page, new_page);
			} else {
				struct mali_swap_item *swap_item;

				swap_item = mali_mem_swap_alloc_swap_item();

				if (NULL == swap_item) {
					ret = _MALI_OSK_ERR_NOMEM;
					goto error;
				}

				swap_item->idx = mali_mem_swap_idx_alloc();

				if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
					MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n"));
					kfree(swap_item);
					ret = _MALI_OSK_ERR_NOMEM;
					goto error;
				}

				if (1 != _mali_page_node_get_ref_count(m_page)) {
					change_pages_nr++;
				}

				if (_mali_mem_put_page_node(m_page)) {
					mali_mem_swap_free_swap_item(swap_item);
					ret = _MALI_OSK_ERR_FAULT;
					goto error;
				}

				_mali_page_node_add_swap_item(m_page, swap_item);
			}
		}
		count++;
	}
	cow->change_pages_nr = change_pages_nr;

	MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);

	/* zap the CPU mapping of the modified range, and redo the CPU mapping here if needed */
	if (NULL != alloc->cpu_mapping.vma) {
		MALI_DEBUG_ASSERT(0 != alloc->backend_handle);
		MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);
		MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);

		if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
			zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);

			ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE);

			if (unlikely(ret != _MALI_OSK_ERR_OK)) {
				MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: cpu mapping failed !\n"));
				ret = _MALI_OSK_ERR_FAULT;
			}
		} else {
			/* set these flags to trigger a page fault for swappable cowed memory. */
			alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP;
			alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP;

			zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
			/* clear the flags again so the swappable memory is unmapped with regard to struct page, not page frame. */
			alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP;
			alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP;
		}
	}

error:
	mutex_unlock(&backend->mutex);
	return ret;
}
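
/*
 * Illustrative sketch (not part of the driver): deferred COW. First share
 * all pages of the target (range 0/0), then copy only the single page at
 * byte offset 0x5000 once it is actually written. "target" and "cow_bk"
 * are assumptions for the example.
 *
 *	mali_memory_cow_os_memory(target, 0, size, cow_bk, 0, 0);
 *	...
 *	err = mali_memory_cow_modify_range(cow_bk, 5 * _MALI_OSK_MALI_PAGE_SIZE,
 *					   _MALI_OSK_MALI_PAGE_SIZE);
 */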

/**
* Allocate pages for a COW backend
* @target_bk - target allocation's backend (the allocation to be COWed)
* @target_offset - offset in the target allocation to COW (to support COW of memory allocated from a memory bank, 4K aligned)
* @target_size - size of the target allocation to COW (to support memory bank) (in bytes)
* @backend - COW backend
* @range_start - offset of the modified range (4K aligned)
* @range_size - size of the modified range (in bytes)
*/
_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
				       u32 target_offset,
				       u32 target_size,
				       mali_mem_backend *backend,
				       u32 range_start,
				       u32 range_size)
{
	struct mali_session_data *session = backend->mali_allocation->session;

	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

	/* size & offset must be a multiple of the system page size */
	if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	/* check backend type */
	MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);

	switch (target_bk->type) {
	case MALI_MEM_OS:
	case MALI_MEM_BLOCK:
		return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
	case MALI_MEM_COW:
		if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
			return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
		} else {
			return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
		}
	case MALI_MEM_SWAP:
		return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
	case MALI_MEM_EXTERNAL:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("External physical memory not supported!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	case MALI_MEM_DMA_BUF:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	case MALI_MEM_UMP:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	default:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("Invalid memory type!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	}
	return _MALI_OSK_ERR_OK;
}
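
/*
 * Illustrative sketch (not part of the driver): mali_memory_do_cow() is the
 * usual entry point; it checks 4K alignment and then dispatches on the
 * target backend type as above. A full-copy COW of an OS-backed target,
 * with "target" and "cow_bk" assumed to be prepared by the caller:
 *
 *	err = mali_memory_do_cow(target, 0, target->size,
 *				 cow_bk, 0, target->size);
 */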

/**
* Map COW backend memory to mali
* Supports OS/BLOCK mali_page_node types
*/
int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)
{
	mali_mem_allocation *cow_alloc;
	struct mali_page_node *m_page;
	struct mali_session_data *session;
	struct mali_page_directory *pagedir;
	u32 virt, start;

	cow_alloc = mem_bkend->mali_allocation;
	virt = cow_alloc->mali_vma_node.vm_node.start;
	start = virt;

	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	MALI_DEBUG_ASSERT_POINTER(cow_alloc);

	session = cow_alloc->session;
	pagedir = session->page_directory;
	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
	list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {
		if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {
			dma_addr_t phys = _mali_page_node_get_dma_addr(m_page);
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
			/* a Mali page table entry only holds a 32-bit address */
			MALI_DEBUG_ASSERT(0 == (phys >> 32));
#endif
			mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,
						MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
		}
		virt += MALI_MMU_PAGE_SIZE;
	}
	return 0;
}
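
/*
 * Worked example for the window check above, assuming 4K MMU pages: with
 * range_start = 0x2000 and range_size = 0x1000, only the third page node
 * (virt - start == 0x2000) is written into the page directory; every other
 * iteration just advances virt by MALI_MMU_PAGE_SIZE.
 */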

/**
* Map a COW backend to the CPU
* supports OS/BLOCK memory
*/
int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
{
	int ret;
	mali_mem_cow *cow = &mem_bkend->cow_mem;
	struct mali_page_node *m_page;
	unsigned long addr = vma->vm_start;
	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);

	list_for_each_entry(m_page, &cow->pages, list) {
		/* We should use vm_insert_page, but it does a dcache
		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
		ret = vm_insert_page(vma, addr, page);
		*/
		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));

		if (unlikely(0 != ret)) {
			return ret;
		}
		addr += _MALI_OSK_MALI_PAGE_SIZE;
	}

	return 0;
}

/**
* Map some pages (COW backend) to CPU vma@vaddr
* @mem_bkend - COW backend
* @vma - the CPU vma to map into
* @vaddr - start CPU vaddr mapped to
* @num - max number of pages to map to the CPU vaddr
*/
_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
		struct vm_area_struct *vma,
		unsigned long vaddr,
		int num)
{
	mali_mem_cow *cow = &mem_bkend->cow_mem;
	struct mali_page_node *m_page;
	int ret;
	int offset;
	int count = 0;
	unsigned long vstart = vma->vm_start;

	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
	MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);
	MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
	offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;

	list_for_each_entry(m_page, &cow->pages, list) {
		if ((count >= offset) && (count < offset + num)) {
			ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));

			if (unlikely(0 != ret)) {
				if (count == offset) {
					return _MALI_OSK_ERR_FAULT;
				} else {
					/* ret is EBUSY when the page is already mapped (outside the modified range), which is OK here */
					return _MALI_OSK_ERR_OK;
				}
			}
			vaddr += _MALI_OSK_MALI_PAGE_SIZE;
		}
		count++;
	}

	return _MALI_OSK_ERR_OK;
}
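
/*
 * Worked example, assuming vma->vm_start == 0x70000000 and 4K pages:
 * vaddr = 0x70003000 with num = 2 gives offset = 3, so the page nodes at
 * list positions 3 and 4 are inserted at 0x70003000 and 0x70004000.
 * Callers hold the backend mutex, as mali_memory_cow_modify_range() does.
 */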

/**
* Release COW backend memory
* free it directly (put_page unrefs the page), do not put it into the pool
*/
u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
{
	mali_mem_allocation *alloc;
	struct mali_session_data *session;
	u32 free_pages_nr = 0;
	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	alloc = mem_bkend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {
		/* Unmap the memory from the mali virtual address space. */
		if (MALI_TRUE == is_mali_mapped)
			mali_mem_os_mali_unmap(alloc);
		/* free the cow backend list */
		_mali_osk_mutex_wait(session->cow_lock);
		free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);
		_mali_osk_mutex_signal(session->cow_lock);

		free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);

		MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));
	} else {
		free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped);
	}

	MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
			     free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));

	mem_bkend->cow_mem.count = 0;
	return free_pages_nr;
}
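
/*
 * Illustrative sketch (not part of the driver): a typical caller pairs the
 * release with its session accounting, since the return value is the number
 * of pages actually freed (shared pages may still be referenced elsewhere):
 *
 *	free_nr = mali_mem_cow_release(mem_bkend, MALI_TRUE);
 *	atomic_sub(free_nr, &session->mali_mem_allocated_pages);
 */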

/* The dst node can be an OS node or a swap node. */
void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)
{
	void *dst, *src;
	struct page *dst_page;
	dma_addr_t dma_addr;

	MALI_DEBUG_ASSERT(src_node != NULL);
	MALI_DEBUG_ASSERT(dst_node != NULL);
	MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS
			  || dst_node->type == MALI_PAGE_NODE_SWAP);

	if (dst_node->type == MALI_PAGE_NODE_OS) {
		dst_page = dst_node->page;
	} else {
		dst_page = dst_node->swap_it->page;
	}

	dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node),
		       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* map it, and copy the content */
	dst = kmap_atomic(dst_page);

	if (src_node->type == MALI_PAGE_NODE_OS ||
	    src_node->type == MALI_PAGE_NODE_SWAP) {
		struct page *src_page;

		if (src_node->type == MALI_PAGE_NODE_OS) {
			src_page = src_node->page;
		} else {
			src_page = src_node->swap_it->page;
		}

		/* Clean and invalidate the cache. */
		/* On the ARM architecture, a speculative read may pull stale data into
		 * the L1 cache through the kernel linear mapping. DMA_BIDIRECTIONAL
		 * invalidates the L1 cache so that the following read gets the latest data.
		 */
		dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node),
			       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

		src = kmap_atomic(src_page);
		memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);
		kunmap_atomic(src);
		dma_addr = dma_map_page(&mali_platform_device->dev, src_page,
					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

		if (src_node->type == MALI_PAGE_NODE_SWAP) {
			src_node->swap_it->dma_addr = dma_addr;
		}
	} else if (src_node->type == MALI_PAGE_NODE_BLOCK) {
		/*
		 * use ioremap to map src for BLOCK memory
		 */
		src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
		memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);
		iounmap(src);
	}
	kunmap_atomic(dst);
	dma_addr = dma_map_page(&mali_platform_device->dev, dst_page,
				0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

	if (dst_node->type == MALI_PAGE_NODE_SWAP) {
		dst_node->swap_it->dma_addr = dma_addr;
	}
}
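
/*
 * The copy above follows the usual kernel DMA-coherency pattern:
 * dma_unmap_page() first (so the CPU sees data the device may have written
 * and no stale cache lines remain), then kmap_atomic() + memcpy(), then
 * dma_map_page() again to hand the page back to the device. A minimal
 * sketch of that pattern, with "dev", "pg", "buf" and "dma" assumed:
 *
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
 *	ptr = kmap_atomic(pg);
 *	memcpy(buf, ptr, PAGE_SIZE);
 *	kunmap_atomic(ptr);
 *	dma = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 */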

/*
* allocate a page on demand when the CPU accesses it;
* this is used in the page fault handler
*/
_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)
{
	struct page *new_page = NULL;
	struct mali_page_node *new_node = NULL;
	int i = 0;
	struct mali_page_node *m_page, *found_node = NULL;
	struct mali_session_data *session = NULL;
	mali_mem_cow *cow = &mem_bkend->cow_mem;
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);
	MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page));

	/* allocate a new page here */
	new_page = mali_mem_cow_alloc_page();
	if (!new_page)
		return _MALI_OSK_ERR_NOMEM;

	new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
	if (!new_node) {
		__free_page(new_page);
		return _MALI_OSK_ERR_NOMEM;
	}

	/* find the page in the backend */
	list_for_each_entry(m_page, &cow->pages, list) {
		if (i == offset_page) {
			found_node = m_page;
			break;
		}
		i++;
	}
	MALI_DEBUG_ASSERT(found_node);
	if (NULL == found_node) {
		__free_page(new_page);
		kfree(new_node);
		return _MALI_OSK_ERR_ITEM_NOT_FOUND;
	}

	_mali_page_node_add_page(new_node, new_page);

	/* Copy the src page's content to the new page */
	_mali_mem_cow_copy_page(found_node, new_node);

	MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);
	session = mem_bkend->mali_allocation->session;
	MALI_DEBUG_ASSERT_POINTER(session);
	if (1 != _mali_page_node_get_ref_count(found_node)) {
		atomic_add(1, &session->mali_mem_allocated_pages);
		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}
		mem_bkend->cow_mem.change_pages_nr++;
	}

	/* unref the old page node */
	_mali_osk_mutex_wait(session->cow_lock);
	if (_mali_mem_put_page_node(found_node)) {
		__free_page(new_page);
		kfree(new_node);
		_mali_osk_mutex_signal(session->cow_lock);
		return _MALI_OSK_ERR_NOMEM;
	}
	_mali_osk_mutex_signal(session->cow_lock);

	list_replace(&found_node->list, &new_node->list);

	kfree(found_node);

	/* map to the GPU side */
	_mali_osk_mutex_wait(session->memory_lock);
	mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);
	_mali_osk_mutex_signal(session->memory_lock);
	return _MALI_OSK_ERR_OK;
}
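
/*
 * Illustrative sketch (not part of the driver): how a CPU page-fault
 * handler might drive the on-demand path above. "fault_addr", "vma" and
 * "mem_bkend" are assumptions for the example.
 *
 *	u32 offset_page = (fault_addr - vma->vm_start) / _MALI_OSK_MALI_PAGE_SIZE;
 *
 *	if (_MALI_OSK_ERR_OK != mali_mem_cow_allocate_on_demand(mem_bkend, offset_page))
 *		return VM_FAULT_OOM;
 */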