/*
 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include "mali_osk.h"
#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_kernel_linux.h"

/* Minimum size of allocator page pool */
#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
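
/* The pool limit is configured in MB; with the 4 KiB pages used here there
 * are 256 pages per MB, hence the factor of 256 above. */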

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
/* Write combine dma_attrs */
static DEFINE_DMA_ATTRS(dma_attrs_wc);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
#endif
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
#else
static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
#endif
#endif
static void mali_mem_os_trim_pool(struct work_struct *work);

struct mali_mem_os_allocator mali_mem_os_allocator = {
	.pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
	.pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
	.pool_count = 0,

	.allocated_pages = ATOMIC_INIT(0),
	.allocation_limit = 0,

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	.shrinker.shrink = mali_mem_os_shrink,
#else
	.shrinker.count_objects = mali_mem_os_shrink_count,
	.shrinker.scan_objects = mali_mem_os_shrink,
#endif
	.shrinker.seeks = DEFAULT_SEEKS,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
	.timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#else
	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#endif
};
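
/* Free pages_count pages from os_pages. For copy-on-write (COW) backends
 * only page nodes holding the last reference are actually recycled; shared
 * nodes are just unreferenced and their list nodes freed. Recycled pages go
 * back onto the allocator page pool rather than to the kernel, and the trim
 * work is scheduled if the pool has grown past its minimum size. Returns
 * the number of pages recycled. */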
u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag)
{
	LIST_HEAD(pages);
	struct mali_page_node *m_page, *m_tmp;
	u32 free_pages_nr = 0;

	if (MALI_TRUE == cow_flag) {
		list_for_each_entry_safe(m_page, m_tmp, os_pages, list) {
			/* Only handle OS nodes here. */
			if (m_page->type == MALI_PAGE_NODE_OS) {
				if (1 == _mali_page_node_get_ref_count(m_page)) {
					list_move(&m_page->list, &pages);
					atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
					free_pages_nr++;
				} else {
					_mali_page_node_unref(m_page);
					m_page->page = NULL;
					list_del(&m_page->list);
					kfree(m_page);
				}
			}
		}
	} else {
		list_cut_position(&pages, os_pages, os_pages->prev);
		atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages);
		free_pages_nr = pages_count;
	}

	/* Put pages on pool. */
	spin_lock(&mali_mem_os_allocator.pool_lock);
	list_splice(&pages, &mali_mem_os_allocator.pool_pages);
	mali_mem_os_allocator.pool_count += free_pages_nr;
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
	}
	return free_pages_nr;
}

/**
 * Put a page back without returning it to the page pool.
 */
_mali_osk_errcode_t mali_mem_os_put_page(struct page *page)
{
	MALI_DEBUG_ASSERT_POINTER(page);
	if (1 == page_count(page)) {
		atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
		dma_unmap_page(&mali_platform_device->dev, page_private(page),
			       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
		ClearPagePrivate(page);
	}
	put_page(page);
	return _MALI_OSK_ERR_OK;
}
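
/* Move the page nodes in [start_page, start_page + page_count) from
 * mem_from to the tail of mem_to, updating both counts. */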
_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count)
{
	struct mali_page_node *m_page, *m_tmp;
	u32 i = 0;

	MALI_DEBUG_ASSERT_POINTER(mem_from);
	MALI_DEBUG_ASSERT_POINTER(mem_to);

	if (mem_from->count < start_page + page_count) {
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) {
		if (i >= start_page && i < start_page + page_count) {
			list_move_tail(&m_page->list, &mem_to->pages);
			mem_from->count--;
			mem_to->count++;
		}
		i++;
	}

	return _MALI_OSK_ERR_OK;
}
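
/* Allocate size bytes (rounded up to whole pages) for os_mem. Pages are
 * taken from the allocator pool first; any remainder is allocated with
 * alloc_page() and DMA-mapped for the GPU. Returns 0 on success, or a
 * negative errno (-ENOMEM, -EFAULT) on failure. */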
int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
{
	struct page *new_page;
	LIST_HEAD(pages_list);
	size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
	size_t remaining = page_count;
	struct mali_page_node *m_page, *m_tmp;
	u32 i;

	MALI_DEBUG_ASSERT_POINTER(os_mem);

	if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
		MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
				     size,
				     atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
				     mali_mem_os_allocator.allocation_limit));
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&os_mem->pages);
	os_mem->count = page_count;

	/* Grab pages from pool. */
	{
		size_t pool_pages;

		spin_lock(&mali_mem_os_allocator.pool_lock);
		pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
		for (i = pool_pages; i > 0; i--) {
			BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
			list_move(mali_mem_os_allocator.pool_pages.next, &pages_list);
		}
		mali_mem_os_allocator.pool_count -= pool_pages;
		remaining -= pool_pages;
		spin_unlock(&mali_mem_os_allocator.pool_lock);
	}

	/* Process pages from pool. */
	list_for_each_entry_safe(m_page, m_tmp, &pages_list, list) {
		BUG_ON(NULL == m_page);

		list_move_tail(&m_page->list, &os_mem->pages);
	}

	/* Allocate new pages, if needed. */
	for (i = 0; i < remaining; i++) {
		dma_addr_t dma_addr;
		gfp_t flags = __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD;
		int err;

#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
		flags |= GFP_HIGHUSER;
#else
#ifdef CONFIG_ZONE_DMA32
		flags |= GFP_DMA32;
#else
#ifdef CONFIG_ZONE_DMA
		flags |= GFP_DMA;
#else
		/* arm64 Utgard only works on memory below 4G, but the
		 * kernel provides no way to allocate memory below 4G
		 * without a DMA zone. */
		MALI_DEBUG_ASSERT(0);
#endif
#endif
#endif

		new_page = alloc_page(flags);

		if (unlikely(NULL == new_page)) {
			/* Calculate the number of pages actually allocated, and free them. */
			os_mem->count = (page_count - remaining) + i;
			atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
			mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
			return -ENOMEM;
		}

		/* Ensure page is flushed from CPU caches. */
		dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

		err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
		if (unlikely(err)) {
			MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
						new_page, err));
			__free_page(new_page);
			os_mem->count = (page_count - remaining) + i;
			atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
			mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
			return -EFAULT;
		}

		/* Store page phys addr */
		SetPagePrivate(new_page);
		set_page_private(new_page, dma_addr);

		m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
		if (unlikely(NULL == m_page)) {
			MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node!\n"));
			dma_unmap_page(&mali_platform_device->dev, page_private(new_page),
				       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
			ClearPagePrivate(new_page);
			__free_page(new_page);
			os_mem->count = (page_count - remaining) + i;
			atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
			mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
			return -EFAULT;
		}
		m_page->page = new_page;

		list_add_tail(&m_page->list, &os_mem->pages);
	}

	atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
	}

	return 0;
}
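
/* Map the pages of os_mem into the session's Mali (GPU) virtual address
 * space starting at vaddr. When the range ends at the last page of the
 * allocation the page list is walked in reverse from the tail; otherwise
 * it is walked forward, skipping the first start_page entries. */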
_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_page_num, u32 props)
{
	struct mali_page_directory *pagedir = session->page_directory;
	struct mali_page_node *m_page;
	u32 virt;
	u32 prop = props;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(os_mem);

	MALI_DEBUG_ASSERT(start_page <= os_mem->count);
	MALI_DEBUG_ASSERT((start_page + mapping_page_num) <= os_mem->count);

	if ((start_page + mapping_page_num) == os_mem->count) {

		virt = vaddr + MALI_MMU_PAGE_SIZE * (start_page + mapping_page_num);

		list_for_each_entry_reverse(m_page, &os_mem->pages, list) {

			virt -= MALI_MMU_PAGE_SIZE;
			if (mapping_page_num > 0) {
				dma_addr_t phys = page_private(m_page->page);
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
				/* Verify that the "physical" address is 32-bit and
				 * usable for Mali, when on a system with bus addresses
				 * wider than 32-bit. */
				MALI_DEBUG_ASSERT(0 == (phys >> 32));
#endif
				mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
			} else {
				break;
			}
			mapping_page_num--;
		}
	} else {
		u32 i = 0;
		virt = vaddr;
		list_for_each_entry(m_page, &os_mem->pages, list) {

			if (i >= start_page) {
				dma_addr_t phys = page_private(m_page->page);

#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
				/* Verify that the "physical" address is 32-bit and
				 * usable for Mali, when on a system with bus addresses
				 * wider than 32-bit. */
				MALI_DEBUG_ASSERT(0 == (phys >> 32));
#endif
				mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
			}
			i++;
			virt += MALI_MMU_PAGE_SIZE;
		}
	}
	return _MALI_OSK_ERR_OK;
}

void mali_mem_os_mali_unmap(mali_mem_allocation *alloc)
{
	struct mali_session_data *session;
	MALI_DEBUG_ASSERT_POINTER(alloc);
	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	mali_session_memory_lock(session);
	mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
			       alloc->flags);
	mali_session_memory_unlock(session);
}
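
/* Map every page of the backend into the user-space VMA, one PFN at a
 * time, advancing by one CPU page per iteration. Returns -EFAULT if any
 * insertion fails. */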
int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
{
	mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
	struct mali_page_node *m_page;
	struct page *page;
	int ret;
	unsigned long addr = vma->vm_start;
	MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);

	list_for_each_entry(m_page, &os_mem->pages, list) {
		/* We should use vm_insert_page, but it does a dcache
		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
		ret = vm_insert_page(vma, addr, page);
		*/
		page = m_page->page;
		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));

		if (unlikely(0 != ret)) {
			return -EFAULT;
		}
		addr += _MALI_OSK_MALI_PAGE_SIZE;
	}

	return 0;
}
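
/* Re-map a sub-range of an already CPU-mapped backend after a resize,
 * expected to be called with the backend lock held (hence the _locked
 * suffix). start_vaddr and the VMA start must be page aligned. -EBUSY from
 * vm_insert_pfn() means the page was already mapped and is tolerated; any
 * other failure is fatal. */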
_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mapping_size)
{
	mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
	struct mali_page_node *m_page;
	int ret;
	int offset;
	int mapping_page_num;
	int count = 0;

	unsigned long vstart = vma->vm_start;

	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
	MALI_DEBUG_ASSERT(0 == start_vaddr % _MALI_OSK_MALI_PAGE_SIZE);
	MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
	offset = (start_vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
	MALI_DEBUG_ASSERT(offset <= os_mem->count);
	mapping_page_num = mapping_size / _MALI_OSK_MALI_PAGE_SIZE;
	MALI_DEBUG_ASSERT((offset + mapping_page_num) <= os_mem->count);

	if ((offset + mapping_page_num) == os_mem->count) {

		unsigned long vm_end = start_vaddr + mapping_size;

		list_for_each_entry_reverse(m_page, &os_mem->pages, list) {

			vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
			if (mapping_page_num > 0) {
				ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));

				if (unlikely(0 != ret)) {
					/* Returns -EBUSY if the page has already been mapped into the table, which is OK. */
					if (-EBUSY == ret) {
						break;
					} else {
						MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, offset is %d, page_count is %d\n",
								     ret, offset + mapping_page_num, os_mem->count));
					}
					return _MALI_OSK_ERR_FAULT;
				}
			} else {
				break;
			}
			mapping_page_num--;
		}
	} else {

		list_for_each_entry(m_page, &os_mem->pages, list) {
			if (count >= offset) {

				ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));

				if (unlikely(0 != ret)) {
					/* Returns -EBUSY if the page has already been mapped into the table, which is OK. */
					if (-EBUSY == ret) {
						break;
					} else {
						MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, count is %d, offset is %d, page_count is %d\n",
								     ret, count, offset, os_mem->count));
					}
					return _MALI_OSK_ERR_FAULT;
				}
			}
			count++;
			vstart += _MALI_OSK_MALI_PAGE_SIZE;
		}
	}
	return _MALI_OSK_ERR_OK;
}
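
/* Tear down a backend: unmap it from the Mali address space, then return
 * its pages to the allocator pool. For COW backends the session cow_lock
 * guards against a racing free of shared page nodes. Returns the number of
 * pages actually freed, which may be less than os_mem.count while pages
 * are still shared. */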
u32 mali_mem_os_release(mali_mem_backend *mem_bkend)
{
	mali_mem_allocation *alloc;
	struct mali_session_data *session;
	u32 free_pages_nr = 0;
	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
	MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);

	alloc = mem_bkend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	/* Unmap the memory from the mali virtual address space. */
	mali_mem_os_mali_unmap(alloc);
	mutex_lock(&mem_bkend->mutex);

	if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) {
		/* Lock to avoid the free race condition for the cow shared memory page node. */
		_mali_osk_mutex_wait(session->cow_lock);
		free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE);
		_mali_osk_mutex_signal(session->cow_lock);
	} else {
		free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE);
	}
	mutex_unlock(&mem_bkend->mutex);

	MALI_DEBUG_PRINT(4, ("OS Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
			     free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));

	mem_bkend->os_mem.count = 0;
	return free_pages_nr;
}
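
/* Small cache of write-combine DMA pages used for Mali MMU page tables.
 * mali_mem_os_get_table_page() pops from it and
 * mali_mem_os_release_table_page() pushes back, falling back to
 * dma_alloc/dma_free when the pool is empty or full. */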
#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
static struct {
	struct {
		mali_dma_addr phys;
		mali_io_address mapping;
	} page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
	size_t count;
	spinlock_t lock;
} mali_mem_page_table_page_pool = {
	.count = 0,
	.lock = __SPIN_LOCK_UNLOCKED(pool_lock),
};

_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
	dma_addr_t tmp_phys;

	spin_lock(&mali_mem_page_table_page_pool.lock);
	if (0 < mali_mem_page_table_page_pool.count) {
		u32 i = --mali_mem_page_table_page_pool.count;
		*phys = mali_mem_page_table_page_pool.page[i].phys;
		*mapping = mali_mem_page_table_page_pool.page[i].mapping;

		ret = _MALI_OSK_ERR_OK;
	}
	spin_unlock(&mali_mem_page_table_page_pool.lock);

	if (_MALI_OSK_ERR_OK != ret) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
		*mapping = dma_alloc_attrs(&mali_platform_device->dev,
					   _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
					   GFP_KERNEL, &dma_attrs_wc);
#else
		*mapping = dma_alloc_writecombine(&mali_platform_device->dev,
						  _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
#endif
		if (NULL != *mapping) {
			ret = _MALI_OSK_ERR_OK;

#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
			/* Verify that the "physical" address is 32-bit and
			 * usable for Mali, when on a system with bus addresses
			 * wider than 32-bit. */
			MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
#endif

			*phys = (mali_dma_addr)tmp_phys;
		}
	}

	return ret;
}

void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
{
	spin_lock(&mali_mem_page_table_page_pool.lock);
	if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
		/* Pool is not full; cache the page for reuse. */
		u32 i = mali_mem_page_table_page_pool.count;
		mali_mem_page_table_page_pool.page[i].phys = phys;
		mali_mem_page_table_page_pool.page[i].mapping = virt;

		++mali_mem_page_table_page_pool.count;

		spin_unlock(&mali_mem_page_table_page_pool.lock);
	} else {
		spin_unlock(&mali_mem_page_table_page_pool.lock);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
		dma_free_attrs(&mali_platform_device->dev,
			       _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
			       &dma_attrs_wc);
#else
		dma_free_writecombine(&mali_platform_device->dev,
				      _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
#endif
	}
}

void mali_mem_os_free_page_node(struct mali_page_node *m_page)
{
	struct page *page = m_page->page;
	MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS);

	if (1 == page_count(page)) {
		dma_unmap_page(&mali_platform_device->dev, page_private(page),
			       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
		ClearPagePrivate(page);
	}
	__free_page(page);
	m_page->page = NULL;
	list_del(&m_page->list);
	kfree(m_page);
}

/* The maximum number of page table pool pages to free in one go. */
#define MALI_MEM_OS_CHUNK_TO_FREE 64UL

/* Free a certain number of pages from the page table page pool.
 * The pool lock must be held when calling the function, and the lock will be
 * released before returning.
 */
static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
{
	mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
	void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
	u32 i;

	MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);

	/* Remove nr_to_free pages from the pool and store them locally on stack. */
	for (i = 0; i < nr_to_free; i++) {
		u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;

		phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
		virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
	}

	mali_mem_page_table_page_pool.count -= nr_to_free;

	spin_unlock(&mali_mem_page_table_page_pool.lock);

	/* After releasing the spinlock: free the pages we removed from the pool. */
	for (i = 0; i < nr_to_free; i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
		dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
			       virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
#else
		dma_free_writecombine(&mali_platform_device->dev,
				      _MALI_OSK_MALI_PAGE_SIZE,
				      virt_arr[i], (dma_addr_t)phys_arr[i]);
#endif
	}
}

static void mali_mem_os_trim_page_table_page_pool(void)
{
	size_t nr_to_free = 0;
	size_t nr_to_keep;

	/* Keep 2 page table pages for each 1024 pages in the page cache. */
	nr_to_keep = mali_mem_os_allocator.pool_count / 512;
	/* And a minimum of eight pages, to accommodate new sessions. */
	nr_to_keep += 8;

	if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;

	if (nr_to_keep < mali_mem_page_table_page_pool.count) {
		nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
	}

	/* Pool lock will be released by the callee. */
	mali_mem_os_page_table_pool_free(nr_to_free);
}

static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	return mali_mem_os_allocator.pool_count;
}
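
/* The kernel shrinker callback changed shape several times: before 2.6.35
 * it received only (nr_to_scan, gfp_mask), 2.6.35 added the struct shrinker
 * pointer, for 3.0+ this driver uses the struct shrink_control form, and
 * 3.12 split the interface into count_objects()/scan_objects() returning
 * unsigned long. The #if ladder below selects the matching definition. */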
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
#endif /* Linux < 2.6.35 */
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
#else
static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
#endif /* Linux < 3.12.0 */
#endif /* Linux < 3.0.0 */
{
	struct mali_page_node *m_page, *m_tmp;
	unsigned long flags;
	struct list_head *le, pages;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
	int nr = nr_to_scan;
#else
	int nr = sc->nr_to_scan;
#endif

	if (0 == nr) {
		return mali_mem_os_shrink_count(shrinker, sc);
	}

	if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
		/* Not able to lock. */
		return -1;
	}

	if (0 == mali_mem_os_allocator.pool_count) {
		/* No pages available. */
		spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
		return 0;
	}

	/* Release from general page pool. */
	nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
	mali_mem_os_allocator.pool_count -= nr;
	list_for_each(le, &mali_mem_os_allocator.pool_pages) {
		--nr;
		if (0 == nr) break;
	}
	list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
	spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);

	list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
		mali_mem_os_free_page_node(m_page);
	}

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
		/* Pool has shrunk below the trim threshold; stop the timer. */
		MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	return mali_mem_os_shrink_count(shrinker, sc);
#else
	return nr;
#endif
}
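
/* Deferred-work handler that trims the page pool back towards
 * MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES. Each run frees half of the
 * pages above that watermark (at least 64 pages, i.e. 256 KB with 4 KiB
 * pages), also trims the page table page pool, and re-queues itself while
 * the pool remains above the watermark. */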
static void mali_mem_os_trim_pool(struct work_struct *data)
{
	struct mali_page_node *m_page, *m_tmp;
	struct list_head *le;
	LIST_HEAD(pages);
	size_t nr_to_free;

	MALI_IGNORE(data);

	MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));

	/* Release from general page pool. */
	spin_lock(&mali_mem_os_allocator.pool_lock);
	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
		const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES);

		/* Free half of the pages above the static limit, but at least 64 pages (256 KB). */
		nr_to_free = max(count / 2, min_to_free);

		mali_mem_os_allocator.pool_count -= nr_to_free;
		list_for_each(le, &mali_mem_os_allocator.pool_pages) {
			--nr_to_free;
			if (0 == nr_to_free) break;
		}
		list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
	}
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
		mali_mem_os_free_page_node(m_page);
	}

	/* Release some pages from page table page pool. */
	mali_mem_os_trim_page_table_page_pool();

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
	}
}

_mali_osk_errcode_t mali_mem_os_init(void)
{
	mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
	if (NULL == mali_mem_os_allocator.wq) {
		return _MALI_OSK_ERR_NOMEM;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
#endif

	register_shrinker(&mali_mem_os_allocator.shrinker);

	return _MALI_OSK_ERR_OK;
}

void mali_mem_os_term(void)
{
	struct mali_page_node *m_page, *m_tmp;
	unregister_shrinker(&mali_mem_os_allocator.shrinker);
	cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);

	if (NULL != mali_mem_os_allocator.wq) {
		destroy_workqueue(mali_mem_os_allocator.wq);
		mali_mem_os_allocator.wq = NULL;
	}

	spin_lock(&mali_mem_os_allocator.pool_lock);
	list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) {
		mali_mem_os_free_page_node(m_page);

		--mali_mem_os_allocator.pool_count;
	}
	BUG_ON(mali_mem_os_allocator.pool_count);
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	/* Release from page table page pool */
	do {
		u32 nr_to_free;

		spin_lock(&mali_mem_page_table_page_pool.lock);

		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);

		/* Pool lock will be released by the callee. */
		mali_mem_os_page_table_pool_free(nr_to_free);
	} while (0 != mali_mem_page_table_page_pool.count);
}

_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
{
	mali_mem_os_allocator.allocation_limit = size;

	return _MALI_OSK_ERR_OK;
}

u32 mali_mem_os_stat(void)
{
	return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
}