1 /*
2  * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
3  * 
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  * 
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10
11 #include <linux/list.h>
12 #include <linux/mm.h>
13 #include <linux/mm_types.h>
14 #include <linux/fs.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/version.h>
17 #include <linux/platform_device.h>
18 #include <linux/workqueue.h>
19
20 #include "mali_osk.h"
21 #include "mali_memory.h"
22 #include "mali_memory_os_alloc.h"
23 #include "mali_kernel_linux.h"
24
25 /* Minimum size of allocator page pool */
26 #define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
27 #define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
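/*
 * Illustrative arithmetic: with a 4 KB _MALI_OSK_MALI_PAGE_SIZE, one MB of pool
 * corresponds to 256 pages.  If MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB were set
 * to 4 (a hypothetical build-time value), the pool would keep at least
 * 4 * 256 = 1024 pages, and the trim work would be queued 10 * CONFIG_HZ jiffies
 * (10 seconds) after the pool grows past that threshold.
 */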
28
29 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
30 /* Write combine dma_attrs */
31 static DEFINE_DMA_ATTRS(dma_attrs_wc);
32 #endif
33
34 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
35 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
36 static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
37 #else
38 static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
39 #endif
40 #else
41 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
42 static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
43 #else
44 static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
45 static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
46 #endif
47 #endif
48 static void mali_mem_os_trim_pool(struct work_struct *work);
49
50 struct mali_mem_os_allocator mali_mem_os_allocator = {
51         .pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
52         .pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
53         .pool_count = 0,
54
55         .allocated_pages = ATOMIC_INIT(0),
56         .allocation_limit = 0,
57
58 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
59         .shrinker.shrink = mali_mem_os_shrink,
60 #else
61         .shrinker.count_objects = mali_mem_os_shrink_count,
62         .shrinker.scan_objects = mali_mem_os_shrink,
63 #endif
64         .shrinker.seeks = DEFAULT_SEEKS,
65 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
66         .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
67 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
68         .timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
69 #else
70         .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
71 #endif
72 };
73
74 u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag)
75 {
76         LIST_HEAD(pages);
77         struct mali_page_node *m_page, *m_tmp;
78         u32 free_pages_nr = 0;
79
80         if (MALI_TRUE == cow_flag) {
81                 list_for_each_entry_safe(m_page, m_tmp, os_pages, list) {
82                         /* Only handle OS page nodes here. */
83                         if (m_page->type == MALI_PAGE_NODE_OS) {
84                                 if (1 == _mali_page_node_get_ref_count(m_page)) {
85                                         list_move(&m_page->list, &pages);
86                                         atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
87                                         free_pages_nr++;
88                                 } else {
89                                         _mali_page_node_unref(m_page);
90                                         m_page->page = NULL;
91                                         list_del(&m_page->list);
92                                         kfree(m_page);
93                                 }
94                         }
95                 }
96         } else {
97                 list_cut_position(&pages, os_pages, os_pages->prev);
98                 atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages);
99                 free_pages_nr = pages_count;
100         }
101
102         /* Put pages on pool. */
103         spin_lock(&mali_mem_os_allocator.pool_lock);
104         list_splice(&pages, &mali_mem_os_allocator.pool_pages);
105         mali_mem_os_allocator.pool_count += free_pages_nr;
106         spin_unlock(&mali_mem_os_allocator.pool_lock);
107
108         if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
109                 MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
110                 queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
111         }
112         return free_pages_nr;
113 }
114
115 /**
116 * Put (release) a page without putting it back into the page pool.
117 */
118 _mali_osk_errcode_t mali_mem_os_put_page(struct page *page)
119 {
120         MALI_DEBUG_ASSERT_POINTER(page);
121         if (1 == page_count(page)) {
122                 atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
123                 dma_unmap_page(&mali_platform_device->dev, page_private(page),
124                                _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
125                 ClearPagePrivate(page);
126         }
127         put_page(page);
128         return _MALI_OSK_ERR_OK;
129 }
130
131 _mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count)
132 {
133         struct mali_page_node *m_page, *m_tmp;
134         u32 i = 0;
135
136         MALI_DEBUG_ASSERT_POINTER(mem_from);
137         MALI_DEBUG_ASSERT_POINTER(mem_to);
138
139         if (mem_from->count < start_page + page_count) {
140                 return _MALI_OSK_ERR_INVALID_ARGS;
141         }
142
143         list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) {
144                 if (i >= start_page && i < start_page + page_count) {
145                         list_move_tail(&m_page->list, &mem_to->pages);
146                         mem_from->count--;
147                         mem_to->count++;
148                 }
149                 i++;
150         }
151
152         return _MALI_OSK_ERR_OK;
153 }
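
#if 0
/*
 * Illustrative sketch only, not built: how a caller might move the tail of one
 * allocation into another with mali_mem_os_resize_pages().  Both mali_mem_os_mem
 * values and the page count of 16 are hypothetical; both page lists are assumed
 * to be initialised and "from" is assumed to hold at least 16 pages.
 */
static void example_move_tail_pages(mali_mem_os_mem *from, mali_mem_os_mem *to)
{
        /* Move pages [from->count - 16, from->count) onto the tail of "to". */
        if (_MALI_OSK_ERR_OK != mali_mem_os_resize_pages(from, to, from->count - 16, 16))
                MALI_DEBUG_PRINT(1, ("OS Mem: example resize failed\n"));
}
#endif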
154
155
156 int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
157 {
158         struct page *new_page;
159         LIST_HEAD(pages_list);
160         size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
161         size_t remaining = page_count;
162         struct mali_page_node *m_page, *m_tmp;
163         u32 i;
164
165         MALI_DEBUG_ASSERT_POINTER(os_mem);
166
167         if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
168                 MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
169                                      size,
170                                      atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
171                                      mali_mem_os_allocator.allocation_limit));
172                 return -ENOMEM;
173         }
174
175         INIT_LIST_HEAD(&os_mem->pages);
176         os_mem->count = page_count;
177
178         /* Grab pages from pool. */
179         {
180                 size_t pool_pages;
181                 spin_lock(&mali_mem_os_allocator.pool_lock);
182                 pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
183                 for (i = pool_pages; i > 0; i--) {
184                         BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
185                         list_move(mali_mem_os_allocator.pool_pages.next, &pages_list);
186                 }
187                 mali_mem_os_allocator.pool_count -= pool_pages;
188                 remaining -= pool_pages;
189                 spin_unlock(&mali_mem_os_allocator.pool_lock);
190         }
191
192         /* Process pages from pool. */
193         i = 0;
194         list_for_each_entry_safe(m_page, m_tmp, &pages_list, list) {
195                 BUG_ON(NULL == m_page);
196
197                 list_move_tail(&m_page->list, &os_mem->pages);
198         }
199
200         /* Allocate new pages, if needed. */
201         for (i = 0; i < remaining; i++) {
202                 dma_addr_t dma_addr;
203                 gfp_t flags = __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD;
204                 int err;
205
206 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
207                 flags |= GFP_HIGHUSER;
208 #else
209 #ifdef CONFIG_ZONE_DMA32
210                 flags |= GFP_DMA32;
211 #else
212 #ifdef CONFIG_ZONE_DMA
213                 flags |= GFP_DMA;
214 #else
215                 /* On arm64 the Utgard GPU can only address memory below 4 GB, but this
216                  * kernel configuration provides no zone for allocating memory below 4 GB.
217                  */
218                 MALI_DEBUG_ASSERT(0);
219 #endif
220 #endif
221 #endif
222
223                 new_page = alloc_page(flags);
224
225                 if (unlikely(NULL == new_page)) {
226                         /* Calculate the number of pages actually allocated, and free them. */
227                         os_mem->count = (page_count - remaining) + i;
228                         atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
229                         mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
230                         return -ENOMEM;
231                 }
232
233                 /* Ensure page is flushed from CPU caches. */
234                 dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
235                                         0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
236
237                 err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
238                 if (unlikely(err)) {
239                         MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
240                                                 new_page, err));
241                         __free_page(new_page);
242                         os_mem->count = (page_count - remaining) + i;
243                         atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
244                         mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
245                         return -EFAULT;
246                 }
247
248                 /* Store page phys addr */
249                 SetPagePrivate(new_page);
250                 set_page_private(new_page, dma_addr);
251
252                 m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
253                 if (unlikely(NULL == m_page)) {
254                         MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node! \n"));
255                         dma_unmap_page(&mali_platform_device->dev, page_private(new_page),
256                                        _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
257                         ClearPagePrivate(new_page);
258                         __free_page(new_page);
259                         os_mem->count = (page_count - remaining) + i;
260                         atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
261                         mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
262                         return -EFAULT;
263                 }
264                 m_page->page = new_page;
265
266                 list_add_tail(&m_page->list, &os_mem->pages);
267         }
268
269         atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
270
271         if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
272                 MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
273                 cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
274         }
275
276         return 0;
277 }
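
#if 0
/*
 * Illustrative sketch only, not built: pairing mali_mem_os_alloc_pages() with
 * mali_mem_os_free().  The 1 MB size is hypothetical; pages are taken from the
 * pool first and the remainder falls back to alloc_page().
 */
static int example_alloc_then_free(void)
{
        mali_mem_os_mem os_mem;
        int err;

        err = mali_mem_os_alloc_pages(&os_mem, 256 * _MALI_OSK_MALI_PAGE_SIZE);
        if (0 != err)
                return err;

        /* With cow_flag == MALI_FALSE every page goes straight back on the pool. */
        mali_mem_os_free(&os_mem.pages, os_mem.count, MALI_FALSE);
        return 0;
}
#endif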
278
279
280 _mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props)
281 {
282         struct mali_page_directory *pagedir = session->page_directory;
283         struct mali_page_node *m_page;
284         u32 virt;
285         u32 prop = props;
286
287         MALI_DEBUG_ASSERT_POINTER(session);
288         MALI_DEBUG_ASSERT_POINTER(os_mem);
289
290         MALI_DEBUG_ASSERT(start_page <= os_mem->count);
291         MALI_DEBUG_ASSERT((start_page + mapping_pgae_num) <= os_mem->count);
292
293         if ((start_page + mapping_pgae_num) == os_mem->count) {
294
295                 virt = vaddr + MALI_MMU_PAGE_SIZE * (start_page + mapping_pgae_num);
296
297                 list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
298
299                         virt -= MALI_MMU_PAGE_SIZE;
300                         if (mapping_pgae_num > 0) {
301                                 dma_addr_t phys = page_private(m_page->page);
302 #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
303                                 /* Verify that the "physical" address is 32-bit and
304                                 * usable for Mali, when on a system with bus addresses
305                                 * wider than 32-bit. */
306                                 MALI_DEBUG_ASSERT(0 == (phys >> 32));
307 #endif
308                                 mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
309                         } else {
310                                 break;
311                         }
312                         mapping_pgae_num--;
313                 }
314
315         } else {
316                 u32 i = 0;
317                 virt = vaddr;
318                 list_for_each_entry(m_page, &os_mem->pages, list) {
319
320                         if (i >= start_page) {
321                                 dma_addr_t phys = page_private(m_page->page);
322
323 #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
324                                 /* Verify that the "physical" address is 32-bit and
325                                 * usable for Mali, when on a system with bus addresses
326                                 * wider than 32-bit. */
327                                 MALI_DEBUG_ASSERT(0 == (phys >> 32));
328 #endif
329                                 mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
330                         }
331                         i++;
332                         virt += MALI_MMU_PAGE_SIZE;
333                 }
334         }
335         return _MALI_OSK_ERR_OK;
336 }
337
338
339 void mali_mem_os_mali_unmap(mali_mem_allocation *alloc)
340 {
341         struct mali_session_data *session;
342         MALI_DEBUG_ASSERT_POINTER(alloc);
343         session = alloc->session;
344         MALI_DEBUG_ASSERT_POINTER(session);
345
346         mali_session_memory_lock(session);
347         mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
348                                alloc->flags);
349         mali_session_memory_unlock(session);
350 }
351
352 int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
353 {
354         mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
355         struct mali_page_node *m_page;
356         struct page *page;
357         int ret;
358         unsigned long addr = vma->vm_start;
359         MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
360
361         list_for_each_entry(m_page, &os_mem->pages, list) {
362                 /* We should use vm_insert_page, but it does a dcache
363                  * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
364                 ret = vm_insert_page(vma, addr, page);
365                 */
366                 page = m_page->page;
367                 ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
368
369                 if (unlikely(0 != ret)) {
370                         return -EFAULT;
371                 }
372                 addr += _MALI_OSK_MALI_PAGE_SIZE;
373         }
374
375         return 0;
376 }
377
378 _mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size)
379 {
380         mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
381         struct mali_page_node *m_page;
382         int ret;
383         int offset;
384         int mapping_page_num;
385         int count;
386
387         unsigned long vstart = vma->vm_start;
388         count = 0;
389         MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
390         MALI_DEBUG_ASSERT(0 == start_vaddr % _MALI_OSK_MALI_PAGE_SIZE);
391         MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
392         offset = (start_vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
393         MALI_DEBUG_ASSERT(offset <= os_mem->count);
394         mapping_page_num = mappig_size / _MALI_OSK_MALI_PAGE_SIZE;
395         MALI_DEBUG_ASSERT((offset + mapping_page_num) <= os_mem->count);
396
397         if ((offset + mapping_page_num) == os_mem->count) {
398
399                 unsigned long vm_end = start_vaddr + mappig_size;
400
401                 list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
402
403                         vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
404                         if (mapping_page_num > 0) {
405                                 ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
406
407                                 if (unlikely(0 != ret)) {
408                                         /* vm_insert_pfn() returns -EBUSY if the page is already mapped in the page table; that is OK. */
409                                         if (-EBUSY == ret) {
410                                                 break;
411                                         } else {
412                                                 MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, offset is %d,page_count is %d\n",
413                                                                      ret,  offset + mapping_page_num, os_mem->count));
414                                         }
415                                         return _MALI_OSK_ERR_FAULT;
416                                 }
417                         } else {
418                                 break;
419                         }
420                         mapping_page_num--;
421
422                 }
423         } else {
424
425                 list_for_each_entry(m_page, &os_mem->pages, list) {
426                         if (count >= offset) {
427
428                                 ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
429
430                                 if (unlikely(0 != ret)) {
431                                         /* vm_insert_pfn() returns -EBUSY if the page is already mapped in the page table; that is OK. */
432                                         if (-EBUSY == ret) {
433                                                 break;
434                                         } else {
435                                                 MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, count is %d, offset is %d,page_count is %d\n",
436                                                                      ret, count, offset, os_mem->count));
437                                         }
438                                         return _MALI_OSK_ERR_FAULT;
439                                 }
440                         }
441                         count++;
442                         vstart += _MALI_OSK_MALI_PAGE_SIZE;
443                 }
444         }
445         return _MALI_OSK_ERR_OK;
446 }
447
448 u32 mali_mem_os_release(mali_mem_backend *mem_bkend)
449 {
450
451         mali_mem_allocation *alloc;
452         struct mali_session_data *session;
453         u32 free_pages_nr = 0;
454         MALI_DEBUG_ASSERT_POINTER(mem_bkend);
455         MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
456
457         alloc = mem_bkend->mali_allocation;
458         MALI_DEBUG_ASSERT_POINTER(alloc);
459
460         session = alloc->session;
461         MALI_DEBUG_ASSERT_POINTER(session);
462
463         /* Unmap the memory from the mali virtual address space. */
464         mali_mem_os_mali_unmap(alloc);
465         mutex_lock(&mem_bkend->mutex);
466         /* Free pages */
467         if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) {
468                 /* Lock to avoid the free race condition for the cow shared memory page node. */
469                 _mali_osk_mutex_wait(session->cow_lock);
470                 free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE);
471                 _mali_osk_mutex_signal(session->cow_lock);
472         } else {
473                 free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE);
474         }
475         mutex_unlock(&mem_bkend->mutex);
476
477         MALI_DEBUG_PRINT(4, ("OS Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
478                              free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
479
480         mem_bkend->os_mem.count = 0;
481         return free_pages_nr;
482 }
483
484
485 #define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
486 static struct {
487         struct {
488                 mali_dma_addr phys;
489                 mali_io_address mapping;
490         } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
491         size_t count;
492         spinlock_t lock;
493 } mali_mem_page_table_page_pool = {
494         .count = 0,
495         .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
496 };
497
498 _mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
499 {
500         _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
501         dma_addr_t tmp_phys;
502
503         spin_lock(&mali_mem_page_table_page_pool.lock);
504         if (0 < mali_mem_page_table_page_pool.count) {
505                 u32 i = --mali_mem_page_table_page_pool.count;
506                 *phys = mali_mem_page_table_page_pool.page[i].phys;
507                 *mapping = mali_mem_page_table_page_pool.page[i].mapping;
508
509                 ret = _MALI_OSK_ERR_OK;
510         }
511         spin_unlock(&mali_mem_page_table_page_pool.lock);
512
513         if (_MALI_OSK_ERR_OK != ret) {
514 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
515                 *mapping = dma_alloc_attrs(&mali_platform_device->dev,
516                                            _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
517                                            GFP_KERNEL, &dma_attrs_wc);
518 #else
519                 *mapping = dma_alloc_writecombine(&mali_platform_device->dev,
520                                                   _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
521 #endif
522                 if (NULL != *mapping) {
523                         ret = _MALI_OSK_ERR_OK;
524
525 #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
526                         /* Verify that the "physical" address is 32-bit and
527                          * usable for Mali, when on a system with bus addresses
528                          * wider than 32-bit. */
529                         MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
530 #endif
531
532                         *phys = (mali_dma_addr)tmp_phys;
533                 }
534         }
535
536         return ret;
537 }
538
539 void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
540 {
541         spin_lock(&mali_mem_page_table_page_pool.lock);
542         if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
543                 u32 i = mali_mem_page_table_page_pool.count;
544                 mali_mem_page_table_page_pool.page[i].phys = phys;
545                 mali_mem_page_table_page_pool.page[i].mapping = virt;
546
547                 ++mali_mem_page_table_page_pool.count;
548
549                 spin_unlock(&mali_mem_page_table_page_pool.lock);
550         } else {
551                 spin_unlock(&mali_mem_page_table_page_pool.lock);
552
553 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
554                 dma_free_attrs(&mali_platform_device->dev,
555                                _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
556                                &dma_attrs_wc);
557 #else
558                 dma_free_writecombine(&mali_platform_device->dev,
559                                       _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
560 #endif
561         }
562 }
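
#if 0
/*
 * Illustrative sketch only, not built: a typical round trip through the small
 * static page-table page pool.
 */
static void example_table_page_roundtrip(void)
{
        mali_dma_addr phys;
        mali_io_address virt;

        if (_MALI_OSK_ERR_OK != mali_mem_os_get_table_page(&phys, &virt))
                return;

        /* ... the 4 KB write-combined page is used as an MMU page table ... */

        /* Goes back on the pool, or is freed via dma_free_* when the pool is full. */
        mali_mem_os_release_table_page(phys, virt);
}
#endif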
563
564 void mali_mem_os_free_page_node(struct mali_page_node *m_page)
565 {
566         struct page *page = m_page->page;
567         MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS);
568
569         if (1 == page_count(page)) {
570                 dma_unmap_page(&mali_platform_device->dev, page_private(page),
571                                _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
572                 ClearPagePrivate(page);
573         }
574         __free_page(page);
575         m_page->page = NULL;
576         list_del(&m_page->list);
577         kfree(m_page);
578 }
579
580 /* The maximum number of page table pool pages to free in one go. */
581 #define MALI_MEM_OS_CHUNK_TO_FREE 64UL
582
583 /* Free a certain number of pages from the page table page pool.
584  * The pool lock must be held when calling the function, and the lock will be
585  * released before returning.
586  */
587 static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
588 {
589         mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
590         void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
591         u32 i;
592
593         MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);
594
595         /* Remove nr_to_free pages from the pool and store them locally on stack. */
596         for (i = 0; i < nr_to_free; i++) {
597                 u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;
598
599                 phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
600                 virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
601         }
602
603         mali_mem_page_table_page_pool.count -= nr_to_free;
604
605         spin_unlock(&mali_mem_page_table_page_pool.lock);
606
607         /* After releasing the spinlock: free the pages we removed from the pool. */
608         for (i = 0; i < nr_to_free; i++) {
609 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
610                 dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
611                                virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
612 #else
613                 dma_free_writecombine(&mali_platform_device->dev,
614                                       _MALI_OSK_MALI_PAGE_SIZE,
615                                       virt_arr[i], (dma_addr_t)phys_arr[i]);
616 #endif
617         }
618 }
619
620 static void mali_mem_os_trim_page_table_page_pool(void)
621 {
622         size_t nr_to_free = 0;
623         size_t nr_to_keep;
624
625         /* Keep 2 page table pages for each 1024 pages in the page cache. */
626         nr_to_keep = mali_mem_os_allocator.pool_count / 512;
627         /* And a minimum of eight pages, to accommodate new sessions. */
628         nr_to_keep += 8;
629
630         if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;
631
632         if (nr_to_keep < mali_mem_page_table_page_pool.count) {
633                 nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
634                 nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
635         }
636
637         /* Pool lock will be released by the callee. */
638         mali_mem_os_page_table_pool_free(nr_to_free);
639 }
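
/*
 * Illustrative arithmetic: with, say, 4096 pages in the general page pool the
 * function above keeps 4096 / 512 + 8 = 16 page table pages and frees at most
 * MALI_MEM_OS_CHUNK_TO_FREE (64) of any excess per invocation.
 */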
640
641 static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
642 {
643         return mali_mem_os_allocator.pool_count;
644 }
645
646 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
647 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
648 static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
649 #else
650 static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
651 #endif /* Linux < 2.6.35 */
652 #else
653 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
654 static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
655 #else
656 static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
657 #endif /* Linux < 3.12.0 */
658 #endif /* Linux < 3.0.0 */
659 {
660         struct mali_page_node *m_page, *m_tmp;
661         unsigned long flags;
662         struct list_head *le, pages;
663 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
664         int nr = nr_to_scan;
665 #else
666         int nr = sc->nr_to_scan;
667 #endif
668
669         if (0 == nr) {
670                 return mali_mem_os_shrink_count(shrinker, sc);
671         }
672
673         if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
674                 /* Not able to lock. */
675                 return -1;
676         }
677
678         if (0 == mali_mem_os_allocator.pool_count) {
679                 /* No pages available. */
680                 spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
681                 return 0;
682         }
683
684         /* Release from general page pool */
685         nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
686         mali_mem_os_allocator.pool_count -= nr;
687         list_for_each(le, &mali_mem_os_allocator.pool_pages) {
688                 --nr;
689                 if (0 == nr) break;
690         }
691         list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
692         spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
693
694         list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
695                 mali_mem_os_free_page_node(m_page);
696         }
697
698         if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
699                 /* Pool is below the minimum size, stop the trim timer */
700                 MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
701                 cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
702         }
703
704 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
705         return mali_mem_os_shrink_count(shrinker, sc);
706 #else
707         return nr;
708 #endif
709 }
710
711 static void mali_mem_os_trim_pool(struct work_struct *data)
712 {
713         struct mali_page_node *m_page, *m_tmp;
714         struct list_head *le;
715         LIST_HEAD(pages);
716         size_t nr_to_free;
717
718         MALI_IGNORE(data);
719
720         MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));
721
722         /* Release from general page pool */
723         spin_lock(&mali_mem_os_allocator.pool_lock);
724         if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
725                 size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
726                 const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES);
727
728                 /* Free half of the pages above the static limit, but at least 64 pages (256 KB). */
729                 nr_to_free = max(count / 2, min_to_free);
730
731                 mali_mem_os_allocator.pool_count -= nr_to_free;
732                 list_for_each(le, &mali_mem_os_allocator.pool_pages) {
733                         --nr_to_free;
734                         if (0 == nr_to_free) break;
735                 }
736                 list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
737         }
738         spin_unlock(&mali_mem_os_allocator.pool_lock);
739
740         list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
741                 mali_mem_os_free_page_node(m_page);
742         }
743
744         /* Release some pages from page table page pool */
745         mali_mem_os_trim_page_table_page_pool();
746
747         if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
748                 MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
749                 queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
750         }
751 }
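
/*
 * Illustrative arithmetic, assuming a hypothetical 1024-page static limit: with
 * 1536 pages on the pool the trim above frees max((1536 - 1024) / 2, 64) = 256
 * pages, leaving 1280; a further trim is queued since the pool is still above
 * the limit.
 */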
752
753 _mali_osk_errcode_t mali_mem_os_init(void)
754 {
755         mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
756         if (NULL == mali_mem_os_allocator.wq) {
757                 return _MALI_OSK_ERR_NOMEM;
758         }
759
760 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
761         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
762 #endif
763
764         register_shrinker(&mali_mem_os_allocator.shrinker);
765
766         return _MALI_OSK_ERR_OK;
767 }
768
769 void mali_mem_os_term(void)
770 {
771         struct mali_page_node *m_page, *m_tmp;
772         unregister_shrinker(&mali_mem_os_allocator.shrinker);
773         cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
774
775         if (NULL != mali_mem_os_allocator.wq) {
776                 destroy_workqueue(mali_mem_os_allocator.wq);
777                 mali_mem_os_allocator.wq = NULL;
778         }
779
780         spin_lock(&mali_mem_os_allocator.pool_lock);
781         list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) {
782                 mali_mem_os_free_page_node(m_page);
783
784                 --mali_mem_os_allocator.pool_count;
785         }
786         BUG_ON(mali_mem_os_allocator.pool_count);
787         spin_unlock(&mali_mem_os_allocator.pool_lock);
788
789         /* Release from page table page pool */
790         do {
791                 u32 nr_to_free;
792
793                 spin_lock(&mali_mem_page_table_page_pool.lock);
794
795                 nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);
796
797                 /* Pool lock will be released by the callee. */
798                 mali_mem_os_page_table_pool_free(nr_to_free);
799         } while (0 != mali_mem_page_table_page_pool.count);
800 }
801
802 _mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
803 {
804         mali_mem_os_allocator.allocation_limit = size;
805
806         MALI_SUCCESS;
807 }
808
809 u32 mali_mem_os_stat(void)
810 {
811         return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
812 }