/*
 * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include <mali_kbase.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>

/* This function is only provided for backwards compatibility with kernels
 * which use the old carveout allocator.
 *
 * The forward declaration is to keep sparse happy.
 */
int __init kbase_carveout_mem_reserve(phys_addr_t size);
int __init kbase_carveout_mem_reserve(phys_addr_t size)
{
	return 0;
}
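
/*
 * Pools are arranged in two levels: a per-context pool (tagged "kctx" by
 * pool_dbg() below) has a next_pool, while the per-device pool (tagged
 * "kbdev") does not. Freed pages are kept in the local pool, spill into
 * next_pool once the local pool is full, and are only then returned to the
 * kernel; allocations search the same chain in order (local pool, next_pool,
 * then the kernel allocator).
 */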

#define pool_dbg(pool, format, ...) \
	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
		(pool->next_pool) ? "kctx" : "kbdev",			\
		kbase_mem_pool_size(pool),				\
		kbase_mem_pool_max_size(pool),				\
		##__VA_ARGS__)

static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
	spin_lock(&pool->pool_lock);
}

static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
{
	spin_unlock(&pool->pool_lock);
}

static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
	ssize_t max_size = kbase_mem_pool_max_size(pool);
	ssize_t cur_size = kbase_mem_pool_size(pool);

	return max(max_size - cur_size, (ssize_t)0);
}

static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
}

static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) == 0;
}

static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
		struct page *p)
{
	lockdep_assert_held(&pool->pool_lock);

	list_add(&p->lru, &pool->page_list);
	pool->cur_size++;
	zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "added page\n");
}

static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_locked(pool, p);
	kbase_mem_pool_unlock(pool);
}

static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	list_for_each_entry(p, page_list, lru) {
		zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
	}

	list_splice(page_list, &pool->page_list);
	pool->cur_size += nr_pages;

	pool_dbg(pool, "added %zu pages\n", nr_pages);
}

static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
	kbase_mem_pool_unlock(pool);
}

static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	if (kbase_mem_pool_is_empty(pool))
		return NULL;

	p = list_first_entry(&pool->page_list, struct page, lru);
	list_del_init(&p->lru);
	pool->cur_size--;
	zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "removed page\n");

	return p;
}

static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
{
	struct page *p;

	kbase_mem_pool_lock(pool);
	p = kbase_mem_pool_remove_locked(pool);
	kbase_mem_pool_unlock(pool);

	return p;
}

static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;

	dma_sync_single_for_device(dev, kbase_dma_addr(p),
			PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	clear_highpage(p);
	kbase_mem_pool_sync_page(pool, p);
}

static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
		struct page *p)
{
	/* Zero page before spilling */
	kbase_mem_pool_zero_page(next_pool, p);

	kbase_mem_pool_add(next_pool, p);
}
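
/*
 * Allocate one page from the kernel and DMA-map it for the GPU. GFP_HIGHUSER
 * is used where the DMA cache sync can cope with highmem; __GFP_NORETRY is
 * added for kernel threads so a grow triggered by a GPU page fault cannot
 * invoke the OOM killer. Returns NULL on allocation or mapping failure.
 */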

static struct page *kbase_mem_pool_alloc_page(struct kbase_mem_pool *pool)
{
	struct page *p;
	gfp_t gfp;
	struct device *dev = pool->kbdev->dev;
	dma_addr_t dma_addr;

#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
	/* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
	gfp = GFP_USER | __GFP_ZERO;
#else
	gfp = GFP_HIGHUSER | __GFP_ZERO;
#endif

	if (current->flags & PF_KTHREAD) {
		/* Don't trigger the OOM killer from kernel threads, e.g. when
		 * growing memory on a GPU page fault */
		gfp |= __GFP_NORETRY;
	}

	p = alloc_page(gfp);
	if (!p)
		return NULL;

	dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		__free_page(p);
		return NULL;
	}

	WARN_ON(dma_addr != page_to_phys(p));
	kbase_set_dma_addr(p, dma_addr);

	pool_dbg(pool, "alloced page from kernel\n");

	return p;
}

static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;
	dma_addr_t dma_addr = kbase_dma_addr(p);

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	kbase_clear_dma_addr(p);
	__free_page(p);

	pool_dbg(pool, "freed page to kernel\n");
}

static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	struct page *p;
	size_t i;

	lockdep_assert_held(&pool->pool_lock);

	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	return i;
}

static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	size_t nr_freed;

	kbase_mem_pool_lock(pool);
	nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	kbase_mem_pool_unlock(pool);

	return nr_freed;
}

static size_t kbase_mem_pool_grow(struct kbase_mem_pool *pool,
		size_t nr_to_grow)
{
	struct page *p;
	size_t i;

	for (i = 0; i < nr_to_grow && !kbase_mem_pool_is_full(pool); i++) {
		p = kbase_mem_pool_alloc_page(pool);
		if (!p)
			break;	/* Out of memory - stop growing */
		kbase_mem_pool_add(pool, p);
	}

	return i;
}
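
/*
 * Resize the pool towards new_size: shrink by freeing pages to the kernel or
 * grow by allocating from it, and return the size actually reached (growing
 * stops early if the pool hits max_size or an allocation fails).
 */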

size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
	size_t cur_size;

	cur_size = kbase_mem_pool_size(pool);

	if (new_size < cur_size)
		kbase_mem_pool_shrink(pool, cur_size - new_size);
	else if (new_size > cur_size)
		kbase_mem_pool_grow(pool, new_size - cur_size);

	cur_size = kbase_mem_pool_size(pool);

	return cur_size;
}

void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
{
	size_t cur_size;
	size_t nr_to_shrink;

	kbase_mem_pool_lock(pool);

	pool->max_size = max_size;

	cur_size = kbase_mem_pool_size(pool);
	if (max_size < cur_size) {
		nr_to_shrink = cur_size - max_size;
		kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	}

	kbase_mem_pool_unlock(pool);
}
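
/*
 * Shrinker integration: the pool registers itself as reclaimable memory so
 * that, under memory pressure, the kernel can query how many pages it holds
 * (count_objects) and free some of them (scan_objects). Kernels before 3.12
 * expose a single shrink() callback instead, which is emulated further down
 * by dispatching on sc->nr_to_scan.
 */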

static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;

	pool = container_of(s, struct kbase_mem_pool, reclaim);
	pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
	return kbase_mem_pool_size(pool);
}

static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;
	unsigned long freed;

	pool = container_of(s, struct kbase_mem_pool, reclaim);

	pool_dbg(pool, "reclaim scan %lu:\n", sc->nr_to_scan);

	freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);

	pool_dbg(pool, "reclaim freed %lu pages\n", freed);

	return freed;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
		struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)
		return kbase_mem_pool_reclaim_count_objects(s, sc);

	return kbase_mem_pool_reclaim_scan_objects(s, sc);
}
#endif

int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool)
{
	pool->cur_size = 0;
	pool->max_size = max_size;
	pool->kbdev = kbdev;
	pool->next_pool = next_pool;

	spin_lock_init(&pool->pool_lock);
	INIT_LIST_HEAD(&pool->page_list);

	/* Register shrinker */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
#else
	pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
	pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
#endif
	pool->reclaim.seeks = DEFAULT_SEEKS;
	/* struct shrinker does not define batch on kernels prior to 3.1 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
	pool->reclaim.batch = 0;
#endif
	register_shrinker(&pool->reclaim);

	pool_dbg(pool, "initialized\n");

	return 0;
}

void kbase_mem_pool_term(struct kbase_mem_pool *pool)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_spill = 0;
	LIST_HEAD(spill_list);
	size_t i;

	pool_dbg(pool, "terminate()\n");

	unregister_shrinker(&pool->reclaim);

	kbase_mem_pool_lock(pool);
	pool->max_size = 0;

	if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool (may overspill) */
		nr_to_spill = kbase_mem_pool_capacity(next_pool);
		nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);

		/* Zero pages first without holding the next_pool lock */
		for (i = 0; i < nr_to_spill; i++) {
			p = kbase_mem_pool_remove_locked(pool);
			kbase_mem_pool_zero_page(pool, p);
			list_add(&p->lru, &spill_list);
		}
	}

	while (!kbase_mem_pool_is_empty(pool)) {
		/* Free remaining pages to kernel */
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	kbase_mem_pool_unlock(pool);

	if (next_pool && nr_to_spill) {
		/* Add new page list to next_pool */
		kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);

		pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
	}

	pool_dbg(pool, "terminated\n");
}
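
/*
 * Allocate a single page: take one from this pool if possible, otherwise
 * recurse into next_pool, and only fall back to the kernel allocator when no
 * pool has a page available. May return NULL if that final allocation fails.
 */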

struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
	struct page *p;

	pool_dbg(pool, "alloc()\n");

	p = kbase_mem_pool_remove(pool);

	if (!p && pool->next_pool) {
		/* Allocate via next pool */
		return kbase_mem_pool_alloc(pool->next_pool);
	}

	if (!p) {
		/* Get page from kernel */
		p = kbase_mem_pool_alloc_page(pool);
	}

	return p;
}
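
/*
 * Return a single page: keep it in this pool if there is room (syncing it for
 * the GPU first when it is dirty), otherwise zero it and spill it into
 * next_pool, and free it back to the kernel only when both pools are full.
 */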

void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
		bool dirty)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;

	pool_dbg(pool, "free()\n");

	if (!kbase_mem_pool_is_full(pool)) {
		/* Add to our own pool */
		if (dirty)
			kbase_mem_pool_sync_page(pool, p);

		kbase_mem_pool_add(pool, p);
	} else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool */
		kbase_mem_pool_spill(next_pool, p);
	} else {
		/* Free page to kernel */
		kbase_mem_pool_free_page(pool, p);
	}
}
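
/*
 * Allocate nr_pages physical pages into pages[]: drain this pool first, then
 * ask next_pool for the remainder, then the kernel. On failure, every page
 * gathered so far is released again via kbase_mem_pool_free_pages() and a
 * negative error code is returned.
 */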

int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages)
{
	struct page *p;
	size_t nr_from_pool;
	size_t i;
	int err = -ENOMEM;

	pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);

	/* Get pages from this pool */
	kbase_mem_pool_lock(pool);
	nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
	for (i = 0; i < nr_from_pool; i++) {
		p = kbase_mem_pool_remove_locked(pool);
		pages[i] = page_to_phys(p);
	}
	kbase_mem_pool_unlock(pool);

	if (i != nr_pages && pool->next_pool) {
		/* Allocate via next pool */
		err = kbase_mem_pool_alloc_pages(pool->next_pool,
				nr_pages - i, pages + i);
		if (err)
			goto err_rollback;

		i = nr_pages;
	}

	/* Get any remaining pages from kernel */
	for (; i < nr_pages; i++) {
		p = kbase_mem_pool_alloc_page(pool);
		if (!p)
			goto err_rollback;
		pages[i] = page_to_phys(p);
	}

	pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);

	return 0;

err_rollback:
	/* Give back any pages obtained so far */
	kbase_mem_pool_free_pages(pool, i, pages, false);
	return err;
}

static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
		size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
{
	struct page *p;
	size_t nr_to_pool = 0;
	LIST_HEAD(new_page_list);
	size_t i;

	if (!nr_pages)
		return;

	pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
			nr_pages, zero, sync);

	/* Zero/sync pages first without holding the pool lock */
	for (i = 0; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);

		if (zero)
			kbase_mem_pool_zero_page(pool, p);
		else if (sync)
			kbase_mem_pool_sync_page(pool, p);

		list_add(&p->lru, &new_page_list);
		nr_to_pool++;
		pages[i] = 0;
	}

	/* Add new page list to pool */
	kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);

	pool_dbg(pool, "add_array(%zu) added %zu pages\n",
			nr_pages, nr_to_pool);
}

void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_pool;
	LIST_HEAD(to_pool_list);
	size_t i = 0;

	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);

	/* Add to this pool */
	nr_to_pool = kbase_mem_pool_capacity(pool);
	nr_to_pool = min(nr_pages, nr_to_pool);

	kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
	i += nr_to_pool;

	if (i != nr_pages && next_pool) {
		/* Spill to next pool (may overspill) */
		nr_to_pool = kbase_mem_pool_capacity(next_pool);
		nr_to_pool = min(nr_pages - i, nr_to_pool);

		kbase_mem_pool_add_array(next_pool, nr_to_pool, pages + i,
				true, dirty);
		i += nr_to_pool;
	}

	/* Free any remaining pages to kernel */
	for (; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);
		kbase_mem_pool_free_page(pool, p);
		pages[i] = 0;
	}

	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}
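
/*
 * Illustrative usage sketch (not part of the driver): one way a caller could
 * wire a context-level pool to the device-level pool using the API above.
 * The mem_pool field names and the pool sizes are assumptions made for this
 * example only.
 *
 *	int example_pool_setup(struct kbase_context *kctx,
 *			struct kbase_device *kbdev)
 *	{
 *		phys_addr_t pages[16];
 *		int err;
 *
 *		// Device pool: no next_pool, tagged "kbdev" by pool_dbg()
 *		err = kbase_mem_pool_init(&kbdev->mem_pool, 4096, kbdev, NULL);
 *		if (err)
 *			return err;
 *
 *		// Context pool: spills into and borrows from the device pool
 *		err = kbase_mem_pool_init(&kctx->mem_pool, 512, kbdev,
 *				&kbdev->mem_pool);
 *		if (err)
 *			return err;
 *
 *		// Batch allocate, then free; freeing prefers pooling pages
 *		// over returning them to the kernel
 *		err = kbase_mem_pool_alloc_pages(&kctx->mem_pool, 16, pages);
 *		if (!err)
 *			kbase_mem_pool_free_pages(&kctx->mem_pool, 16, pages,
 *					false);
 *
 *		kbase_mem_pool_term(&kctx->mem_pool);
 *		kbase_mem_pool_term(&kbdev->mem_pool);
 *		return err;
 *	}
 */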