/*
 * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include <mali_kbase.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>
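
/*
 * Memory pool implementation. Each pool keeps a list of free, DMA-mapped
 * pages protected by pool_lock. A per-context pool can chain to the
 * per-device pool through next_pool, so allocations fall through and freed
 * pages spill onwards before the kernel allocator is involved. A shrinker is
 * registered per pool so the kernel can reclaim pooled pages under memory
 * pressure.
 */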
/* This function is only provided for backwards compatibility with kernels
 * which use the old carveout allocator.
 *
 * The forward declaration is to keep sparse happy.
 */
int __init kbase_carveout_mem_reserve(phys_addr_t size);
int __init kbase_carveout_mem_reserve(phys_addr_t size)
{
	return 0;
}

#define pool_dbg(pool, format, ...) \
	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
		(pool->next_pool) ? "kctx" : "kbdev",	\
		kbase_mem_pool_size(pool),	\
		kbase_mem_pool_max_size(pool),	\
		##__VA_ARGS__)

#define NOT_DIRTY false
#define NOT_RECLAIMED false

static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
	spin_lock(&pool->pool_lock);
}

static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
{
	spin_unlock(&pool->pool_lock);
}

static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
	ssize_t max_size = kbase_mem_pool_max_size(pool);
	ssize_t cur_size = kbase_mem_pool_size(pool);

	return max(max_size - cur_size, (ssize_t)0);
}

static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
}

static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) == 0;
}
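
/* Pages held in a pool are accounted as NR_SLAB_RECLAIMABLE, matching the
 * shrinker registered in kbase_mem_pool_init(): the pooled memory is
 * reported to the kernel as reclaimable rather than appearing leaked. */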
static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
		struct page *p)
{
	lockdep_assert_held(&pool->pool_lock);

	list_add(&p->lru, &pool->page_list);
	pool->cur_size++;

	zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "added page\n");
}

static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_locked(pool, p);
	kbase_mem_pool_unlock(pool);
}

static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	list_for_each_entry(p, page_list, lru) {
		zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
	}

	list_splice(page_list, &pool->page_list);
	pool->cur_size += nr_pages;

	pool_dbg(pool, "added %zu pages\n", nr_pages);
}

static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
	kbase_mem_pool_unlock(pool);
}

static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	if (kbase_mem_pool_is_empty(pool))
		return NULL;

	p = list_first_entry(&pool->page_list, struct page, lru);
	list_del_init(&p->lru);
	pool->cur_size--;

	zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "removed page\n");

	return p;
}

static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
{
	struct page *p;

	kbase_mem_pool_lock(pool);
	p = kbase_mem_pool_remove_locked(pool);
	kbase_mem_pool_unlock(pool);

	return p;
}

static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;

	dma_sync_single_for_device(dev, kbase_dma_addr(p),
			PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	clear_highpage(p);
	kbase_mem_pool_sync_page(pool, p);
}

static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
		struct page *p)
{
	/* Zero page before spilling */
	kbase_mem_pool_zero_page(next_pool, p);

	kbase_mem_pool_add(next_pool, p);
}
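
/* Allocate a page from the kernel and DMA-map it for use by the GPU.
 * Returns NULL if the allocation or the DMA mapping fails. */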
struct page *kbase_mem_alloc_page(struct kbase_device *kbdev)
{
	struct page *p;
	gfp_t gfp;
	struct device *dev = kbdev->dev;
	dma_addr_t dma_addr;

#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
	/* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
	gfp = GFP_USER | __GFP_ZERO;
#else
	gfp = GFP_HIGHUSER | __GFP_ZERO;
#endif

	if (current->flags & PF_KTHREAD) {
		/* Don't trigger OOM killer from kernel threads, e.g. when
		 * growing memory on GPU page fault */
		gfp |= __GFP_NORETRY;
	}

	p = alloc_page(gfp);
	if (!p)
		return NULL;

	dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		__free_page(p);
		return NULL;
	}

	WARN_ON(dma_addr != page_to_phys(p));

	kbase_set_dma_addr(p, dma_addr);

	return p;
}

static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;
	dma_addr_t dma_addr = kbase_dma_addr(p);

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	kbase_clear_dma_addr(p);
	__free_page(p);

	pool_dbg(pool, "freed page to kernel\n");
}
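
/* Free up to nr_to_shrink pages from the pool back to the kernel and
 * return the number actually freed. */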
static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	struct page *p;
	size_t i;

	lockdep_assert_held(&pool->pool_lock);

	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	return i;
}

static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	size_t nr_freed;

	kbase_mem_pool_lock(pool);
	nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	kbase_mem_pool_unlock(pool);

	return nr_freed;
}
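
/* Grow the pool by allocating nr_to_grow new pages from the kernel;
 * kbase_mem_pool_trim() uses this together with the shrink helpers to move
 * the pool towards a requested size. */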
int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
		size_t nr_to_grow)
{
	struct page *p;
	size_t i;

	for (i = 0; i < nr_to_grow; i++) {
		p = kbase_mem_alloc_page(pool->kbdev);
		if (!p)
			return -ENOMEM;

		kbase_mem_pool_add(pool, p);
	}

	return 0;
}

void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
	size_t cur_size;

	cur_size = kbase_mem_pool_size(pool);

	if (new_size > pool->max_size)
		new_size = pool->max_size;

	if (new_size < cur_size)
		kbase_mem_pool_shrink(pool, cur_size - new_size);
	else if (new_size > cur_size)
		kbase_mem_pool_grow(pool, new_size - cur_size);
}

void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
{
	size_t cur_size;
	size_t nr_to_shrink;

	kbase_mem_pool_lock(pool);

	pool->max_size = max_size;

	cur_size = kbase_mem_pool_size(pool);
	if (max_size < cur_size) {
		nr_to_shrink = cur_size - max_size;
		kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	}

	kbase_mem_pool_unlock(pool);
}
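
/* Shrinker callbacks: "count" reports how many pooled pages could be
 * reclaimed, "scan" frees that many pages back to the kernel when the
 * system is under memory pressure. */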
static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;

	pool = container_of(s, struct kbase_mem_pool, reclaim);
	pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
	return kbase_mem_pool_size(pool);
}

static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;
	unsigned long freed;

	pool = container_of(s, struct kbase_mem_pool, reclaim);

	pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);

	freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);

	pool_dbg(pool, "reclaim freed %ld pages\n", freed);

	return freed;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
		struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)
		return kbase_mem_pool_reclaim_count_objects(s, sc);

	return kbase_mem_pool_reclaim_scan_objects(s, sc);
}
#endif

int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool)
{
	pool->cur_size = 0;
	pool->max_size = max_size;
	pool->kbdev = kbdev;
	pool->next_pool = next_pool;

	spin_lock_init(&pool->pool_lock);
	INIT_LIST_HEAD(&pool->page_list);

	/* Register shrinker */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
#else
	pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
	pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
#endif
	pool->reclaim.seeks = DEFAULT_SEEKS;
	/* Kernel versions prior to 3.1:
	 * struct shrinker does not define batch */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
	pool->reclaim.batch = 0;
#endif
	register_shrinker(&pool->reclaim);

	pool_dbg(pool, "initialized\n");

	return 0;
}
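
/* Tear down a pool: unregister its shrinker, spill as many pages as fit
 * into the next pool (zeroing them first) and free the rest to the
 * kernel. */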
void kbase_mem_pool_term(struct kbase_mem_pool *pool)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_spill = 0;
	LIST_HEAD(spill_list);
	int i;

	pool_dbg(pool, "terminate()\n");

	unregister_shrinker(&pool->reclaim);

	kbase_mem_pool_lock(pool);
	pool->max_size = 0;

	if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool (may overspill) */
		nr_to_spill = kbase_mem_pool_capacity(next_pool);
		nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);

		/* Zero pages first without holding the next_pool lock */
		for (i = 0; i < nr_to_spill; i++) {
			p = kbase_mem_pool_remove_locked(pool);
			kbase_mem_pool_zero_page(pool, p);
			list_add(&p->lru, &spill_list);
		}
	}

	while (!kbase_mem_pool_is_empty(pool)) {
		/* Free remaining pages to kernel */
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	kbase_mem_pool_unlock(pool);

	if (next_pool && nr_to_spill) {
		/* Add new page list to next_pool */
		kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);

		pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
	}

	pool_dbg(pool, "terminated\n");
}
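
/* Allocate a single page, walking the pool chain until a page is found.
 * Returns NULL if every pool in the chain is empty. */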
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
	struct page *p;

	do {
		pool_dbg(pool, "alloc()\n");
		p = kbase_mem_pool_remove(pool);

		if (p)
			return p;

		pool = pool->next_pool;
	} while (pool);

	return NULL;
}

void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
		bool dirty)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;

	pool_dbg(pool, "free()\n");

	if (!kbase_mem_pool_is_full(pool)) {
		/* Add to our own pool */
		if (dirty)
			kbase_mem_pool_sync_page(pool, p);

		kbase_mem_pool_add(pool, p);
	} else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool */
		kbase_mem_pool_spill(next_pool, p);
	} else {
		/* Free page to kernel */
		kbase_mem_pool_free_page(pool, p);
	}
}
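
/* Allocate nr_pages pages: first from this pool, then from the next pool
 * in the chain, and finally from the kernel. On failure, pages obtained so
 * far are rolled back via kbase_mem_pool_free_pages(). */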
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages)
{
	struct page *p;
	size_t nr_from_pool;
	size_t i;
	int err = -ENOMEM;

	pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);

	/* Get pages from this pool */
	kbase_mem_pool_lock(pool);
	nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
	for (i = 0; i < nr_from_pool; i++) {
		p = kbase_mem_pool_remove_locked(pool);
		pages[i] = page_to_phys(p);
	}
	kbase_mem_pool_unlock(pool);

	if (i != nr_pages && pool->next_pool) {
		/* Allocate via next pool */
		err = kbase_mem_pool_alloc_pages(pool->next_pool,
				nr_pages - i, pages + i);
		if (err)
			goto err_rollback;

		i += nr_pages - i;
	}

	/* Get any remaining pages from kernel */
	for (; i < nr_pages; i++) {
		p = kbase_mem_alloc_page(pool->kbdev);
		if (!p)
			goto err_rollback;

		pages[i] = page_to_phys(p);
	}

	pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);

	return 0;

err_rollback:
	kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
	return err;
}
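
/* Add an array of physical pages to a pool, zeroing or cache-syncing them
 * first as requested. Array entries that are 0 are skipped, and consumed
 * entries are cleared to 0. */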
static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
		size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
{
	struct page *p;
	size_t nr_to_pool = 0;
	LIST_HEAD(new_page_list);
	size_t i;

	if (!nr_pages)
		return;

	pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
			nr_pages, zero, sync);

	/* Zero/sync pages first without holding the pool lock */
	for (i = 0; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);
		if (zero)
			kbase_mem_pool_zero_page(pool, p);
		else if (sync)
			kbase_mem_pool_sync_page(pool, p);

		list_add(&p->lru, &new_page_list);
		nr_to_pool++;
		pages[i] = 0;
	}

	/* Add new page list to pool */
	kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);

	pool_dbg(pool, "add_array(%zu) added %zu pages\n",
			nr_pages, nr_to_pool);
}
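
/* Free an array of pages: refill this pool up to its capacity, spill the
 * overflow to the next pool, and return whatever is left to the kernel.
 * Pages flagged as reclaimed bypass the pools and have their
 * NR_SLAB_RECLAIMABLE accounting dropped as they are freed. */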
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty, bool reclaimed)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_pool;
	LIST_HEAD(to_pool_list);
	size_t i = 0;

	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);

	if (!reclaimed) {
		/* Add to this pool */
		nr_to_pool = kbase_mem_pool_capacity(pool);
		nr_to_pool = min(nr_pages, nr_to_pool);

		kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);

		i += nr_to_pool;

		if (i != nr_pages && next_pool) {
			/* Spill to next pool (may overspill) */
			nr_to_pool = kbase_mem_pool_capacity(next_pool);
			nr_to_pool = min(nr_pages - i, nr_to_pool);

			kbase_mem_pool_add_array(next_pool, nr_to_pool,
					pages + i, true, dirty);
			i += nr_to_pool;
		}
	}

	/* Free any remaining pages to kernel */
	for (; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);
		if (reclaimed)
			zone_page_state_add(-1, page_zone(p),
					NR_SLAB_RECLAIMABLE);

		kbase_mem_pool_free_page(pool, p);
		pages[i] = 0;
	}

	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}