/*
 * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
18 #include <mali_kbase.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/highmem.h>
22 #include <linux/spinlock.h>
23 #include <linux/shrinker.h>
24 #include <linux/atomic.h>
25 #include <linux/version.h>
27 /* This function is only provided for backwards compatibility with kernels
28 * which use the old carveout allocator.
30 * The forward declaration is to keep sparse happy.
32 int __init kbase_carveout_mem_reserve(
34 int __init kbase_carveout_mem_reserve(phys_addr_t size)
/* Debug print for pool operations. Tags the message with "kctx" for a
 * per-context pool (one that has a next_pool to spill into) or "kbdev" for
 * the device-wide pool, plus the current/maximum pool size. */
#define pool_dbg(pool, format, ...) \
	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
		(pool->next_pool) ? "kctx" : "kbdev",	\
		kbase_mem_pool_size(pool),	\
		kbase_mem_pool_max_size(pool),	\
		##__VA_ARGS__)

#define NOT_DIRTY false
#define NOT_RECLAIMED false
49 static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
51 spin_lock(&pool->pool_lock);
54 static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
56 spin_unlock(&pool->pool_lock);
59 static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
61 ssize_t max_size = kbase_mem_pool_max_size(pool);
62 ssize_t cur_size = kbase_mem_pool_size(pool);
64 return max(max_size - cur_size, (ssize_t)0);
67 static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
69 return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
72 static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
74 return kbase_mem_pool_size(pool) == 0;
77 static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
80 lockdep_assert_held(&pool->pool_lock);
82 list_add(&p->lru, &pool->page_list);
85 zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
87 pool_dbg(pool, "added page\n");
/* Add a single page to the pool, taking the pool lock. */
static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_locked(pool, p);
	kbase_mem_pool_unlock(pool);
}
97 static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
98 struct list_head *page_list, size_t nr_pages)
102 lockdep_assert_held(&pool->pool_lock);
104 list_for_each_entry(p, page_list, lru) {
105 zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
108 list_splice(page_list, &pool->page_list);
109 pool->cur_size += nr_pages;
111 pool_dbg(pool, "added %zu pages\n", nr_pages);
114 static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
115 struct list_head *page_list, size_t nr_pages)
117 kbase_mem_pool_lock(pool);
118 kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
119 kbase_mem_pool_unlock(pool);
122 static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
126 lockdep_assert_held(&pool->pool_lock);
128 if (kbase_mem_pool_is_empty(pool))
131 p = list_first_entry(&pool->page_list, struct page, lru);
132 list_del_init(&p->lru);
135 zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);
137 pool_dbg(pool, "removed page\n");
/* Take a page off the pool, taking the pool lock.
 *
 * Return: the removed page, or NULL if the pool is empty.
 */
static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
{
	struct page *p;

	kbase_mem_pool_lock(pool);
	p = kbase_mem_pool_remove_locked(pool);
	kbase_mem_pool_unlock(pool);

	return p;
}
153 static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
156 struct device *dev = pool->kbdev->dev;
158 dma_sync_single_for_device(dev, kbase_dma_addr(p),
159 PAGE_SIZE, DMA_BIDIRECTIONAL);
/* Zero the page contents and sync them to the device, so a page handed to
 * another pool/context never leaks stale data. */
static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	clear_highpage(p);
	kbase_mem_pool_sync_page(pool, p);
}
/* Move a page into next_pool, zeroing it first so no data crosses the
 * pool boundary. */
static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
		struct page *p)
{
	/* Zero page before spilling */
	kbase_mem_pool_zero_page(next_pool, p);

	kbase_mem_pool_add(next_pool, p);
}
178 static struct page *kbase_mem_pool_alloc_page(struct kbase_mem_pool *pool)
182 struct device *dev = pool->kbdev->dev;
185 #if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
186 LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
187 /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
188 gfp = GFP_USER | __GFP_ZERO;
190 gfp = GFP_HIGHUSER | __GFP_ZERO;
193 if (current->flags & PF_KTHREAD) {
194 /* Don't trigger OOM killer from kernel threads, e.g. when
195 * growing memory on GPU page fault */
196 gfp |= __GFP_NORETRY;
203 dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
204 if (dma_mapping_error(dev, dma_addr)) {
209 WARN_ON(dma_addr != page_to_phys(p));
211 kbase_set_dma_addr(p, dma_addr);
213 pool_dbg(pool, "alloced page from kernel\n");
218 static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
221 struct device *dev = pool->kbdev->dev;
222 dma_addr_t dma_addr = kbase_dma_addr(p);
224 dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
225 kbase_clear_dma_addr(p);
228 pool_dbg(pool, "freed page to kernel\n");
231 static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
237 lockdep_assert_held(&pool->pool_lock);
239 for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
240 p = kbase_mem_pool_remove_locked(pool);
241 kbase_mem_pool_free_page(pool, p);
247 static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
252 kbase_mem_pool_lock(pool);
253 nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
254 kbase_mem_pool_unlock(pool);
259 static size_t kbase_mem_pool_grow(struct kbase_mem_pool *pool,
265 for (i = 0; i < nr_to_grow && !kbase_mem_pool_is_full(pool); i++) {
266 p = kbase_mem_pool_alloc_page(pool);
267 kbase_mem_pool_add(pool, p);
273 size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
277 cur_size = kbase_mem_pool_size(pool);
279 if (new_size < cur_size)
280 kbase_mem_pool_shrink(pool, cur_size - new_size);
281 else if (new_size > cur_size)
282 kbase_mem_pool_grow(pool, new_size - cur_size);
284 cur_size = kbase_mem_pool_size(pool);
289 void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
294 kbase_mem_pool_lock(pool);
296 pool->max_size = max_size;
298 cur_size = kbase_mem_pool_size(pool);
299 if (max_size < cur_size) {
300 nr_to_shrink = cur_size - max_size;
301 kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
304 kbase_mem_pool_unlock(pool);
308 static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
309 struct shrink_control *sc)
311 struct kbase_mem_pool *pool;
313 pool = container_of(s, struct kbase_mem_pool, reclaim);
314 pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
315 return kbase_mem_pool_size(pool);
318 static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
319 struct shrink_control *sc)
321 struct kbase_mem_pool *pool;
324 pool = container_of(s, struct kbase_mem_pool, reclaim);
326 pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
328 freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);
330 pool_dbg(pool, "reclaim freed %ld pages\n", freed);
335 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
336 static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
337 struct shrink_control *sc)
339 if (sc->nr_to_scan == 0)
340 return kbase_mem_pool_reclaim_count_objects(s, sc);
342 return kbase_mem_pool_reclaim_scan_objects(s, sc);
346 int kbase_mem_pool_init(struct kbase_mem_pool *pool,
348 struct kbase_device *kbdev,
349 struct kbase_mem_pool *next_pool)
352 pool->max_size = max_size;
354 pool->next_pool = next_pool;
356 spin_lock_init(&pool->pool_lock);
357 INIT_LIST_HEAD(&pool->page_list);
359 /* Register shrinker */
360 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
361 pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
363 pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
364 pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
366 pool->reclaim.seeks = DEFAULT_SEEKS;
367 /* Kernel versions prior to 3.1 :
368 * struct shrinker does not define batch */
369 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
370 pool->reclaim.batch = 0;
372 register_shrinker(&pool->reclaim);
374 pool_dbg(pool, "initialized\n");
379 void kbase_mem_pool_term(struct kbase_mem_pool *pool)
381 struct kbase_mem_pool *next_pool = pool->next_pool;
383 size_t nr_to_spill = 0;
384 LIST_HEAD(spill_list);
387 pool_dbg(pool, "terminate()\n");
389 unregister_shrinker(&pool->reclaim);
391 kbase_mem_pool_lock(pool);
394 if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
395 /* Spill to next pool (may overspill) */
396 nr_to_spill = kbase_mem_pool_capacity(next_pool);
397 nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);
399 /* Zero pages first without holding the next_pool lock */
400 for (i = 0; i < nr_to_spill; i++) {
401 p = kbase_mem_pool_remove_locked(pool);
402 kbase_mem_pool_zero_page(pool, p);
403 list_add(&p->lru, &spill_list);
407 while (!kbase_mem_pool_is_empty(pool)) {
408 /* Free remaining pages to kernel */
409 p = kbase_mem_pool_remove_locked(pool);
410 kbase_mem_pool_free_page(pool, p);
413 kbase_mem_pool_unlock(pool);
415 if (next_pool && nr_to_spill) {
416 /* Add new page list to next_pool */
417 kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);
419 pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
422 pool_dbg(pool, "terminated\n");
425 struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
429 pool_dbg(pool, "alloc()\n");
431 p = kbase_mem_pool_remove(pool);
433 if (!p && pool->next_pool) {
434 /* Allocate via next pool */
435 return kbase_mem_pool_alloc(pool->next_pool);
439 /* Get page from kernel */
440 p = kbase_mem_pool_alloc_page(pool);
446 void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
449 struct kbase_mem_pool *next_pool = pool->next_pool;
451 pool_dbg(pool, "free()\n");
453 if (!kbase_mem_pool_is_full(pool)) {
454 /* Add to our own pool */
456 kbase_mem_pool_sync_page(pool, p);
458 kbase_mem_pool_add(pool, p);
459 } else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
460 /* Spill to next pool */
461 kbase_mem_pool_spill(next_pool, p);
464 kbase_mem_pool_free_page(pool, p);
468 int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
476 pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);
478 /* Get pages from this pool */
479 kbase_mem_pool_lock(pool);
480 nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
481 for (i = 0; i < nr_from_pool; i++) {
482 p = kbase_mem_pool_remove_locked(pool);
483 pages[i] = page_to_phys(p);
485 kbase_mem_pool_unlock(pool);
487 if (i != nr_pages && pool->next_pool) {
488 /* Allocate via next pool */
489 err = kbase_mem_pool_alloc_pages(pool->next_pool,
490 nr_pages - i, pages + i);
498 /* Get any remaining pages from kernel */
499 for (; i < nr_pages; i++) {
500 p = kbase_mem_pool_alloc_page(pool);
503 pages[i] = page_to_phys(p);
506 pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);
511 kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
515 static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
516 size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
519 size_t nr_to_pool = 0;
520 LIST_HEAD(new_page_list);
526 pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
527 nr_pages, zero, sync);
529 /* Zero/sync pages first without holding the pool lock */
530 for (i = 0; i < nr_pages; i++) {
531 if (unlikely(!pages[i]))
534 p = phys_to_page(pages[i]);
537 kbase_mem_pool_zero_page(pool, p);
539 kbase_mem_pool_sync_page(pool, p);
541 list_add(&p->lru, &new_page_list);
546 /* Add new page list to pool */
547 kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);
549 pool_dbg(pool, "add_array(%zu) added %zu pages\n",
550 nr_pages, nr_to_pool);
553 void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
554 phys_addr_t *pages, bool dirty, bool reclaimed)
556 struct kbase_mem_pool *next_pool = pool->next_pool;
559 LIST_HEAD(to_pool_list);
562 pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
565 /* Add to this pool */
566 nr_to_pool = kbase_mem_pool_capacity(pool);
567 nr_to_pool = min(nr_pages, nr_to_pool);
569 kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
573 if (i != nr_pages && next_pool) {
574 /* Spill to next pool (may overspill) */
575 nr_to_pool = kbase_mem_pool_capacity(next_pool);
576 nr_to_pool = min(nr_pages - i, nr_to_pool);
578 kbase_mem_pool_add_array(next_pool, nr_to_pool,
579 pages + i, true, dirty);
584 /* Free any remaining pages to kernel */
585 for (; i < nr_pages; i++) {
586 if (unlikely(!pages[i]))
589 p = phys_to_page(pages[i]);
591 zone_page_state_add(-1, page_zone(p),
592 NR_SLAB_RECLAIMABLE);
594 kbase_mem_pool_free_page(pool, p);
598 pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);