/*
 *
 * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>

/* This function is only provided for backwards compatibility with kernels
 * which use the old carveout allocator.
 *
 * The forward declaration is to keep sparse happy.
 */
int __init kbase_carveout_mem_reserve(
		phys_addr_t size);
int __init kbase_carveout_mem_reserve(phys_addr_t size)
{
	return 0;
}

#define pool_dbg(pool, format, ...) \
	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
		(pool->next_pool) ? "kctx" : "kbdev",	\
		kbase_mem_pool_size(pool),	\
		kbase_mem_pool_max_size(pool),	\
		##__VA_ARGS__)

#define NOT_DIRTY false
#define NOT_RECLAIMED false

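/*
 * Note on the pool hierarchy (as reflected in pool_dbg above): a pool with
 * a next_pool is a per-context ("kctx") pool that spills to and refills
 * from the device-wide ("kbdev") pool; the kbdev pool itself has no
 * next_pool and falls back directly on the kernel page allocator.
 */
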
static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
	spin_lock(&pool->pool_lock);
}

static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
{
	spin_unlock(&pool->pool_lock);
}

static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
	ssize_t max_size = kbase_mem_pool_max_size(pool);
	ssize_t cur_size = kbase_mem_pool_size(pool);

	return max(max_size - cur_size, (ssize_t)0);
}

static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
}

static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
{
	return kbase_mem_pool_size(pool) == 0;
}

static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
		struct page *p)
{
	lockdep_assert_held(&pool->pool_lock);

	list_add(&p->lru, &pool->page_list);
	pool->cur_size++;

	zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "added page\n");
}

static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_locked(pool, p);
	kbase_mem_pool_unlock(pool);
}

static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	list_for_each_entry(p, page_list, lru) {
		zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
	}

	list_splice(page_list, &pool->page_list);
	pool->cur_size += nr_pages;

	pool_dbg(pool, "added %zu pages\n", nr_pages);
}

static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
		struct list_head *page_list, size_t nr_pages)
{
	kbase_mem_pool_lock(pool);
	kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
	kbase_mem_pool_unlock(pool);
}

static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
{
	struct page *p;

	lockdep_assert_held(&pool->pool_lock);

	if (kbase_mem_pool_is_empty(pool))
		return NULL;

	p = list_first_entry(&pool->page_list, struct page, lru);
	list_del_init(&p->lru);
	pool->cur_size--;

	zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);

	pool_dbg(pool, "removed page\n");

	return p;
}

static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
{
	struct page *p;

	kbase_mem_pool_lock(pool);
	p = kbase_mem_pool_remove_locked(pool);
	kbase_mem_pool_unlock(pool);

	return p;
}

static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;

	dma_sync_single_for_device(dev, kbase_dma_addr(p),
			PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	clear_highpage(p);
	kbase_mem_pool_sync_page(pool, p);
}

static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
		struct page *p)
{
	/* Zero page before spilling */
	kbase_mem_pool_zero_page(next_pool, p);

	kbase_mem_pool_add(next_pool, p);
}

static struct page *kbase_mem_pool_alloc_page(struct kbase_mem_pool *pool)
{
	struct page *p;
	gfp_t gfp;
	struct device *dev = pool->kbdev->dev;
	dma_addr_t dma_addr;

#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
	/* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
	gfp = GFP_USER | __GFP_ZERO;
#else
	gfp = GFP_HIGHUSER | __GFP_ZERO;
#endif

	if (current->flags & PF_KTHREAD) {
		/* Don't trigger OOM killer from kernel threads, e.g. when
		 * growing memory on GPU page fault */
		gfp |= __GFP_NORETRY;
	}

	p = alloc_page(gfp);
	if (!p)
		return NULL;

	dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		__free_page(p);
		return NULL;
	}

	WARN_ON(dma_addr != page_to_phys(p));

	kbase_set_dma_addr(p, dma_addr);

	pool_dbg(pool, "alloced page from kernel\n");

	return p;
}

static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
		struct page *p)
{
	struct device *dev = pool->kbdev->dev;
	dma_addr_t dma_addr = kbase_dma_addr(p);

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	kbase_clear_dma_addr(p);
	__free_page(p);

	pool_dbg(pool, "freed page to kernel\n");
}

static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	struct page *p;
	size_t i;

	lockdep_assert_held(&pool->pool_lock);

	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	return i;
}

static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
		size_t nr_to_shrink)
{
	size_t nr_freed;

	kbase_mem_pool_lock(pool);
	nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	kbase_mem_pool_unlock(pool);

	return nr_freed;
}

static size_t kbase_mem_pool_grow(struct kbase_mem_pool *pool,
		size_t nr_to_grow)
{
	struct page *p;
	size_t i;

	for (i = 0; i < nr_to_grow && !kbase_mem_pool_is_full(pool); i++) {
		p = kbase_mem_pool_alloc_page(pool);
		if (!p)
			break;

		kbase_mem_pool_add(pool, p);
	}

	return i;
}

size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
	size_t cur_size;

	cur_size = kbase_mem_pool_size(pool);

	if (new_size < cur_size)
		kbase_mem_pool_shrink(pool, cur_size - new_size);
	else if (new_size > cur_size)
		kbase_mem_pool_grow(pool, new_size - cur_size);

	cur_size = kbase_mem_pool_size(pool);

	return cur_size;
}

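/*
 * Note: kbase_mem_pool_trim() returns the size actually reached, which can
 * differ from new_size, e.g. when kbase_mem_pool_grow() stops early because
 * a page allocation fails or the pool hits max_size.
 */
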
void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
{
	size_t cur_size;
	size_t nr_to_shrink;

	kbase_mem_pool_lock(pool);

	pool->max_size = max_size;

	cur_size = kbase_mem_pool_size(pool);
	if (max_size < cur_size) {
		nr_to_shrink = cur_size - max_size;
		kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
	}

	kbase_mem_pool_unlock(pool);
}

static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;

	pool = container_of(s, struct kbase_mem_pool, reclaim);
	pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
	return kbase_mem_pool_size(pool);
}

static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
		struct shrink_control *sc)
{
	struct kbase_mem_pool *pool;
	unsigned long freed;

	pool = container_of(s, struct kbase_mem_pool, reclaim);

	pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);

	freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);

	pool_dbg(pool, "reclaim freed %ld pages\n", freed);

	return freed;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
		struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)
		return kbase_mem_pool_reclaim_count_objects(s, sc);

	return kbase_mem_pool_reclaim_scan_objects(s, sc);
}
#endif

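/*
 * Background: the count_objects/scan_objects split of the shrinker API was
 * introduced in Linux 3.12. Older kernels use a single shrink() callback,
 * which kbase_mem_pool_reclaim_shrink() above emulates by returning a count
 * when nr_to_scan is 0 and scanning otherwise.
 */
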
int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool)
{
	pool->cur_size = 0;
	pool->max_size = max_size;
	pool->kbdev = kbdev;
	pool->next_pool = next_pool;

	spin_lock_init(&pool->pool_lock);
	INIT_LIST_HEAD(&pool->page_list);

	/* Register shrinker */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
#else
	pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
	pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
#endif
	pool->reclaim.seeks = DEFAULT_SEEKS;
	/* Kernel versions prior to 3.1 do not define batch in
	 * struct shrinker */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
	pool->reclaim.batch = 0;
#endif
	register_shrinker(&pool->reclaim);

	pool_dbg(pool, "initialized\n");

	return 0;
}

void kbase_mem_pool_term(struct kbase_mem_pool *pool)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_spill = 0;
	LIST_HEAD(spill_list);
	int i;

	pool_dbg(pool, "terminate()\n");

	unregister_shrinker(&pool->reclaim);

	kbase_mem_pool_lock(pool);
	pool->max_size = 0;

	if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool (may overspill) */
		nr_to_spill = kbase_mem_pool_capacity(next_pool);
		nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);

		/* Zero pages first without holding the next_pool lock */
		for (i = 0; i < nr_to_spill; i++) {
			p = kbase_mem_pool_remove_locked(pool);
			kbase_mem_pool_zero_page(pool, p);
			list_add(&p->lru, &spill_list);
		}
	}

	while (!kbase_mem_pool_is_empty(pool)) {
		/* Free remaining pages to kernel */
		p = kbase_mem_pool_remove_locked(pool);
		kbase_mem_pool_free_page(pool, p);
	}

	kbase_mem_pool_unlock(pool);

	if (next_pool && nr_to_spill) {
		/* Add new page list to next_pool */
		kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);

		pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
	}

	pool_dbg(pool, "terminated\n");
}

struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
	struct page *p;

	pool_dbg(pool, "alloc()\n");

	p = kbase_mem_pool_remove(pool);

	if (!p && pool->next_pool) {
		/* Allocate via next pool */
		return kbase_mem_pool_alloc(pool->next_pool);
	}

	if (!p) {
		/* Get page from kernel */
		p = kbase_mem_pool_alloc_page(pool);
	}

	return p;
}

void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
		bool dirty)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;

	pool_dbg(pool, "free()\n");

	if (!kbase_mem_pool_is_full(pool)) {
		/* Add to our own pool */
		if (dirty)
			kbase_mem_pool_sync_page(pool, p);

		kbase_mem_pool_add(pool, p);
	} else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
		/* Spill to next pool */
		kbase_mem_pool_spill(next_pool, p);
	} else {
		/* Free page */
		kbase_mem_pool_free_page(pool, p);
	}
}

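/*
 * kbase_mem_pool_alloc_pages() below fills @pages with the physical
 * addresses of nr_pages pages, drawing first from this pool, then from
 * next_pool, and finally from the kernel. On failure, every page gathered
 * so far is handed back via kbase_mem_pool_free_pages(), so the call is
 * all-or-nothing.
 */
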
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages)
{
	struct page *p;
	size_t nr_from_pool;
	size_t i;
	int err = -ENOMEM;

	pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);

	/* Get pages from this pool */
	kbase_mem_pool_lock(pool);
	nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
	for (i = 0; i < nr_from_pool; i++) {
		p = kbase_mem_pool_remove_locked(pool);
		pages[i] = page_to_phys(p);
	}
	kbase_mem_pool_unlock(pool);

	if (i != nr_pages && pool->next_pool) {
		/* Allocate via next pool */
		err = kbase_mem_pool_alloc_pages(pool->next_pool,
				nr_pages - i, pages + i);

		if (err)
			goto err_rollback;

		i += nr_pages - i;
	}

	/* Get any remaining pages from kernel */
	for (; i < nr_pages; i++) {
		p = kbase_mem_pool_alloc_page(pool);
		if (!p)
			goto err_rollback;

		pages[i] = page_to_phys(p);
	}

	pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);

	return 0;

err_rollback:
	kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
	return err;
}

static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
		size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
{
	struct page *p;
	size_t nr_to_pool = 0;
	LIST_HEAD(new_page_list);
	size_t i;

	if (!nr_pages)
		return;

	pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
			nr_pages, zero, sync);

	/* Zero/sync pages first without holding the pool lock */
	for (i = 0; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);
		if (zero)
			kbase_mem_pool_zero_page(pool, p);
		else if (sync)
			kbase_mem_pool_sync_page(pool, p);

		list_add(&p->lru, &new_page_list);
		nr_to_pool++;
		pages[i] = 0;
	}

	/* Add new page list to pool */
	kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);

	pool_dbg(pool, "add_array(%zu) added %zu pages\n",
			nr_pages, nr_to_pool);
}

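/*
 * For kbase_mem_pool_free_pages() below: @dirty requests a CPU cache sync
 * before a page is pooled again, while @reclaimed marks pages already
 * accounted as reclaimable; as the code shows, those bypass the pools, have
 * their NR_SLAB_RECLAIMABLE accounting dropped, and go straight back to the
 * kernel.
 */
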
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty, bool reclaimed)
{
	struct kbase_mem_pool *next_pool = pool->next_pool;
	struct page *p;
	size_t nr_to_pool;
	LIST_HEAD(to_pool_list);
	size_t i = 0;

	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);

	if (!reclaimed) {
		/* Add to this pool */
		nr_to_pool = kbase_mem_pool_capacity(pool);
		nr_to_pool = min(nr_pages, nr_to_pool);

		kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);

		i += nr_to_pool;

		if (i != nr_pages && next_pool) {
			/* Spill to next pool (may overspill) */
			nr_to_pool = kbase_mem_pool_capacity(next_pool);
			nr_to_pool = min(nr_pages - i, nr_to_pool);

			kbase_mem_pool_add_array(next_pool, nr_to_pool,
					pages + i, true, dirty);
			i += nr_to_pool;
		}
	}

	/* Free any remaining pages to kernel */
	for (; i < nr_pages; i++) {
		if (unlikely(!pages[i]))
			continue;

		p = phys_to_page(pages[i]);
		if (reclaimed)
			zone_page_state_add(-1, page_zone(p),
					NR_SLAB_RECLAIMABLE);

		kbase_mem_pool_free_page(pool, p);
		pages[i] = 0;
	}

	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}