MALI: rockchip: upgrade midgard DDK to r14p0-01rel0
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
/*
 *
 * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



#include <mali_kbase.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>

/* This function is only provided for backwards compatibility with kernels
 * which use the old carveout allocator.
 *
 * The forward declaration is to keep sparse happy.
 */
int __init kbase_carveout_mem_reserve(
                phys_addr_t size);
int __init kbase_carveout_mem_reserve(phys_addr_t size)
{
        return 0;
}

#define pool_dbg(pool, format, ...) \
        dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
                (pool->next_pool) ? "kctx" : "kbdev",   \
                kbase_mem_pool_size(pool),      \
                kbase_mem_pool_max_size(pool),  \
                ##__VA_ARGS__)

#define NOT_DIRTY false
#define NOT_RECLAIMED false

static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
        spin_lock(&pool->pool_lock);
}

static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
{
        spin_unlock(&pool->pool_lock);
}

static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
        ssize_t max_size = kbase_mem_pool_max_size(pool);
        ssize_t cur_size = kbase_mem_pool_size(pool);

        return max(max_size - cur_size, (ssize_t)0);
}

static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
{
        return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
}

static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
{
        return kbase_mem_pool_size(pool) == 0;
}

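/*
 * Add a page to the pool's LRU list. The caller must hold pool->pool_lock.
 * Pooled pages are accounted under NR_SLAB_RECLAIMABLE, which keeps them
 * visible to the VM as reclaimable memory that the shrinker registered
 * below can hand back under memory pressure.
 */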
static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
                struct page *p)
{
        lockdep_assert_held(&pool->pool_lock);

        list_add(&p->lru, &pool->page_list);
        pool->cur_size++;

        zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);

        pool_dbg(pool, "added page\n");
}

static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
{
        kbase_mem_pool_lock(pool);
        kbase_mem_pool_add_locked(pool, p);
        kbase_mem_pool_unlock(pool);
}

static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
                struct list_head *page_list, size_t nr_pages)
{
        struct page *p;

        lockdep_assert_held(&pool->pool_lock);

        list_for_each_entry(p, page_list, lru) {
                zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
        }

        list_splice(page_list, &pool->page_list);
        pool->cur_size += nr_pages;

        pool_dbg(pool, "added %zu pages\n", nr_pages);
}

static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
                struct list_head *page_list, size_t nr_pages)
{
        kbase_mem_pool_lock(pool);
        kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
        kbase_mem_pool_unlock(pool);
}

static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
{
        struct page *p;

        lockdep_assert_held(&pool->pool_lock);

        if (kbase_mem_pool_is_empty(pool))
                return NULL;

        p = list_first_entry(&pool->page_list, struct page, lru);
        list_del_init(&p->lru);
        pool->cur_size--;

        zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);

        pool_dbg(pool, "removed page\n");

        return p;
}

static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
{
        struct page *p;

        kbase_mem_pool_lock(pool);
        p = kbase_mem_pool_remove_locked(pool);
        kbase_mem_pool_unlock(pool);

        return p;
}

static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
                struct page *p)
{
        struct device *dev = pool->kbdev->dev;

        dma_sync_single_for_device(dev, kbase_dma_addr(p),
                        PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
                struct page *p)
{
        clear_highpage(p);
        kbase_mem_pool_sync_page(pool, p);
}

static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
                struct page *p)
{
        /* Zero page before spilling */
        kbase_mem_pool_zero_page(next_pool, p);

        kbase_mem_pool_add(next_pool, p);
}

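/*
 * Allocate a single page from the kernel and DMA-map it for the GPU.
 * GFP_HIGHUSER is used where possible; on old ARM kernels without
 * HAVE_DMA_ATTRS the allocation is restricted to GFP_USER because the DMA
 * cache sync cannot handle HIGHMEM pages there. The DMA address is stashed
 * with kbase_set_dma_addr() and is expected to equal the physical address
 * (see the WARN_ON below). Returns NULL on failure.
 */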
struct page *kbase_mem_alloc_page(struct kbase_device *kbdev)
{
        struct page *p;
        gfp_t gfp;
        struct device *dev = kbdev->dev;
        dma_addr_t dma_addr;

#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
        LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
        gfp = GFP_USER | __GFP_ZERO;
#else
        gfp = GFP_HIGHUSER | __GFP_ZERO;
#endif

        if (current->flags & PF_KTHREAD) {
                /* Don't trigger OOM killer from kernel threads, e.g. when
                 * growing memory on GPU page fault */
                gfp |= __GFP_NORETRY;
        }

        p = alloc_page(gfp);
        if (!p)
                return NULL;

        dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma_addr)) {
                __free_page(p);
                return NULL;
        }

        WARN_ON(dma_addr != page_to_phys(p));

        kbase_set_dma_addr(p, dma_addr);

        return p;
}

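/*
 * Return a page to the kernel: undo the DMA mapping set up by
 * kbase_mem_alloc_page() and free the page itself.
 */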
static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
                struct page *p)
{
        struct device *dev = pool->kbdev->dev;
        dma_addr_t dma_addr = kbase_dma_addr(p);

        dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
        kbase_clear_dma_addr(p);
        __free_page(p);

        pool_dbg(pool, "freed page to kernel\n");
}

static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
                size_t nr_to_shrink)
{
        struct page *p;
        size_t i;

        lockdep_assert_held(&pool->pool_lock);

        for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
                p = kbase_mem_pool_remove_locked(pool);
                kbase_mem_pool_free_page(pool, p);
        }

        return i;
}

static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
                size_t nr_to_shrink)
{
        size_t nr_freed;

        kbase_mem_pool_lock(pool);
        nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
        kbase_mem_pool_unlock(pool);

        return nr_freed;
}

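/*
 * Grow the pool by allocating nr_to_grow pages from the kernel and adding
 * them to the pool. Returns 0 on success or -ENOMEM if an allocation
 * fails; pages already added are kept in the pool in that case.
 */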
int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
                size_t nr_to_grow)
{
        struct page *p;
        size_t i;

        for (i = 0; i < nr_to_grow; i++) {
                p = kbase_mem_alloc_page(pool->kbdev);
                if (!p)
                        return -ENOMEM;
                kbase_mem_pool_add(pool, p);
        }

        return 0;
}

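/*
 * Resize the pool towards new_size (clamped to the pool's max_size),
 * shrinking or growing as needed.
 */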
void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
        size_t cur_size;

        cur_size = kbase_mem_pool_size(pool);

        if (new_size > pool->max_size)
                new_size = pool->max_size;

        if (new_size < cur_size)
                kbase_mem_pool_shrink(pool, cur_size - new_size);
        else if (new_size > cur_size)
                kbase_mem_pool_grow(pool, new_size - cur_size);
}

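/*
 * Update the pool's maximum size, freeing pages back to the kernel if the
 * current size now exceeds the new maximum.
 */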
void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
{
        size_t cur_size;
        size_t nr_to_shrink;

        kbase_mem_pool_lock(pool);

        pool->max_size = max_size;

        cur_size = kbase_mem_pool_size(pool);
        if (max_size < cur_size) {
                nr_to_shrink = cur_size - max_size;
                kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
        }

        kbase_mem_pool_unlock(pool);
}


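/*
 * Shrinker callbacks: expose the pool to the kernel's memory reclaim path.
 * count_objects reports how many pages the pool could give back, and
 * scan_objects actually frees up to sc->nr_to_scan of them.
 */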
static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
                struct shrink_control *sc)
{
        struct kbase_mem_pool *pool;

        pool = container_of(s, struct kbase_mem_pool, reclaim);
        pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
        return kbase_mem_pool_size(pool);
}

static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
                struct shrink_control *sc)
{
        struct kbase_mem_pool *pool;
        unsigned long freed;

        pool = container_of(s, struct kbase_mem_pool, reclaim);

        pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);

        freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);

        pool_dbg(pool, "reclaim freed %ld pages\n", freed);

        return freed;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
                struct shrink_control *sc)
{
        if (sc->nr_to_scan == 0)
                return kbase_mem_pool_reclaim_count_objects(s, sc);

        return kbase_mem_pool_reclaim_scan_objects(s, sc);
}
#endif

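/*
 * Initialize an empty pool and register its shrinker. If next_pool is
 * non-NULL, this pool becomes the per-context ("kctx") pool that spills to
 * and falls back on the device-wide ("kbdev") pool, matching the labels
 * used by pool_dbg().
 */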
int kbase_mem_pool_init(struct kbase_mem_pool *pool,
                size_t max_size,
                struct kbase_device *kbdev,
                struct kbase_mem_pool *next_pool)
{
        pool->cur_size = 0;
        pool->max_size = max_size;
        pool->kbdev = kbdev;
        pool->next_pool = next_pool;

        spin_lock_init(&pool->pool_lock);
        INIT_LIST_HEAD(&pool->page_list);

        /* Register shrinker */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
        pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
#else
        pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
        pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
#endif
        pool->reclaim.seeks = DEFAULT_SEEKS;
        /* Kernel versions prior to 3.1:
         * struct shrinker does not define batch */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
        pool->reclaim.batch = 0;
#endif
        register_shrinker(&pool->reclaim);

        pool_dbg(pool, "initialized\n");

        return 0;
}

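/*
 * Tear down a pool: unregister the shrinker, spill as many pages as the
 * next pool (if any) can take, and free the rest back to the kernel.
 * Spilled pages are zeroed and collected on a local list while only this
 * pool's lock is held, then added to next_pool in one batch.
 */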
void kbase_mem_pool_term(struct kbase_mem_pool *pool)
{
        struct kbase_mem_pool *next_pool = pool->next_pool;
        struct page *p;
        size_t nr_to_spill = 0;
        LIST_HEAD(spill_list);
        int i;

        pool_dbg(pool, "terminate()\n");

        unregister_shrinker(&pool->reclaim);

        kbase_mem_pool_lock(pool);
        pool->max_size = 0;

        if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
                /* Spill to next pool (may overspill) */
                nr_to_spill = kbase_mem_pool_capacity(next_pool);
                nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);

                /* Zero pages first without holding the next_pool lock */
                for (i = 0; i < nr_to_spill; i++) {
                        p = kbase_mem_pool_remove_locked(pool);
                        kbase_mem_pool_zero_page(pool, p);
                        list_add(&p->lru, &spill_list);
                }
        }

        while (!kbase_mem_pool_is_empty(pool)) {
                /* Free remaining pages to kernel */
                p = kbase_mem_pool_remove_locked(pool);
                kbase_mem_pool_free_page(pool, p);
        }

        kbase_mem_pool_unlock(pool);

        if (next_pool && nr_to_spill) {
                /* Add new page list to next_pool */
                kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);

                pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
        }

        pool_dbg(pool, "terminated\n");
}

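/*
 * Allocate one page, taking it from this pool or, if this pool is empty,
 * from the next pool(s) in the chain. Returns NULL if every pool in the
 * chain is empty.
 */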
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
        struct page *p;

        do {
                pool_dbg(pool, "alloc()\n");
                p = kbase_mem_pool_remove(pool);

                if (p)
                        return p;

                pool = pool->next_pool;
        } while (pool);

        return NULL;
}

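/*
 * Free one page back to the pool hierarchy: keep it here if there is room,
 * otherwise spill it (zeroed) to the next pool, and only release it to the
 * kernel when both pools are full. Dirty pages are cache-synced for the
 * device before being pooled.
 */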
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
                bool dirty)
{
        struct kbase_mem_pool *next_pool = pool->next_pool;

        pool_dbg(pool, "free()\n");

        if (!kbase_mem_pool_is_full(pool)) {
                /* Add to our own pool */
                if (dirty)
                        kbase_mem_pool_sync_page(pool, p);

                kbase_mem_pool_add(pool, p);
        } else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
                /* Spill to next pool */
                kbase_mem_pool_spill(next_pool, p);
        } else {
                /* Free page */
                kbase_mem_pool_free_page(pool, p);
        }
}

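/*
 * Allocate nr_pages physical pages, filling in the caller-supplied pages[]
 * array. Pages are taken from this pool first, then from the next pool,
 * and finally straight from the kernel. On failure everything allocated so
 * far is rolled back through kbase_mem_pool_free_pages() and an error code
 * is returned.
 *
 * A minimal usage sketch (illustrative only; kctx, nr and phys are
 * hypothetical caller-side names):
 *
 *   if (kbase_mem_pool_alloc_pages(&kctx->mem_pool, nr, phys))
 *           return -ENOMEM;
 *   ...
 *   kbase_mem_pool_free_pages(&kctx->mem_pool, nr, phys,
 *                   NOT_DIRTY, NOT_RECLAIMED);
 */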
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
                phys_addr_t *pages)
{
        struct page *p;
        size_t nr_from_pool;
        size_t i;
        int err = -ENOMEM;

        pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);

        /* Get pages from this pool */
        kbase_mem_pool_lock(pool);
        nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
        for (i = 0; i < nr_from_pool; i++) {
                p = kbase_mem_pool_remove_locked(pool);
                pages[i] = page_to_phys(p);
        }
        kbase_mem_pool_unlock(pool);

        if (i != nr_pages && pool->next_pool) {
                /* Allocate via next pool */
                err = kbase_mem_pool_alloc_pages(pool->next_pool,
                                nr_pages - i, pages + i);

                if (err)
                        goto err_rollback;

                i += nr_pages - i;
        }

        /* Get any remaining pages from kernel */
        for (; i < nr_pages; i++) {
                p = kbase_mem_alloc_page(pool->kbdev);
                if (!p)
                        goto err_rollback;
                pages[i] = page_to_phys(p);
        }

        pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);

        return 0;

err_rollback:
        kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
        return err;
}

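/*
 * Move an array of physical pages into the pool. Entries are zeroed and/or
 * cache-synced as requested before the pool lock is taken, then added in
 * one batch; each consumed entry in pages[] is cleared to 0.
 */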
static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
                size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
{
        struct page *p;
        size_t nr_to_pool = 0;
        LIST_HEAD(new_page_list);
        size_t i;

        if (!nr_pages)
                return;

        pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
                        nr_pages, zero, sync);

        /* Zero/sync pages first without holding the pool lock */
        for (i = 0; i < nr_pages; i++) {
                if (unlikely(!pages[i]))
                        continue;

                p = phys_to_page(pages[i]);

                if (zero)
                        kbase_mem_pool_zero_page(pool, p);
                else if (sync)
                        kbase_mem_pool_sync_page(pool, p);

                list_add(&p->lru, &new_page_list);
                nr_to_pool++;
                pages[i] = 0;
        }

        /* Add new page list to pool */
        kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);

        pool_dbg(pool, "add_array(%zu) added %zu pages\n",
                        nr_pages, nr_to_pool);
}

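/*
 * Free an array of pages: fill this pool up to its capacity, spill the
 * overflow (zeroed) to the next pool, and release whatever is left to the
 * kernel. Pages marked as reclaimed skip the pools and only have their
 * NR_SLAB_RECLAIMABLE accounting removed before being freed.
 */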
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
                phys_addr_t *pages, bool dirty, bool reclaimed)
{
        struct kbase_mem_pool *next_pool = pool->next_pool;
        struct page *p;
        size_t nr_to_pool;
        LIST_HEAD(to_pool_list);
        size_t i = 0;

        pool_dbg(pool, "free_pages(%zu):\n", nr_pages);

        if (!reclaimed) {
                /* Add to this pool */
                nr_to_pool = kbase_mem_pool_capacity(pool);
                nr_to_pool = min(nr_pages, nr_to_pool);

                kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);

                i += nr_to_pool;

                if (i != nr_pages && next_pool) {
                        /* Spill to next pool (may overspill) */
                        nr_to_pool = kbase_mem_pool_capacity(next_pool);
                        nr_to_pool = min(nr_pages - i, nr_to_pool);

                        kbase_mem_pool_add_array(next_pool, nr_to_pool,
                                        pages + i, true, dirty);
                        i += nr_to_pool;
                }
        }

        /* Free any remaining pages to kernel */
        for (; i < nr_pages; i++) {
                if (unlikely(!pages[i]))
                        continue;

                p = phys_to_page(pages[i]);
                if (reclaimed)
                        zone_page_state_add(-1, page_zone(p),
                                        NR_SLAB_RECLAIMABLE);

                kbase_mem_pool_free_page(pool, p);
                pages[i] = 0;
        }

        pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}