firefly-linux-kernel-4.4.55.git: drivers/gpu/arm/midgard_for_linux/mali_kbase_mem_pool.c
/*
 *
 * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



#include <mali_kbase.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>

/* This function is only provided for backwards compatibility with kernels
 * which use the old carveout allocator.
 *
 * The forward declaration is to keep sparse happy.
 */
int __init kbase_carveout_mem_reserve(
                phys_addr_t size);
int __init kbase_carveout_mem_reserve(phys_addr_t size)
{
        return 0;
}

#define pool_dbg(pool, format, ...) \
        dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
                (pool->next_pool) ? "kctx" : "kbdev",   \
                kbase_mem_pool_size(pool),      \
                kbase_mem_pool_max_size(pool),  \
                ##__VA_ARGS__)

static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
        spin_lock(&pool->pool_lock);
}

static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
{
        spin_unlock(&pool->pool_lock);
}

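/* Number of pages that can still be added before the pool reaches its
 * maximum size.
 */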
static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
        ssize_t max_size = kbase_mem_pool_max_size(pool);
        ssize_t cur_size = kbase_mem_pool_size(pool);

        return max(max_size - cur_size, (ssize_t)0);
}

static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
{
        return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
}

static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
{
        return kbase_mem_pool_size(pool) == 0;
}

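/* Add a page to the head of the pool's free list and account it as
 * reclaimable, since the shrinker can hand it back to the kernel.
 */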
static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
                struct page *p)
{
        lockdep_assert_held(&pool->pool_lock);

        list_add(&p->lru, &pool->page_list);
        pool->cur_size++;

        zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);

        pool_dbg(pool, "added page\n");
}

static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
{
        kbase_mem_pool_lock(pool);
        kbase_mem_pool_add_locked(pool, p);
        kbase_mem_pool_unlock(pool);
}

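/* Splice a pre-built list of pages into the pool in one go, updating the
 * reclaimable page accounting for each page.
 */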
static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
                struct list_head *page_list, size_t nr_pages)
{
        struct page *p;

        lockdep_assert_held(&pool->pool_lock);

        list_for_each_entry(p, page_list, lru) {
                zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE);
        }

        list_splice(page_list, &pool->page_list);
        pool->cur_size += nr_pages;

        pool_dbg(pool, "added %zu pages\n", nr_pages);
}

static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
                struct list_head *page_list, size_t nr_pages)
{
        kbase_mem_pool_lock(pool);
        kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
        kbase_mem_pool_unlock(pool);
}

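/* Detach the most recently added page from the pool, or return NULL if the
 * pool is empty.
 */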
static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
{
        struct page *p;

        lockdep_assert_held(&pool->pool_lock);

        if (kbase_mem_pool_is_empty(pool))
                return NULL;

        p = list_first_entry(&pool->page_list, struct page, lru);
        list_del_init(&p->lru);
        pool->cur_size--;

        zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE);

        pool_dbg(pool, "removed page\n");

        return p;
}

static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
{
        struct page *p;

        kbase_mem_pool_lock(pool);
        p = kbase_mem_pool_remove_locked(pool);
        kbase_mem_pool_unlock(pool);

        return p;
}

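/* Sync the CPU's view of the page to the device so the GPU sees its
 * current contents (e.g. after zeroing).
 */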
static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
                struct page *p)
{
        struct device *dev = pool->kbdev->dev;

        dma_sync_single_for_device(dev, kbase_dma_addr(p),
                        PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
                struct page *p)
{
        clear_highpage(p);
        kbase_mem_pool_sync_page(pool, p);
}

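/* Zero a page and move it into the next pool in the hierarchy. */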
static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
                struct page *p)
{
        /* Zero page before spilling */
        kbase_mem_pool_zero_page(next_pool, p);

        kbase_mem_pool_add(next_pool, p);
}

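/* Allocate a page from the kernel and map it for DMA. Returns NULL if the
 * allocation or the DMA mapping fails.
 */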
static struct page *kbase_mem_pool_alloc_page(struct kbase_mem_pool *pool)
{
        struct page *p;
        gfp_t gfp;
        struct device *dev = pool->kbdev->dev;
        dma_addr_t dma_addr;

#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
        LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
        /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
        gfp = GFP_USER | __GFP_ZERO;
#else
        gfp = GFP_HIGHUSER | __GFP_ZERO;
#endif

        if (current->flags & PF_KTHREAD) {
                /* Don't trigger OOM killer from kernel threads, e.g. when
                 * growing memory on GPU page fault */
                gfp |= __GFP_NORETRY;
        }

        p = alloc_page(gfp);
        if (!p)
                return NULL;

        dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma_addr)) {
                __free_page(p);
                return NULL;
        }

        WARN_ON(dma_addr != page_to_phys(p));

        kbase_set_dma_addr(p, dma_addr);

        pool_dbg(pool, "alloced page from kernel\n");

        return p;
}

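/* Unmap a page from DMA and hand it back to the kernel. */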
static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
                struct page *p)
{
        struct device *dev = pool->kbdev->dev;
        dma_addr_t dma_addr = kbase_dma_addr(p);

        dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
        kbase_clear_dma_addr(p);
        __free_page(p);

        pool_dbg(pool, "freed page to kernel\n");
}

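/* Free up to nr_to_shrink pages from the pool back to the kernel and
 * return the number actually freed.
 */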
static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
                size_t nr_to_shrink)
{
        struct page *p;
        size_t i;

        lockdep_assert_held(&pool->pool_lock);

        for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
                p = kbase_mem_pool_remove_locked(pool);
                kbase_mem_pool_free_page(pool, p);
        }

        return i;
}

static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
                size_t nr_to_shrink)
{
        size_t nr_freed;

        kbase_mem_pool_lock(pool);
        nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
        kbase_mem_pool_unlock(pool);

        return nr_freed;
}

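/* Grow the pool by up to nr_to_grow pages, stopping early if the pool
 * becomes full or the kernel runs out of pages. Returns the number of
 * pages added.
 */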
static size_t kbase_mem_pool_grow(struct kbase_mem_pool *pool,
                size_t nr_to_grow)
{
        struct page *p;
        size_t i;

        for (i = 0; i < nr_to_grow && !kbase_mem_pool_is_full(pool); i++) {
                p = kbase_mem_pool_alloc_page(pool);
                if (!p) {
                        /* Out of memory - stop growing early rather than
                         * adding a NULL page to the pool */
                        break;
                }
                kbase_mem_pool_add(pool, p);
        }

        return i;
}

size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
        size_t cur_size;

        cur_size = kbase_mem_pool_size(pool);

        if (new_size < cur_size)
                kbase_mem_pool_shrink(pool, cur_size - new_size);
        else if (new_size > cur_size)
                kbase_mem_pool_grow(pool, new_size - cur_size);

        cur_size = kbase_mem_pool_size(pool);

        return cur_size;
}

void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
{
        size_t cur_size;
        size_t nr_to_shrink;

        kbase_mem_pool_lock(pool);

        pool->max_size = max_size;

        cur_size = kbase_mem_pool_size(pool);
        if (max_size < cur_size) {
                nr_to_shrink = cur_size - max_size;
                kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
        }

        kbase_mem_pool_unlock(pool);
}

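/* Shrinker callbacks: the pool is registered with the kernel's shrinker
 * framework so that pooled pages can be reclaimed under memory pressure.
 */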
static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
                struct shrink_control *sc)
{
        struct kbase_mem_pool *pool;

        pool = container_of(s, struct kbase_mem_pool, reclaim);
        pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
        return kbase_mem_pool_size(pool);
}

static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
                struct shrink_control *sc)
{
        struct kbase_mem_pool *pool;
        unsigned long freed;

        pool = container_of(s, struct kbase_mem_pool, reclaim);

        pool_dbg(pool, "reclaim scan %lu:\n", sc->nr_to_scan);

        freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);

        pool_dbg(pool, "reclaim freed %lu pages\n", freed);

        return freed;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
                struct shrink_control *sc)
{
        if (sc->nr_to_scan == 0)
                return kbase_mem_pool_reclaim_count_objects(s, sc);

        return kbase_mem_pool_reclaim_scan_objects(s, sc);
}
#endif

int kbase_mem_pool_init(struct kbase_mem_pool *pool,
                size_t max_size,
                struct kbase_device *kbdev,
                struct kbase_mem_pool *next_pool)
{
        pool->cur_size = 0;
        pool->max_size = max_size;
        pool->kbdev = kbdev;
        pool->next_pool = next_pool;

        spin_lock_init(&pool->pool_lock);
        INIT_LIST_HEAD(&pool->page_list);

        /* Register shrinker */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
        pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
#else
        pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
        pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
#endif
        pool->reclaim.seeks = DEFAULT_SEEKS;
        /* Kernel versions prior to 3.1:
         * struct shrinker does not define batch */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
        pool->reclaim.batch = 0;
#endif
        register_shrinker(&pool->reclaim);

        pool_dbg(pool, "initialized\n");

        return 0;
}

void kbase_mem_pool_term(struct kbase_mem_pool *pool)
{
        struct kbase_mem_pool *next_pool = pool->next_pool;
        struct page *p;
        size_t nr_to_spill = 0;
        LIST_HEAD(spill_list);
        size_t i;

        pool_dbg(pool, "terminate()\n");

        unregister_shrinker(&pool->reclaim);

        kbase_mem_pool_lock(pool);
        pool->max_size = 0;

        if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
                /* Spill to next pool (may overspill) */
                nr_to_spill = kbase_mem_pool_capacity(next_pool);
                nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);

                /* Zero pages first without holding the next_pool lock */
                for (i = 0; i < nr_to_spill; i++) {
                        p = kbase_mem_pool_remove_locked(pool);
                        kbase_mem_pool_zero_page(pool, p);
                        list_add(&p->lru, &spill_list);
                }
        }

        while (!kbase_mem_pool_is_empty(pool)) {
                /* Free remaining pages to kernel */
                p = kbase_mem_pool_remove_locked(pool);
                kbase_mem_pool_free_page(pool, p);
        }

        kbase_mem_pool_unlock(pool);

        if (next_pool && nr_to_spill) {
                /* Add new page list to next_pool */
                kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);

                pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
        }

        pool_dbg(pool, "terminated\n");
}

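/* Allocate one page: take it from this pool if possible, otherwise fall
 * through to the next pool and finally to the kernel allocator.
 */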
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
        struct page *p;

        pool_dbg(pool, "alloc()\n");

        p = kbase_mem_pool_remove(pool);

        if (!p && pool->next_pool) {
                /* Allocate via next pool */
                return kbase_mem_pool_alloc(pool->next_pool);
        }

        if (!p) {
                /* Get page from kernel */
                p = kbase_mem_pool_alloc_page(pool);
        }

        return p;
}

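/* Return a page to this pool if there is room, otherwise spill it to the
 * next pool, or free it to the kernel as a last resort.
 */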
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
                bool dirty)
{
        struct kbase_mem_pool *next_pool = pool->next_pool;

        pool_dbg(pool, "free()\n");

        if (!kbase_mem_pool_is_full(pool)) {
                /* Add to our own pool */
                if (dirty)
                        kbase_mem_pool_sync_page(pool, p);

                kbase_mem_pool_add(pool, p);
        } else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
                /* Spill to next pool */
                kbase_mem_pool_spill(next_pool, p);
        } else {
                /* Free page */
                kbase_mem_pool_free_page(pool, p);
        }
}

int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
                phys_addr_t *pages)
{
        struct page *p;
        size_t nr_from_pool;
        size_t i;
        int err = -ENOMEM;

        pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);

        /* Get pages from this pool */
        kbase_mem_pool_lock(pool);
        nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
        for (i = 0; i < nr_from_pool; i++) {
                p = kbase_mem_pool_remove_locked(pool);
                pages[i] = page_to_phys(p);
        }
        kbase_mem_pool_unlock(pool);

        if (i != nr_pages && pool->next_pool) {
                /* Allocate via next pool */
                err = kbase_mem_pool_alloc_pages(pool->next_pool,
                                nr_pages - i, pages + i);

                if (err)
                        goto err_rollback;

                i += nr_pages - i;
        }

        /* Get any remaining pages from kernel */
        for (; i < nr_pages; i++) {
                p = kbase_mem_pool_alloc_page(pool);
                if (!p)
                        goto err_rollback;
                pages[i] = page_to_phys(p);
        }

        pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);

        return 0;

err_rollback:
        kbase_mem_pool_free_pages(pool, i, pages, false);
        return err;
}

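/* Move an array of physical pages into the pool, zeroing or syncing each
 * page first as requested. Array entries that were taken into the pool are
 * cleared to 0.
 */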
static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
                size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
{
        struct page *p;
        size_t nr_to_pool = 0;
        LIST_HEAD(new_page_list);
        size_t i;

        if (!nr_pages)
                return;

        pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
                        nr_pages, zero, sync);

        /* Zero/sync pages first without holding the pool lock */
        for (i = 0; i < nr_pages; i++) {
                if (unlikely(!pages[i]))
                        continue;

                p = phys_to_page(pages[i]);

                if (zero)
                        kbase_mem_pool_zero_page(pool, p);
                else if (sync)
                        kbase_mem_pool_sync_page(pool, p);

                list_add(&p->lru, &new_page_list);
                nr_to_pool++;
                pages[i] = 0;
        }

        /* Add new page list to pool */
        kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);

        pool_dbg(pool, "add_array(%zu) added %zu pages\n",
                        nr_pages, nr_to_pool);
}

void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
                phys_addr_t *pages, bool dirty)
{
        struct kbase_mem_pool *next_pool = pool->next_pool;
        struct page *p;
        size_t nr_to_pool;
        LIST_HEAD(to_pool_list);
        size_t i = 0;

        pool_dbg(pool, "free_pages(%zu):\n", nr_pages);

        /* Add to this pool */
        nr_to_pool = kbase_mem_pool_capacity(pool);
        nr_to_pool = min(nr_pages, nr_to_pool);

        kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);

        i += nr_to_pool;

        if (i != nr_pages && next_pool) {
                /* Spill to next pool (may overspill) */
                nr_to_pool = kbase_mem_pool_capacity(next_pool);
                nr_to_pool = min(nr_pages - i, nr_to_pool);

                kbase_mem_pool_add_array(next_pool, nr_to_pool, pages + i,
                                true, dirty);
                i += nr_to_pool;
        }

        /* Free any remaining pages to kernel */
        for (; i < nr_pages; i++) {
                if (unlikely(!pages[i]))
                        continue;

                p = phys_to_page(pages[i]);
                kbase_mem_pool_free_page(pool, p);
                pages[i] = 0;
        }

        pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}