ion: Handle the memory mapping correctly on x86
[firefly-linux-kernel-4.4.55.git] / drivers/staging/android/ion/ion_page_pool.c
/*
 * drivers/staging/android/ion/ion_page_pool.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include "ion_priv.h"

/*
 * Grab a fresh run of pages from the buddy allocator, apply the pool's
 * cache policy and sync the range for device access before handing it out.
 */
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
        struct page *page = alloc_pages(pool->gfp_mask, pool->order);

        if (!page)
                return NULL;
        ion_page_pool_alloc_set_cache_policy(pool, page);

        ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
                                  DMA_BIDIRECTIONAL);
        return page;
}

/* Undo the cache policy and give the pages back to the buddy allocator. */
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
                                     struct page *page)
{
        ion_page_pool_free_set_cache_policy(pool, page);
        __free_pages(page, pool->order);
}

/*
 * Stash a page in the pool. Highmem and lowmem pages are kept on separate
 * lists so the shrinker can free them selectively. Always returns 0.
 */
static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
        mutex_lock(&pool->mutex);
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &pool->high_items);
                pool->high_count++;
        } else {
                list_add_tail(&page->lru, &pool->low_items);
                pool->low_count++;
        }
        mutex_unlock(&pool->mutex);
        return 0;
}

/* Pop the oldest page off the requested list. Caller holds pool->mutex. */
static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
        struct page *page;

        if (high) {
                BUG_ON(!pool->high_count);
                page = list_first_entry(&pool->high_items, struct page, lru);
                pool->high_count--;
        } else {
                BUG_ON(!pool->low_count);
                page = list_first_entry(&pool->low_items, struct page, lru);
                pool->low_count--;
        }

        list_del(&page->lru);
        return page;
}

/*
 * Hand out a cached page if one is available (preferring highmem),
 * otherwise fall back to a fresh allocation.
 */
struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
{
        struct page *page = NULL;

        BUG_ON(!pool);

        mutex_lock(&pool->mutex);
        if (pool->high_count)
                page = ion_page_pool_remove(pool, true);
        else if (pool->low_count)
                page = ion_page_pool_remove(pool, false);
        mutex_unlock(&pool->mutex);

        if (!page)
                page = ion_page_pool_alloc_pages(pool);

        return page;
}

/*
 * Return a page to the pool for later reuse. The page must have the order
 * the pool was created with.
 */
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
        int ret;

        BUG_ON(pool->order != compound_order(page));

        ret = ion_page_pool_add(pool, page);
        if (ret)
                ion_page_pool_free_pages(pool, page);
}

/* Bypass the pool and release the page straight back to the system. */
void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
{
        ion_page_pool_free_pages(pool, page);
}

/* Number of base pages currently cached, optionally counting highmem. */
static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
{
        int count = pool->low_count;

        if (high)
                count += pool->high_count;

        return count << pool->order;
}

/*
 * Shrinker helper: with nr_to_scan == 0 this only reports the pool size.
 * Otherwise drain lowmem pages first and touch highmem pages only when the
 * caller can make use of them (kswapd, or a __GFP_HIGHMEM allocation).
 * Returns the number of base pages freed.
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                         int nr_to_scan)
{
        int freed = 0;
        bool high;

        if (current_is_kswapd())
                high = true;
        else
                high = !!(gfp_mask & __GFP_HIGHMEM);

        if (nr_to_scan == 0)
                return ion_page_pool_total(pool, high);

        while (freed < nr_to_scan) {
                struct page *page;

                mutex_lock(&pool->mutex);
                if (pool->low_count) {
                        page = ion_page_pool_remove(pool, false);
                } else if (high && pool->high_count) {
                        page = ion_page_pool_remove(pool, true);
                } else {
                        mutex_unlock(&pool->mutex);
                        break;
                }
                mutex_unlock(&pool->mutex);
                ion_page_pool_free_pages(pool, page);
                freed += (1 << pool->order);
        }

        return freed;
}

/*
 * Create a pool for pages of the given order. __GFP_COMP is forced so that
 * compound_order() reports the real order in ion_page_pool_free().
 */
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
        struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
                                             GFP_KERNEL);
        if (!pool)
                return NULL;
        pool->high_count = 0;
        pool->low_count = 0;
        INIT_LIST_HEAD(&pool->low_items);
        INIT_LIST_HEAD(&pool->high_items);
        pool->gfp_mask = gfp_mask | __GFP_COMP;
        pool->order = order;
        mutex_init(&pool->mutex);
        plist_node_init(&pool->list, order);

        return pool;
}

/*
 * Free the pool bookkeeping itself; any pages still cached are expected to
 * have been drained by the caller beforehand.
 */
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
        kfree(pool);
}

static int __init ion_page_pool_init(void)
{
        return 0;
}
device_initcall(ion_page_pool_init);
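
For context, a pool like this is meant to be driven by an ION heap (the mainline system heap keeps one pool per supported order). The sketch below is illustrative only and is not part of the kernel file: the example_* names, the order of 4, and the GFP flags are assumptions made for the example, and it relies on the declarations already pulled in from ion_priv.h.

/*
 * Illustrative sketch only -- not part of ion_page_pool.c. Shows one way a
 * heap could drive the pool API above; names and values are made up.
 */
static struct ion_page_pool *example_pool;

static int example_heap_init(void)
{
        /* Cache order-4 chunks (64 KiB with 4 KiB pages) of user-visible memory. */
        example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
        return example_pool ? 0 : -ENOMEM;
}

static struct page *example_heap_get_chunk(void)
{
        /* Reuse a cached compound page if available, else allocate a fresh one. */
        return ion_page_pool_alloc(example_pool);
}

static void example_heap_put_chunk(struct page *page)
{
        /* Hand the page back to the pool so the next allocation can reuse it. */
        ion_page_pool_free(example_pool, page);
}

static void example_heap_exit(void)
{
        /* Drain everything (highmem included, via __GFP_HIGHMEM), then free the pool. */
        ion_page_pool_shrink(example_pool, __GFP_HIGHMEM, INT_MAX);
        ion_page_pool_destroy(example_pool);
}

In the real system heap the shrink path is wired up to a struct shrinker so the pools give memory back under pressure; the direct ion_page_pool_shrink() call here just stands in for that path.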