MALI: utgard: upgrade DDK to r6p1-01rel0
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.c
/*
 * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else /* pre 2.6.26 the file was in the arch specific location */
#include <asm/semaphore.h>
#endif

#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/vmalloc.h>
#include "ump_kernel_common.h"
#include "ump_kernel_memory_backend.h"



#define UMP_BLOCK_SIZE (256UL * 1024UL)  /* 256kB, remember to keep the ()s */



typedef struct block_info {
	struct block_info *next;
} block_info;



typedef struct block_allocator {
	struct semaphore mutex;
	block_info *all_blocks;
	block_info *first_free;
	u32 base;
	u32 num_blocks;
	u32 num_free;
} block_allocator;
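
/*
 * How this dedicated allocator works: all_blocks holds one block_info per
 * UMP_BLOCK_SIZE block of the dedicated memory region, and free blocks are
 * chained into a singly linked list (first_free) through block_info::next.
 * A block's physical address is never stored; it is derived from the block's
 * index in all_blocks (see get_phys()). The semaphore serialises allocate
 * and release against each other.
 */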


static void block_allocator_shutdown(ump_memory_backend *backend);
static int block_allocator_allocate(void *ctx, ump_dd_mem *mem);
static void block_allocator_release(void *ctx, ump_dd_mem *handle);
static inline u32 get_phys(block_allocator *allocator, block_info *block);
static u32 block_allocator_stat(struct ump_memory_backend *backend);



/*
 * Create dedicated memory backend
 */
ump_memory_backend *ump_block_allocator_create(u32 base_address, u32 size)
{
	ump_memory_backend *backend;
	block_allocator *allocator;
	u32 usable_size;
	u32 num_blocks;

	usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
	num_blocks = usable_size / UMP_BLOCK_SIZE;

	if (0 == usable_size) {
		DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
		return NULL;
	}

	DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
	DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));

	backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
	if (NULL != backend) {
		allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
		if (NULL != allocator) {
			allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
			if (NULL != allocator->all_blocks) {
				int i;

				allocator->first_free = NULL;
				allocator->num_blocks = num_blocks;
				allocator->num_free = num_blocks;
				allocator->base = base_address;
				sema_init(&allocator->mutex, 1);

				for (i = 0; i < num_blocks; i++) {
					allocator->all_blocks[i].next = allocator->first_free;
					allocator->first_free = &allocator->all_blocks[i];
				}

				backend->ctx = allocator;
				backend->allocate = block_allocator_allocate;
				backend->release = block_allocator_release;
				backend->shutdown = block_allocator_shutdown;
				backend->stat = block_allocator_stat;
				backend->pre_allocate_physical_check = NULL;
				backend->adjust_to_mali_phys = NULL;

				return backend;
			}
			kfree(allocator);
		}
		kfree(backend);
	}

	return NULL;
}
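
/*
 * Illustrative only (not part of the original file): the UMP core is expected
 * to create this backend from platform configuration when a dedicated memory
 * block is reserved for UMP, roughly along these lines. The base address and
 * size below are made-up values.
 *
 *	ump_memory_backend *backend;
 *	backend = ump_block_allocator_create(0x2C000000, 16UL * 1024UL * 1024UL);
 *	if (NULL == backend)
 *		return -ENOMEM;
 */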



/*
 * Destroy specified dedicated memory backend
 */
static void block_allocator_shutdown(ump_memory_backend *backend)
{
	block_allocator *allocator;

	BUG_ON(!backend);
	BUG_ON(!backend->ctx);

	allocator = (block_allocator *)backend->ctx;

	DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));

	kfree(allocator->all_blocks);
	kfree(allocator);
	kfree(backend);
}



static int block_allocator_allocate(void *ctx, ump_dd_mem *mem)
{
	block_allocator *allocator;
	u32 left;
	block_info *last_allocated = NULL;
	int i = 0;

	BUG_ON(!ctx);
	BUG_ON(!mem);

	allocator = (block_allocator *)ctx;
	left = mem->size_bytes;

	BUG_ON(!left);
	BUG_ON(!&allocator->mutex);

	mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
	mem->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
	if (NULL == mem->block_array) {
		MSG_ERR(("Failed to allocate block array\n"));
		return 0;
	}

	if (down_interruptible(&allocator->mutex)) {
		MSG_ERR(("Could not get mutex to do block_allocate\n"));
		/* Do not leak the block array allocated above */
		vfree(mem->block_array);
		mem->block_array = NULL;
		return 0;
	}

	mem->size_bytes = 0;

	while ((left > 0) && (allocator->first_free)) {
		block_info *block;

		block = allocator->first_free;
		allocator->first_free = allocator->first_free->next;
		block->next = last_allocated;
		last_allocated = block;
		allocator->num_free--;

		mem->block_array[i].addr = get_phys(allocator, block);
		mem->block_array[i].size = UMP_BLOCK_SIZE;
		mem->size_bytes += UMP_BLOCK_SIZE;

		i++;

		if (left < UMP_BLOCK_SIZE) left = 0;
		else left -= UMP_BLOCK_SIZE;
	}

	if (left) {
		block_info *block;
		/* release all memory back to the pool */
		while (last_allocated) {
			block = last_allocated->next;
			last_allocated->next = allocator->first_free;
			allocator->first_free = last_allocated;
			last_allocated = block;
			allocator->num_free++;
		}

		vfree(mem->block_array);
		mem->backend_info = NULL;
		mem->block_array = NULL;

		DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
		up(&allocator->mutex);

		return 0;
	}

	mem->backend_info = last_allocated;

	up(&allocator->mutex);
	mem->is_cached = 0;

	return 1;
}
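
/*
 * Worked example for block_allocator_allocate(): a request of 614400 bytes
 * (600 kB) rounds up to 3 blocks of 256 kB, so mem->nr_blocks becomes 3 and
 * mem->size_bytes is rewritten to 786432. If fewer than 3 blocks are free,
 * every block taken so far is pushed back onto the free list and the call
 * returns 0.
 */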



static void block_allocator_release(void *ctx, ump_dd_mem *handle)
{
	block_allocator *allocator;
	block_info *block, *next;

	BUG_ON(!ctx);
	BUG_ON(!handle);

	allocator = (block_allocator *)ctx;
	block = (block_info *)handle->backend_info;
	BUG_ON(!block);

	if (down_interruptible(&allocator->mutex)) {
		MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
		return;
	}

	while (block) {
		next = block->next;

		/* A valid block must point inside all_blocks[0 .. num_blocks - 1] */
		BUG_ON((block < allocator->all_blocks) || (block >= (allocator->all_blocks + allocator->num_blocks)));

		block->next = allocator->first_free;
		allocator->first_free = block;
		allocator->num_free++;

		block = next;
	}
	DBG_MSG(3, ("%u blocks free after release call\n", allocator->num_free));
	up(&allocator->mutex);

	vfree(handle->block_array);
	handle->block_array = NULL;
}
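
/*
 * Note that release only relinks the blocks referenced by handle->backend_info
 * back onto the free list and frees the handle's block_array; the dedicated
 * physical memory itself remains owned by this backend until
 * block_allocator_shutdown().
 */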



/*
 * Helper function for calculating the physical base address of a memory block
 */
static inline u32 get_phys(block_allocator *allocator, block_info *block)
{
	return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
}
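
/*
 * Example: with base = 0x10000000, the block at index 3 in all_blocks maps to
 * physical address 0x10000000 + 3 * 256 kB = 0x100C0000.
 */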

static u32 block_allocator_stat(struct ump_memory_backend *backend)
{
	block_allocator *allocator;
	BUG_ON(!backend);
	allocator = (block_allocator *)backend->ctx;
	BUG_ON(!allocator);

	return (allocator->num_blocks - allocator->num_free) * UMP_BLOCK_SIZE;
}