/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/pgtable.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"

#define NVMAP_SECURE_HEAPS      (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP               (__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP               (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages is at least
 * PAGELIST_VMALLOC_MIN bytes, the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE * 2)

static inline void *altalloc(size_t len)
{
        if (len >= PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

static inline void altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len >= PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

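/* tears down a handle: removes it from the device's handle tree, releases
 * its backing memory (either the carveout block, or the individual pages
 * plus any IOVMM area), and frees the handle structure itself. */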
void _nvmap_handle_free(struct nvmap_handle *h)
{
        struct nvmap_device *dev = h->dev;
        unsigned int i, nr_page;

        if (nvmap_handle_remove(dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        if (!h->heap_pgalloc) {
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

        nvmap_mru_remove(nvmap_get_share_from_dev(dev), h);

        if (h->pgalloc.area)
                tegra_iovmm_free_vm(h->pgalloc.area);

        for (i = 0; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

extern void __flush_dcache_page(struct address_space *, struct page *);

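/* allocates exactly size bytes of pages: a higher-order allocation is
 * split into individual pages and the pages beyond the requested size are
 * returned to the buddy allocator. the remaining pages are flushed from
 * the inner data cache and the outer cache before being handed back. */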
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;
        unsigned long base;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);

        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        e = page + (size >> PAGE_SHIFT);
        for (p = page; p < e; p++)
                __flush_dcache_page(page_mapping(p), p);

        base = page_to_phys(page);
        outer_flush_range(base, base + size);
        return page;
}

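/* populates a handle with system-memory pages: either one physically
 * contiguous block or nr_page discontiguous pages. for discontiguous
 * allocations without CONFIG_NVMAP_RECLAIM_UNPINNED_VM, an IOVMM area is
 * reserved up front and flagged dirty. */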
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0;
        struct page **pages;

        pages = altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, pgprot_kernel);

#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        if (nr_page == 1)
                contiguous = true;
#endif

        h->pgalloc.area = NULL;
        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
                for (i = 0; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
                h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
                                                        NULL, size, prot);
                if (!h->pgalloc.area)
                        goto fail;

                h->pgalloc.dirty = true;
#endif
        }

        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        INIT_LIST_HEAD(&h->pgalloc.mru_list);
        return 0;

fail:
        while (i--)
                __free_page(pages[i]);
        altfree(pages, nr_page * sizeof(*pages));
        return -ENOMEM;
}

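/* attempts to satisfy a handle allocation from a single heap type (type
 * must be exactly one heap bit): a carveout block, IOVMM-backed pages, or
 * contiguous system memory. on success h->alloc is set and the relevant
 * per-client commit accounting is updated. */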
static void alloc_handle(struct nvmap_client *client, size_t align,
                         struct nvmap_handle *h, unsigned int type)
{
        BUG_ON(type & (type - 1));

        if (type & NVMAP_HEAP_CARVEOUT_MASK) {
                struct nvmap_heap_block *b;
                b = nvmap_carveout_alloc(client, h->size, align,
                                         type, h->flags);
                if (b) {
                        h->carveout = b;
                        h->heap_pgalloc = false;
                        h->alloc = true;
                        nvmap_carveout_commit_add(client,
                                nvmap_heap_to_arg(nvmap_block_to_heap(b)),
                                h->size);
                }
        } else if (type & NVMAP_HEAP_IOVMM) {
                size_t reserved = PAGE_ALIGN(h->size);
                int commit;
                int ret;

                BUG_ON(align > PAGE_SIZE);

                /* increment the committed IOVM space prior to allocation
                 * to avoid race conditions with other threads simultaneously
                 * allocating. */
                commit = atomic_add_return(reserved, &client->iovm_commit);

                if (commit < client->iovm_limit)
                        ret = handle_page_alloc(client, h, false);
                else
                        ret = -ENOMEM;

                if (!ret) {
                        h->heap_pgalloc = true;
                        h->alloc = true;
                } else {
                        atomic_sub(reserved, &client->iovm_commit);
                }

        } else if (type & NVMAP_HEAP_SYSMEM) {
                if (handle_page_alloc(client, h, true) == 0) {
                        BUG_ON(!h->pgalloc.contig);
                        h->heap_pgalloc = true;
                        h->alloc = true;
                }
        }
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        0,
};

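/* allocates backing storage for a previously-created handle. the heap
 * policy list appropriate to the allocation size is walked in order, and
 * within each policy entry the candidate heaps permitted by heap_mask are
 * tried from the most-significant bit down until one succeeds. */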
int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, unsigned int flags)
{
        struct nvmap_handle *h = NULL;
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        align = max_t(size_t, align, L1_CACHE_BYTES);

        /* can't do greater than page size alignment with page alloc */
        if (align > PAGE_SIZE)
                heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;

        h = nvmap_get_handle_id(client, id);

        if (!h)
                return -EINVAL;

        if (h->alloc)
                goto out;

        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);

        /* secure allocations can only be served from secure heaps */
        if (h->secure)
                heap_mask &= NVMAP_SECURE_HEAPS;

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, align, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

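/* releases a client's reference to a handle: the duplicate count is
 * dropped, and when it reaches zero the reference is unlinked from the
 * client's rb-tree, any pins held through this reference are undone,
 * per-client commit accounting is reversed, and the handle is put. */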
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = _nvmap_validate_id_locked(client, id);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);

        if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
                atomic_sub(h->size, &client->iovm_commit);

        if (h->alloc && !h->heap_pgalloc)
                nvmap_carveout_commit_subtract(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);

        nvmap_ref_unlock(client);

        if (pins)
                nvmap_err(client, "%s freeing pinned handle %p\n",
                          current->group_leader->comm, h);

        while (pins--)
                nvmap_unpin_handles(client, &ref->handle, 1);

        if (h->owner == client)
                h->owner = NULL;

        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}

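/* inserts a new handle reference into the client's rb-tree of references,
 * keyed by the handle pointer. */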
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        nvmap_ref_unlock(client);
}

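/* creates a new, unallocated handle of the requested size, owned by the
 * calling client, and returns the client's initial reference to it.
 * backing memory is attached separately (see nvmap_alloc_handle_id). */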
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                kfree(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        h->dev = client->dev;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);

        nvmap_handle_add(client->dev, h);

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        return ref;
}

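/* duplicates an existing handle into the calling client: if the client
 * already holds a reference, its duplicate count is simply incremented;
 * otherwise a new reference is created and the client's IOVM or carveout
 * commit accounting is charged for the handle. */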
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                                   unsigned long id)
{
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client || client->dev != nvmap_dev);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_validate_get(client, id);

        if (!h) {
                nvmap_debug(client, "%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                nvmap_err(client, "%s duplicating unallocated handle\n",
                          current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = _nvmap_validate_id_locked(client, (unsigned long)h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        /* verify that adding this handle to the process' access list
         * won't exceed the IOVM limit */
        if (h->heap_pgalloc && !h->pgalloc.contig && !client->super) {
                int oc;
                oc = atomic_add_return(h->size, &client->iovm_commit);
                if (oc > client->iovm_limit) {
                        atomic_sub(h->size, &client->iovm_commit);
                        nvmap_handle_put(h);
                        nvmap_err(client, "duplicating %p in %s over-commits"
                                  " IOVMM space\n", (void *)id,
                                  current->group_leader->comm);
                        return ERR_PTR(-ENOMEM);
                }
        }

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        if (!h->heap_pgalloc)
                nvmap_carveout_commit_add(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        return ref;
}