/*
 * drivers/video/tegra/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/pgtable.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"
#define NVMAP_SECURE_HEAPS	(NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)

#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP	(__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP	(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN	(PAGE_SIZE * 2)
static inline void *altalloc(size_t len)
{
	if (len >= PAGELIST_VMALLOC_MIN)
		return vmalloc(len);
	else
		return kmalloc(len, GFP_KERNEL);
}

static inline void altfree(void *ptr, size_t len)
{
	if (!ptr)
		return;

	if (len >= PAGELIST_VMALLOC_MIN)
		vfree(ptr);
	else
		kfree(ptr);
}
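
/* Final teardown for a handle whose reference count has dropped to zero.
 * Removes the handle from the device, then releases either its carveout
 * block or its page array (including any IOVMM area), and finally frees
 * the handle structure itself. */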
void _nvmap_handle_free(struct nvmap_handle *h)
{
	struct nvmap_device *dev = h->dev;
	unsigned int i, nr_page;

	if (nvmap_handle_remove(dev, h) != 0)
		return;

	if (!h->alloc)
		goto out;

	if (!h->heap_pgalloc) {
		nvmap_heap_free(h->carveout);
		goto out;
	}

	nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

	BUG_ON(h->size & ~PAGE_MASK);
	BUG_ON(!h->pgalloc.pages);

	nvmap_mru_remove(nvmap_get_share_from_dev(dev), h);

	if (h->pgalloc.area)
		tegra_iovmm_free_vm(h->pgalloc.area);

	for (i = 0; i < nr_page; i++)
		__free_page(h->pgalloc.pages[i]);

	altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
	kfree(h);
}
extern void __flush_dcache_page(struct address_space *, struct page *);
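
/* Allocate exactly size bytes worth of pages rather than a full power-of-
 * two order: a single higher-order allocation is split into individual
 * pages and the unused tail pages are returned to the system. The retained
 * pages are then cleaned out of the L1 and L2 caches so that later
 * uncached or write-combined mappings see consistent data. */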
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
	struct page *page, *p, *e;
	unsigned int order;
	unsigned long base;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	split_page(page, order);

	/* free the unused tail of the power-of-two allocation */
	e = page + (1 << order);
	for (p = page + (size >> PAGE_SHIFT); p < e; p++)
		__free_page(p);

	e = page + (size >> PAGE_SHIFT);
	for (p = page; p < e; p++)
		__flush_dcache_page(page_mapping(p), p);

	base = page_to_phys(page);
	outer_flush_range(base, base + size);

	return page;
}
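
/* Populate h->pgalloc with a page array for a system-memory or IOVMM
 * handle. When a physically contiguous buffer is requested the pages come
 * from a single nvmap_alloc_pages_exact() call; otherwise they are
 * allocated one page at a time, and (unless unpinned VM reclaim is
 * enabled) an IOVMM area is reserved up front for the handle. */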
static int handle_page_alloc(struct nvmap_client *client,
			     struct nvmap_handle *h, bool contiguous)
{
	size_t size = PAGE_ALIGN(h->size);
	unsigned int nr_page = size >> PAGE_SHIFT;
	pgprot_t prot;
	unsigned int i = 0;
	struct page **pages;

	pages = altalloc(nr_page * sizeof(*pages));
	if (!pages)
		return -ENOMEM;

	prot = nvmap_pgprot(h, pgprot_kernel);

#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	if (nr_page == 1)
		contiguous = true;
#endif

	h->pgalloc.area = NULL;
	if (contiguous) {
		struct page *page;
		page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
		if (!page)
			goto fail;

		for (i = 0; i < nr_page; i++)
			pages[i] = nth_page(page, i);
	} else {
		for (i = 0; i < nr_page; i++) {
			pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
			if (!pages[i])
				goto fail;
		}

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
		h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
							NULL, size, prot);
		if (!h->pgalloc.area)
			goto fail;

		h->pgalloc.dirty = true;
#endif
	}

	h->pgalloc.pages = pages;
	h->pgalloc.contig = contiguous;
	INIT_LIST_HEAD(&h->pgalloc.mru_list);
	return 0;

fail:
	while (i--)
		__free_page(pages[i]);
	altfree(pages, nr_page * sizeof(*pages));
	return -ENOMEM;
}
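
/* Attempt to satisfy a handle's allocation from exactly one heap type
 * (the type mask must have a single bit set). On success h->alloc is set
 * and the per-client carveout or IOVMM commit accounting is updated. */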
static void alloc_handle(struct nvmap_client *client, size_t align,
			 struct nvmap_handle *h, unsigned int type)
{
	BUG_ON(type & (type - 1));

	if (type & NVMAP_HEAP_CARVEOUT_MASK) {
		struct nvmap_heap_block *b;
		b = nvmap_carveout_alloc(client, h->size, align,
					 type, h->flags);
		if (b) {
			h->carveout = b;
			h->heap_pgalloc = false;
			h->alloc = true;
			nvmap_carveout_commit_add(client,
				nvmap_heap_to_arg(nvmap_block_to_heap(b)),
				h->size);
		}
	} else if (type & NVMAP_HEAP_IOVMM) {
		size_t reserved = PAGE_ALIGN(h->size);
		int commit;
		int ret;

		BUG_ON(align > PAGE_SIZE);

		/* increment the committed IOVM space prior to allocation
		 * to avoid race conditions with other threads simultaneously
		 * allocating. */
		commit = atomic_add_return(reserved, &client->iovm_commit);

		if (commit < client->iovm_limit)
			ret = handle_page_alloc(client, h, false);
		else
			ret = -ENOMEM;

		if (!ret) {
			h->heap_pgalloc = true;
			h->alloc = true;
		} else {
			atomic_sub(reserved, &client->iovm_commit);
		}
	} else if (type & NVMAP_HEAP_SYSMEM) {
		if (handle_page_alloc(client, h, true) == 0) {
			BUG_ON(!h->pgalloc.contig);
			h->heap_pgalloc = true;
			h->alloc = true;
		}
	}
}
/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
	NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	NVMAP_HEAP_SYSMEM,
#endif
	NVMAP_HEAP_CARVEOUT_MASK,
	NVMAP_HEAP_IOVMM,
	0,
};
static const unsigned int heap_policy_large[] = {
	NVMAP_HEAP_CARVEOUT_IRAM,
	NVMAP_HEAP_IOVMM,
	NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	NVMAP_HEAP_SYSMEM,
#endif
	0,
};
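
/* Allocate backing storage for an already-created handle. The requested
 * heap mask is intersected with each entry of the size-appropriate policy
 * table above, and alloc_handle() is tried for every heap bit until one
 * succeeds or the policy list is exhausted. */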
int nvmap_alloc_handle_id(struct nvmap_client *client,
			  unsigned long id, unsigned int heap_mask,
			  size_t align, unsigned int flags)
{
	struct nvmap_handle *h = NULL;
	const unsigned int *alloc_policy;
	int nr_page;
	int err = -ENOMEM;

	align = max_t(size_t, align, L1_CACHE_BYTES);

	/* can't do greater than page size alignment with page alloc */
	if (align > PAGE_SIZE)
		heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;

	h = nvmap_get_handle_id(client, id);
	if (!h)
		return -EINVAL;

	if (h->alloc)
		goto out;

	nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	h->secure = !!(flags & NVMAP_HANDLE_SECURE);
	h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);

	/* secure allocations can only be served from secure heaps */
	if (h->secure)
		heap_mask &= NVMAP_SECURE_HEAPS;

	if (!heap_mask) {
		err = -EINVAL;
		goto out;
	}

	alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

	while (!h->alloc && *alloc_policy) {
		unsigned int heap_type;

		heap_type = *alloc_policy++;
		heap_type &= heap_mask;

		if (!heap_type)
			continue;

		heap_mask &= ~heap_type;

		while (heap_type && !h->alloc) {
			unsigned int heap;

			/* iterate possible heaps MSB-to-LSB, since higher-
			 * priority carveouts will have higher usage masks */
			heap = 1 << __fls(heap_type);
			alloc_handle(client, align, h, heap);
			heap_type &= ~heap;
		}
	}

out:
	err = (h->alloc) ? 0 : err;
	nvmap_handle_put(h);
	return err;
}
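
/* Drop one client reference (dupe) on a handle. When the last dupe held by
 * this client goes away the ref is unlinked from the client's rbtree,
 * commit accounting is rolled back, any pins taken through this ref are
 * released, and the handle's own refcount is dropped. */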
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h;
	int pins;

	nvmap_ref_lock(client);

	ref = _nvmap_validate_id_locked(client, id);
	if (!ref) {
		nvmap_ref_unlock(client);
		return;
	}

	BUG_ON(!ref->handle);
	h = ref->handle;

	if (atomic_dec_return(&ref->dupes)) {
		nvmap_ref_unlock(client);
		goto out;
	}

	pins = atomic_read(&ref->pin);
	rb_erase(&ref->node, &client->handle_refs);

	if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
		atomic_sub(h->size, &client->iovm_commit);

	if (h->alloc && !h->heap_pgalloc)
		nvmap_carveout_commit_subtract(client,
			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
			h->size);

	nvmap_ref_unlock(client);

	if (pins)
		nvmap_err(client, "%s freeing pinned handle %p\n",
			  current->group_leader->comm, h);

	while (pins--)
		nvmap_unpin_handles(client, &ref->handle, 1);

	if (h->owner == client)
		h->owner = NULL;

	kfree(ref);

out:
	BUG_ON(!atomic_read(&h->ref));
	nvmap_handle_put(h);
}
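
/* Insert a newly created reference into the client's handle_refs rbtree,
 * keyed by handle pointer. */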
static void add_handle_ref(struct nvmap_client *client,
			   struct nvmap_handle_ref *ref)
{
	struct rb_node **p, *parent = NULL;

	nvmap_ref_lock(client);
	p = &client->handle_refs.rb_node;
	while (*p) {
		struct nvmap_handle_ref *node;
		parent = *p;
		node = rb_entry(parent, struct nvmap_handle_ref, node);
		if (ref->handle > node->handle)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&ref->node, parent, p);
	rb_insert_color(&ref->node, &client->handle_refs);
	nvmap_ref_unlock(client);
}
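
/* Create a new, not-yet-allocated handle of the requested size and a first
 * reference to it for the calling client. The handle defaults to
 * write-combined until nvmap_alloc_handle_id() sets the real flags. */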
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size)
{
	struct nvmap_handle *h;
	struct nvmap_handle_ref *ref = NULL;

	if (!size)
		return ERR_PTR(-EINVAL);

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		kfree(h);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&h->ref, 1);
	atomic_set(&h->pin, 0);
	h->owner = client;
	h->dev = client->dev;
	h->size = h->orig_size = size;
	h->flags = NVMAP_HANDLE_WRITE_COMBINE;
	mutex_init(&h->lock);

	nvmap_handle_add(client->dev, h);

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);
	return ref;
}
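
/* Take an additional reference to an existing, allocated handle on behalf
 * of a client, creating a ref entry in that client's tree if it does not
 * already hold one. Duplicating a discontiguous IOVMM handle counts
 * against the client's IOVM commit limit unless the client is super. */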
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
						   unsigned long id)
{
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_handle *h = NULL;

	BUG_ON(!client || client->dev != nvmap_dev);
	/* on success, the reference count for the handle should be
	 * incremented, so the success paths will not call nvmap_handle_put */
	h = nvmap_validate_get(client, id);

	if (!h) {
		nvmap_debug(client, "%s duplicate handle failed\n",
			    current->group_leader->comm);
		return ERR_PTR(-EPERM);
	}

	if (!h->alloc) {
		nvmap_err(client, "%s duplicating unallocated handle\n",
			  current->group_leader->comm);
		nvmap_handle_put(h);
		return ERR_PTR(-EINVAL);
	}

	nvmap_ref_lock(client);
	ref = _nvmap_validate_id_locked(client, (unsigned long)h);

	if (ref) {
		/* handle already duplicated in client; just increment
		 * the reference count rather than re-duplicating it */
		atomic_inc(&ref->dupes);
		nvmap_ref_unlock(client);
		return ref;
	}

	nvmap_ref_unlock(client);

	/* verify that adding this handle to the process' access list
	 * won't exceed the IOVM limit */
	if (h->heap_pgalloc && !h->pgalloc.contig && !client->super) {
		int oc;
		oc = atomic_add_return(h->size, &client->iovm_commit);
		if (oc > client->iovm_limit) {
			atomic_sub(h->size, &client->iovm_commit);
			nvmap_handle_put(h);
			nvmap_err(client, "duplicating %p in %s over-commits"
				  " IOVMM space\n", (void *)id,
				  current->group_leader->comm);
			return ERR_PTR(-ENOMEM);
		}
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		nvmap_handle_put(h);
		return ERR_PTR(-ENOMEM);
	}

	if (!h->heap_pgalloc)
		nvmap_carveout_commit_add(client,
			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
			h->size);

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);
	return ref;
}