/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count);

static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
		       unsigned long start, unsigned long end, unsigned int op);
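
/*
 * Pin/unpin ioctl handler. Pins (or unpins) a user-supplied list of handle
 * IDs; on a successful pin, the physical or IOVMM address of each handle is
 * copied back to user-space (through the addr field of the ioctl argument
 * itself in the single-handle case). If writing the addresses back fails,
 * everything is unpinned again before returning.
 */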
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
{
	struct nvmap_pin_handle op;
	struct nvmap_handle *h;
	unsigned long on_stack[16];
	unsigned long *refs;
	unsigned long __user *output;
	unsigned int i;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.count)
		return -EINVAL;

	if (op.count > 1) {
		size_t bytes = op.count * sizeof(*refs);

		if (op.count > ARRAY_SIZE(on_stack))
			refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
		else
			refs = on_stack;

		if (!refs)
			return -ENOMEM;

		if (copy_from_user(refs, (void __user *)op.handles, bytes)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		refs = on_stack;
		on_stack[0] = (unsigned long)op.handles;
	}

	if (is_pin)
		err = nvmap_pin_ids(filp->private_data, op.count, refs);
	else
		nvmap_unpin_ids(filp->private_data, op.count, refs);

	/* skip the output stage on unpin */
	if (err || !is_pin)
		goto out;

	/* it is guaranteed that if nvmap_pin_ids returns 0 that
	 * all of the handle_ref objects are valid, so dereferencing
	 * directly here is safe */
	if (op.count > 1)
		output = (unsigned long __user *)op.addr;
	else {
		struct nvmap_pin_handle __user *tmp = arg;
		output = (unsigned long __user *)&(tmp->addr);
	}

	if (!output)
		goto out;

	for (i = 0; i < op.count && !err; i++) {
		unsigned long addr;

		h = (struct nvmap_handle *)refs[i];
		if (h->heap_pgalloc && h->pgalloc.contig)
			addr = page_to_phys(h->pgalloc.pages[0]);
		else if (h->heap_pgalloc)
			addr = h->pgalloc.area->iovm_start;
		else
			addr = h->carveout->base;

		err = put_user(addr, &output[i]);
	}

	if (err)
		nvmap_unpin_ids(filp->private_data, op.count, refs);
out:
	if (refs != on_stack)
		kfree(refs);
	return err;
}
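
/*
 * Returns the global ID for a client-local handle; when the handle's owner
 * asks for the ID, the handle is also marked global so that other clients
 * can duplicate it (e.g. via NVMAP_IOC_FROM_ID).
 */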
int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_create_handle op;
	struct nvmap_handle *h = NULL;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EPERM;

	op.id = (__u32)h;
	if (client == h->owner)
		h->global = true;

	nvmap_handle_put(h);

	return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}
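
/*
 * Allocates backing memory for a previously created handle. The requested
 * alignment must be a power of two, and is raised to at least PAGE_SIZE.
 */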
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
	struct nvmap_alloc_handle op;
	struct nvmap_client *client = filp->private_data;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	/* alignment must be a power of two */
	if (op.align & (op.align - 1))
		return -EINVAL;

	/* user-space handles are aligned to page boundaries, to prevent
	 * data leakage. */
	op.align = max_t(size_t, op.align, PAGE_SIZE);

	return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
				     op.align, op.flags);
}
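
/*
 * Creates a new handle (NVMAP_IOC_CREATE) or duplicates an existing one
 * from its global ID (NVMAP_IOC_FROM_ID), and returns the resulting handle
 * ID to user-space.
 */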
int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
	struct nvmap_create_handle op;
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_client *client = filp->private_data;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!client)
		return -ENODEV;

	if (cmd == NVMAP_IOC_CREATE) {
		ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
		if (!IS_ERR(ref))
			ref->handle->orig_size = op.size;
	} else if (cmd == NVMAP_IOC_FROM_ID) {
		ref = nvmap_duplicate_handle_id(client, op.id);
	} else {
		return -EINVAL;
	}

	if (IS_ERR(ref))
		return PTR_ERR(ref);

	op.handle = nvmap_ref_to_id(ref);
	if (copy_to_user(arg, &op, sizeof(op))) {
		err = -EFAULT;
		nvmap_free_handle_id(client, op.handle);
	}

	return err;
}
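
/*
 * Attaches a handle to a VMA previously created by an mmap() of the nvmap
 * device. The VMA must exactly cover the requested range, and must not
 * already be backed by a different handle.
 */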
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_map_caller op;
	struct nvmap_vma_priv *vpriv;
	struct vm_area_struct *vma;
	struct nvmap_handle *h = NULL;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EPERM;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, op.addr);
	if (!vma || !vma->vm_private_data) {
		err = -ENOMEM;
		goto out;
	}

	if (op.offset & ~PAGE_MASK) {
		err = -EFAULT;
		goto out;
	}

	if ((op.offset + op.length) > h->size) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	vpriv = vma->vm_private_data;

	/* the VMA must exactly match the requested mapping operation, and the
	 * VMA that is targeted must have been created by this driver
	 */
	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
	    (vma->vm_end - vma->vm_start != op.length)) {
		err = -EPERM;
		goto out;
	}

	/* verify that each mmap() system call creates a unique VMA */
	if (vpriv->handle && (h == vpriv->handle)) {
		goto out;
	} else if (vpriv->handle) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
		err = -EFAULT;
		goto out;
	}

	vpriv->handle = h;
	vpriv->offs = op.offset;

	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
out:
	up_read(&current->mm->mmap_sem);
	if (err)
		nvmap_handle_put(h);
	return err;
}
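
/*
 * Queries one property of a handle: its size, its alignment, its physical
 * base address (meaningful only while the handle is pinned), or the heap
 * it was allocated from.
 */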
int nvmap_ioctl_get_param(struct file *filp, void __user *arg)
{
	struct nvmap_handle_param op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *h;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EINVAL;

	switch (op.param) {
	case NVMAP_HANDLE_PARAM_SIZE:
		op.result = h->orig_size;
		break;
	case NVMAP_HANDLE_PARAM_ALIGNMENT:
		if (!h->alloc)
			op.result = 0;
		else if (h->heap_pgalloc)
			op.result = PAGE_SIZE;
		else if (h->carveout->base)
			op.result = (h->carveout->base & -h->carveout->base);
		else
			op.result = SZ_4M;
		break;
	case NVMAP_HANDLE_PARAM_BASE:
		if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
			op.result = -1ul;
		else if (!h->heap_pgalloc)
			op.result = h->carveout->base;
		else if (h->pgalloc.contig)
			op.result = page_to_phys(h->pgalloc.pages[0]);
		else if (h->pgalloc.area)
			op.result = h->pgalloc.area->iovm_start;
		else
			op.result = -1ul;
		break;
	case NVMAP_HANDLE_PARAM_HEAP:
		if (!h->alloc)
			op.result = 0;
		else if (!h->heap_pgalloc)
			op.result = nvmap_carveout_usage(client, h->carveout);
		else if (h->pgalloc.contig)
			op.result = NVMAP_HEAP_SYSMEM;
		else
			op.result = NVMAP_HEAP_IOVMM;
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (!err && copy_to_user(arg, &op, sizeof(op)))
		err = -EFAULT;

	nvmap_handle_put(h);
	return err;
}
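
/*
 * Strided read/write between a handle and user memory. Copies whole
 * elements until the request completes or an error occurs, and reports the
 * number of bytes actually copied back to user-space.
 */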
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_rw_handle __user *uarg = arg;
	struct nvmap_rw_handle op;
	struct nvmap_handle *h;
	ssize_t copied;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle || !op.addr || !op.count || !op.elem_size)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EPERM;

	copied = rw_handle(client, h, is_read, op.offset,
			   (unsigned long)op.addr, op.hmem_stride,
			   op.user_stride, op.elem_size, op.count);

	if (copied < 0) {
		err = copied;
		copied = 0;
	} else if (copied < (op.count * op.elem_size))
		err = -EINTR;

	__put_user(copied, &uarg->count);

	nvmap_handle_put(h);

	return err;
}
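
/*
 * Cache maintenance ioctl: validates that the user address range lies
 * inside an nvmap VMA backed by the named handle, then performs the
 * requested writeback/invalidate on the corresponding handle range.
 */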
int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_cache_op op;
	struct vm_area_struct *vma;
	struct nvmap_vma_priv *vpriv;
	unsigned long start;
	unsigned long end;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
	    op.op > NVMAP_CACHE_OP_WB_INV)
		return -EINVAL;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, (unsigned long)op.addr);
	if (!vma || !is_nvmap_vma(vma) ||
	    (unsigned long)op.addr + op.len > vma->vm_end) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;

	if ((unsigned long)vpriv->handle != op.handle) {
		err = -EFAULT;
		goto out;
	}

	start = (unsigned long)op.addr - vma->vm_start;
	end = start + op.len;

	err = cache_maint(client, vpriv->handle, start, end, op.op);
out:
	up_read(&current->mm->mmap_sem);
	return err;
}
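
/* Drops this client's reference to a handle; a zero handle ID is a no-op. */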
int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
	struct nvmap_client *client = filp->private_data;

	if (!arg)
		return 0;

	nvmap_free_handle_id(client, arg);
	return 0;
}
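
/*
 * Performs the actual cache maintenance. Page-allocated handles are
 * maintained page-by-page through the DMA mapping helpers; carveout
 * handles are temporarily mapped into the kernel one page at a time
 * through a scratch PTE. Uncacheable and write-combined handles need no
 * maintenance, and for inner-cacheable handles the outer cache is skipped.
 */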
static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
		       unsigned long start, unsigned long end, unsigned int op)
{
	enum dma_data_direction dir;
	pgprot_t prot;
	pte_t **pte = NULL;
	unsigned long kaddr;
	unsigned long loop;
	int err = 0;

	h = nvmap_handle_get(h);
	if (!h)
		return -EFAULT;

	if (!h->alloc) {
		err = -EFAULT;
		goto out;
	}

	if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	    h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
	    start == end)
		goto out;

	if (WARN_ON_ONCE(op == NVMAP_CACHE_OP_WB_INV))
		dir = DMA_BIDIRECTIONAL;
	else if (op == NVMAP_CACHE_OP_WB)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	if (h->heap_pgalloc) {
		while (start < end) {
			unsigned long next = (start + PAGE_SIZE) & PAGE_MASK;
			struct page *page;

			page = h->pgalloc.pages[start >> PAGE_SHIFT];
			next = min(next, end);
			__dma_page_cpu_to_dev(page, start & ~PAGE_MASK,
					      next - start, dir);
			start = next;
		}
		goto out;
	}

	prot = nvmap_pgprot(h, pgprot_kernel);
	pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
	if (IS_ERR(pte)) {
		err = PTR_ERR(pte);
		pte = NULL;
		goto out;
	}

	if (start > h->size || end > h->size) {
		nvmap_warn(client, "cache maintenance outside handle\n");
		err = -EINVAL;
		goto out;
	}

	start += h->carveout->base;
	end += h->carveout->base;

	loop = start;
	while (loop < end) {
		unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
		void *base = (void *)kaddr + (loop & ~PAGE_MASK);
		next = min(next, end);

		set_pte_at(&init_mm, kaddr, *pte,
			   pfn_pte(__phys_to_pfn(loop), prot));
		flush_tlb_kernel_page(kaddr);

		dmac_map_area(base, next - loop, dir);
		loop = next;
	}

	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
		if (dir != DMA_FROM_DEVICE)
			outer_clean_range(start, end);
		else
			outer_inv_range(start, end);
	}
out:
	if (pte)
		nvmap_free_pte(client->dev, pte);
	nvmap_handle_put(h);
	return err;
}
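
/*
 * Copies @bytes bytes between user memory at @rw_addr and handle offset
 * @start, mapping the handle's backing memory into the kernel one page at
 * a time through the scratch PTE at @kaddr/@pte.
 */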
static int rw_handle_page(struct nvmap_handle *h, int is_read,
			  unsigned long start, unsigned long rw_addr,
			  unsigned long bytes, unsigned long kaddr, pte_t *pte)
{
	pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
	unsigned long end = start + bytes;
	int err = 0;

	while (!err && start < end) {
		struct page *page = NULL;
		unsigned long phys;
		size_t count;
		void *src;

		if (!h->heap_pgalloc) {
			phys = h->carveout->base + start;
		} else {
			page = h->pgalloc.pages[start >> PAGE_SHIFT];
			BUG_ON(!page);
			get_page(page);
			phys = page_to_phys(page) + (start & ~PAGE_MASK);
		}

		set_pte_at(&init_mm, kaddr, pte,
			   pfn_pte(__phys_to_pfn(phys), prot));
		flush_tlb_kernel_page(kaddr);

		src = (void *)kaddr + (phys & ~PAGE_MASK);
		phys = PAGE_SIZE - (phys & ~PAGE_MASK);
		count = min_t(size_t, end - start, phys);

		if (is_read)
			err = copy_to_user((void __user *)rw_addr, src, count);
		else
			err = copy_from_user(src, (void __user *)rw_addr, count);

		if (err)
			err = -EFAULT;

		rw_addr += count;
		start += count;
		if (page)
			put_page(page);
	}

	return err;
}
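
/*
 * Copies @count elements of @elem_size bytes between user memory and the
 * handle, advancing by @sys_stride and @h_stride after each element. When
 * both strides equal the element size, the transfer is contiguous and is
 * collapsed into a single large element to minimize mapping overhead.
 * Returns the number of bytes copied, or a negative error code.
 */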
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count)
{
	ssize_t copied = 0;
	pte_t **pte;
	void *addr;
	int ret = 0;

	if (!elem_size)
		return -EINVAL;

	if (!h->alloc)
		return -EFAULT;

	if (elem_size == h_stride && elem_size == sys_stride) {
		elem_size *= count;
		h_stride = elem_size;
		sys_stride = elem_size;
		count = 1;
	}

	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	while (count--) {
		if (h_offs + elem_size > h->size) {
			nvmap_warn(client, "read/write outside of handle\n");
			ret = -EFAULT;
			break;
		}

		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
				     elem_size, (unsigned long)addr, *pte);
		if (ret)
			break;

		copied += elem_size;
		sys_addr += sys_stride;
		h_offs += h_stride;
	}

	nvmap_free_pte(client->dev, pte);
	return ret ?: copied;
}