/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count);

static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                       unsigned long start, unsigned long end, unsigned int op);

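/*
 * Pin or unpin one or more handles for the calling client.  For op.count == 1
 * the handle is passed directly in op.handles; for multiple handles,
 * op.handles points to a user array of handle IDs.  On a successful pin the
 * device-visible address of each handle (physical for carveout or contiguous
 * sysmem, IOVMM base otherwise) is written back: to op.addr in the
 * multi-handle case, or to the addr field of the user structure in the
 * single-handle case.  Unpin never reports addresses.
 */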
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
{
        struct nvmap_pin_handle op;
        struct nvmap_handle *h;
        unsigned long on_stack[16];
        unsigned long *refs;
        unsigned long __user *output;
        unsigned int i;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.count)
                return -EINVAL;

        if (op.count > 1) {
                size_t bytes = op.count * sizeof(unsigned long *);

                if (op.count > ARRAY_SIZE(on_stack))
                        refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
                else
                        refs = on_stack;

                if (!refs)
                        return -ENOMEM;

                if (copy_from_user(refs, (void *)op.handles, bytes)) {
                        err = -EFAULT;
                        goto out;
                }
        } else {
                refs = on_stack;
                on_stack[0] = (unsigned long)op.handles;
        }

        if (is_pin)
                err = nvmap_pin_ids(filp->private_data, op.count, refs);
        else
                nvmap_unpin_ids(filp->private_data, op.count, refs);

        /* skip the output stage on unpin */
        if (err || !is_pin)
                goto out;

        /* it is guaranteed that if nvmap_pin_ids returns 0 that
         * all of the handle_ref objects are valid, so dereferencing
         * directly here is safe */
        if (op.count > 1)
                output = (unsigned long __user *)op.addr;
        else {
                struct nvmap_pin_handle __user *tmp = arg;
                output = (unsigned long __user *)&(tmp->addr);
        }

        if (!output)
                goto out;

        for (i = 0; i < op.count && !err; i++) {
                unsigned long addr;

                h = (struct nvmap_handle *)refs[i];

                if (h->heap_pgalloc && h->pgalloc.contig)
                        addr = page_to_phys(h->pgalloc.pages[0]);
                else if (h->heap_pgalloc)
                        addr = h->pgalloc.area->iovm_start;
                else
                        addr = h->carveout->base;

                err = put_user(addr, &output[i]);
        }

        if (err)
                nvmap_unpin_ids(filp->private_data, op.count, refs);

out:
        if (refs != on_stack)
                kfree(refs);

        return err;
}

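/*
 * Return a global ID for a handle so that it can be shared with another
 * client and reopened via NVMAP_IOC_FROM_ID.  If the caller owns the handle
 * it is marked global as a side effect.
 */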
int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_create_handle op;
        struct nvmap_handle *h = NULL;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);

        if (!h)
                return -EPERM;

        op.id = (__u32)h;
        if (client == h->owner)
                h->global = true;

        nvmap_handle_put(h);

        return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}

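/*
 * Allocate backing storage for a previously created handle.  The requested
 * alignment must be a power of two and is raised to at least PAGE_SIZE so
 * that user mappings cannot expose data from adjacent allocations.
 */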
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
        struct nvmap_alloc_handle op;
        struct nvmap_client *client = filp->private_data;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        if (op.align & (op.align - 1))
                return -EINVAL;

        /* user-space handles are aligned to page boundaries, to prevent
         * data leakage. */
        op.align = max_t(size_t, op.align, PAGE_SIZE);

        return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
                                     op.align, op.flags);
}

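/*
 * Create a new handle (NVMAP_IOC_CREATE) or duplicate an existing global
 * handle by its ID (NVMAP_IOC_FROM_ID), and return the resulting handle to
 * user space through the same nvmap_create_handle structure.
 *
 * Illustrative user-space sketch (an assumption for documentation only, not
 * part of this file; fd is an open nvmap device descriptor):
 *
 *	struct nvmap_create_handle op = { .size = len };
 *	if (ioctl(fd, NVMAP_IOC_CREATE, &op) == 0)
 *		;	// op.handle now names an unallocated handle of len bytes
 */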
int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
        struct nvmap_create_handle op;
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_client *client = filp->private_data;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!client)
                return -ENODEV;

        if (cmd == NVMAP_IOC_CREATE) {
                ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
                if (!IS_ERR(ref))
                        ref->handle->orig_size = op.size;
        } else if (cmd == NVMAP_IOC_FROM_ID) {
                ref = nvmap_duplicate_handle_id(client, op.id);
        } else {
                return -EINVAL;
        }

        if (IS_ERR(ref))
                return PTR_ERR(ref);

        op.handle = nvmap_ref_to_id(ref);
        if (copy_to_user(arg, &op, sizeof(op))) {
                err = -EFAULT;
                nvmap_free_handle_id(client, op.handle);
        }

        return err;
}

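/*
 * Attach a handle to an existing nvmap VMA in the caller's address space.
 * The VMA must have been created by an mmap() of the nvmap device, must
 * exactly cover [op.addr, op.addr + op.length), and must not already be
 * bound to a different handle.
 */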
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_map_caller op;
        struct nvmap_vma_priv *vpriv;
        struct vm_area_struct *vma;
        struct nvmap_handle *h = NULL;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);

        if (!h)
                return -EPERM;

        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->mm, op.addr);
        if (!vma || !vma->vm_private_data) {
                err = -ENOMEM;
                goto out;
        }

        if (op.offset & ~PAGE_MASK) {
                err = -EFAULT;
                goto out;
        }

        if ((op.offset + op.length) > h->size) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        vpriv = vma->vm_private_data;
        BUG_ON(!vpriv);

        /* the VMA must exactly match the requested mapping operation, and the
         * VMA that is targeted must have been created by this driver
         */
        if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
            (vma->vm_end - vma->vm_start != op.length)) {
                err = -EPERM;
                goto out;
        }

        /* verify that each mmap() system call creates a unique VMA */

        if (vpriv->handle && (h == vpriv->handle)) {
                goto out;
        } else if (vpriv->handle) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
                err = -EFAULT;
                goto out;
        }

        vpriv->handle = h;
        vpriv->offs = op.offset;

        vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

out:
        up_read(&current->mm->mmap_sem);
        if (err)
                nvmap_handle_put(h);
        return err;
}

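/*
 * Report a property of a handle: its original (requested) size, its
 * alignment, its base address (only valid while the handle is pinned), or
 * the heap it was allocated from.
 */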
int nvmap_ioctl_get_param(struct file *filp, void __user *arg)
{
        struct nvmap_handle_param op;
        struct nvmap_client *client = filp->private_data;
        struct nvmap_handle *h;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        h = nvmap_get_handle_id(client, op.handle);
        if (!h)
                return -EINVAL;

        switch (op.param) {
        case NVMAP_HANDLE_PARAM_SIZE:
                op.result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
                if (!h->alloc)
                        op.result = 0;
                else if (h->heap_pgalloc)
                        op.result = PAGE_SIZE;
                else if (h->carveout->base)
                        op.result = (h->carveout->base & -h->carveout->base);
                else
                        op.result = SZ_4M;
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
                        op.result = -1ul;
                else if (!h->heap_pgalloc)
                        op.result = h->carveout->base;
                else if (h->pgalloc.contig)
                        op.result = page_to_phys(h->pgalloc.pages[0]);
                else if (h->pgalloc.area)
                        op.result = h->pgalloc.area->iovm_start;
                else
                        op.result = -1ul;
                break;
        case NVMAP_HANDLE_PARAM_HEAP:
                if (!h->alloc)
                        op.result = 0;
                else if (!h->heap_pgalloc)
                        op.result = nvmap_carveout_usage(client, h->carveout);
                else if (h->pgalloc.contig)
                        op.result = NVMAP_HEAP_SYSMEM;
                else
                        op.result = NVMAP_HEAP_IOVMM;
                break;
        default:
                err = -EINVAL;
                break;
        }

        if (!err && copy_to_user(arg, &op, sizeof(op)))
                err = -EFAULT;

        nvmap_handle_put(h);
        return err;
}

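/*
 * Copy data between user memory and a handle's backing memory (the read and
 * write ioctls both land here, distinguished by is_read).  The transfer is
 * described as op.count elements of op.elem_size bytes with independent
 * strides on the handle (hmem_stride) and user (user_stride) sides, so a
 * strided/2D copy can be done in one call.  The number of bytes actually
 * copied is written back to the count field of the user structure.
 */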
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_rw_handle __user *uarg = arg;
        struct nvmap_rw_handle op;
        struct nvmap_handle *h;
        ssize_t copied;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle || !op.addr || !op.count || !op.elem_size)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);
        if (!h)
                return -EPERM;

        copied = rw_handle(client, h, is_read, op.offset,
                           (unsigned long)op.addr, op.hmem_stride,
                           op.user_stride, op.elem_size, op.count);

        if (copied < 0) {
                err = copied;
                copied = 0;
        } else if (copied < (op.count * op.elem_size))
                err = -EINTR;

        __put_user(copied, &uarg->count);

        nvmap_handle_put(h);

        return err;
}

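/*
 * Perform cache maintenance (writeback, invalidate, or writeback+invalidate)
 * on a user-virtual range backed by a handle.  The range must lie inside an
 * nvmap VMA belonging to op.handle; it is converted to an offset within the
 * handle before being passed to cache_maint().
 */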
int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_cache_op op;
        struct vm_area_struct *vma;
        struct nvmap_vma_priv *vpriv;
        unsigned long start;
        unsigned long end;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
            op.op > NVMAP_CACHE_OP_WB_INV)
                return -EINVAL;

        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->mm, (unsigned long)op.addr);
        if (!vma || !is_nvmap_vma(vma) ||
            (unsigned long)op.addr + op.len > vma->vm_end) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;

        if ((unsigned long)vpriv->handle != op.handle) {
                err = -EFAULT;
                goto out;
        }

        start = (unsigned long)op.addr - vma->vm_start;
        end = start + op.len;

        err = cache_maint(client, vpriv->handle, start, end, op.op);
out:
        up_read(&current->mm->mmap_sem);
        return err;
}

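/* Drop the calling client's reference to a handle; a zero handle is a no-op. */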
int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
        struct nvmap_client *client = filp->private_data;

        if (!arg)
                return 0;

        nvmap_free_handle_id(client, arg);
        return 0;
}

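/*
 * Carry out the cache maintenance requested by nvmap_ioctl_cache_maint().
 * Uncacheable and write-combined handles (and empty ranges) need no work.
 * Page-allocated handles are maintained page by page through the ARM DMA
 * helpers; carveout handles are walked through a temporary kernel PTE so
 * the physical range can be maintained, followed by outer-cache maintenance
 * except for inner-cacheable handles.
 */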
static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                       unsigned long start, unsigned long end, unsigned int op)
{
        enum dma_data_direction dir;
        pgprot_t prot;
        pte_t **pte = NULL;
        unsigned long kaddr;
        unsigned long loop;
        int err = 0;

        h = nvmap_handle_get(h);
        if (!h)
                return -EFAULT;

        if (!h->alloc) {
                err = -EFAULT;
                goto out;
        }

        if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
            start == end)
                goto out;

        if (WARN_ON_ONCE(op == NVMAP_CACHE_OP_WB_INV))
                dir = DMA_BIDIRECTIONAL;
        else if (op == NVMAP_CACHE_OP_WB)
                dir = DMA_TO_DEVICE;
        else
                dir = DMA_FROM_DEVICE;

        if (h->heap_pgalloc) {
                while (start < end) {
                        unsigned long next = (start + PAGE_SIZE) & PAGE_MASK;
                        struct page *page;

                        page = h->pgalloc.pages[start >> PAGE_SHIFT];
                        next = min(next, end);
                        __dma_page_cpu_to_dev(page, start & ~PAGE_MASK,
                                              next - start, dir);
                        start = next;
                }
                goto out;
        }

        prot = nvmap_pgprot(h, pgprot_kernel);
        pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
        if (IS_ERR(pte)) {
                err = PTR_ERR(pte);
                pte = NULL;
                goto out;
        }

        if (start > h->size || end > h->size) {
                nvmap_warn(client, "cache maintenance outside handle\n");
                err = -EINVAL;
                goto out;
        }

        start += h->carveout->base;
        end += h->carveout->base;

        loop = start;

        while (loop < end) {
                unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
                void *base = (void *)kaddr + (loop & ~PAGE_MASK);
                next = min(next, end);

                set_pte_at(&init_mm, kaddr, *pte,
                           pfn_pte(__phys_to_pfn(loop), prot));
                flush_tlb_kernel_page(kaddr);

                dmac_map_area(base, next - loop, dir);
                loop = next;
        }

        if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
                if (dir != DMA_FROM_DEVICE)
                        outer_clean_range(start, end);
                else
                        outer_inv_range(start, end);
        }

out:
        if (pte)
                nvmap_free_pte(client->dev, pte);
        nvmap_handle_put(h);
        wmb();
        return err;
}

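/*
 * Copy `bytes' bytes between handle offset `start' and the user buffer at
 * `rw_addr', one page at a time, by pointing the scratch PTE (mapped at
 * `kaddr') at each backing page in turn.
 */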
static int rw_handle_page(struct nvmap_handle *h, int is_read,
                          unsigned long start, unsigned long rw_addr,
                          unsigned long bytes, unsigned long kaddr, pte_t *pte)
{
        pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
        unsigned long end = start + bytes;
        int err = 0;

        while (!err && start < end) {
                struct page *page = NULL;
                unsigned long phys;
                size_t count;
                void *src;

                if (!h->heap_pgalloc) {
                        phys = h->carveout->base + start;
                } else {
                        page = h->pgalloc.pages[start >> PAGE_SHIFT];
                        BUG_ON(!page);
                        get_page(page);
                        phys = page_to_phys(page) + (start & ~PAGE_MASK);
                }

                set_pte_at(&init_mm, kaddr, pte,
                           pfn_pte(__phys_to_pfn(phys), prot));
                flush_tlb_kernel_page(kaddr);

                src = (void *)kaddr + (phys & ~PAGE_MASK);
                phys = PAGE_SIZE - (phys & ~PAGE_MASK);
                count = min_t(size_t, end - start, phys);

                if (is_read)
                        err = copy_to_user((void *)rw_addr, src, count);
                else
                        err = copy_from_user(src, (void *)rw_addr, count);

                if (err)
                        err = -EFAULT;

                rw_addr += count;
                start += count;

                if (page)
                        put_page(page);
        }

        return err;
}

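/*
 * Strided copy between user memory and a handle.  When both strides equal
 * the element size the elements are contiguous, so the whole request is
 * collapsed into a single element of count * elem_size bytes.  Returns the
 * number of bytes copied, or a negative errno if any element fails to copy.
 */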
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count)
{
        ssize_t copied = 0;
        pte_t **pte;
        void *addr;
        int ret = 0;

        if (!elem_size)
                return -EINVAL;

        if (!h->alloc)
                return -EFAULT;

        if (elem_size == h_stride && elem_size == sys_stride) {
                elem_size *= count;
                h_stride = elem_size;
                sys_stride = elem_size;
                count = 1;
        }

        pte = nvmap_alloc_pte(client->dev, &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);

        while (count--) {
                if (h_offs + elem_size > h->size) {
                        nvmap_warn(client, "read/write outside of handle\n");
                        ret = -EFAULT;
                        break;
                }

                ret = rw_handle_page(h, is_read, h_offs, sys_addr,
                                     elem_size, (unsigned long)addr, *pte);

                if (ret)
                        break;

                copied += elem_size;
                sys_addr += sys_stride;
                h_offs += h_stride;
        }

        nvmap_free_pte(client->dev, pte);
        return ret ?: copied;
}