/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

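/*
 * struct vb2_dma_sg_buf - per-buffer state for this allocator
 *
 * @vaddr:    kernel virtual mapping of the pages, created lazily by
 *            vb2_dma_sg_vaddr()
 * @pages:    array of the individual pages backing the buffer
 * @write:    nonzero if a USERPTR buffer was pinned for writing
 * @offset:   offset of a USERPTR buffer into its first page
 * @sg_desc:  scatterlist descriptor handed to drivers via the cookie op
 * @refcount: users of this buffer (the allocation plus live mmap mappings)
 * @handler:  common vm_area callbacks used by mmap to track @refcount
 */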
struct vb2_dma_sg_buf {
        void                            *vaddr;
        struct page                     **pages;
        int                             write;
        int                             offset;
        struct vb2_dma_sg_desc          sg_desc;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
};

static void vb2_dma_sg_put(void *buf_priv);

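/*
 * Allocate @size bytes as individually allocated pages (no physical
 * contiguity is guaranteed), describe them in a scatterlist and take
 * the initial reference on the buffer.
 */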
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_dma_sg_buf *buf;
        int i;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = 0;
        buf->offset = 0;
        buf->sg_desc.size = size;
        buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
                                      sizeof(*buf->sg_desc.sglist));
        if (!buf->sg_desc.sglist)
                goto fail_sglist_alloc;
        sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

        buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        for (i = 0; i < buf->sg_desc.num_pages; ++i) {
                buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
                if (!buf->pages[i])
                        goto fail_pages_alloc;
                sg_set_page(&buf->sg_desc.sglist[i],
                            buf->pages[i], PAGE_SIZE, 0);
        }

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
                __func__, buf->sg_desc.num_pages);
        return buf;

fail_pages_alloc:
        while (--i >= 0)
                __free_page(buf->pages[i]);
        kfree(buf->pages);

fail_pages_array_alloc:
        vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
        kfree(buf);
        return NULL;
}

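/*
 * Drop one reference on the buffer; on the last put, tear down the
 * kernel mapping (if any) and free the pages, the page array and the
 * scatterlist.
 */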
static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->sg_desc.num_pages;

        if (atomic_dec_and_test(&buf->refcount)) {
                printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
                        buf->sg_desc.num_pages);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
                vfree(buf->sg_desc.sglist);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
                kfree(buf);
        }
}

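/*
 * Pin the pages backing a userspace buffer with get_user_pages() and
 * describe them in a scatterlist. The first scatterlist entry carries
 * the sub-page offset of @vaddr so that non-page-aligned user pointers
 * are handled correctly.
 */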
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size, int write)
{
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user, i;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = write;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->sg_desc.size = size;

        first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->sg_desc.num_pages = last - first + 1;

        buf->sg_desc.sglist = vzalloc(
                buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
        if (!buf->sg_desc.sglist)
                goto userptr_fail_sglist_alloc;

        sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

        buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_pages_array_alloc;

        num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
                                             buf->sg_desc.num_pages,
                                             write,
                                             1, /* force */
                                             buf->pages,
                                             NULL);

        if (num_pages_from_user != buf->sg_desc.num_pages)
                goto userptr_fail_get_user_pages;

        sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
                    PAGE_SIZE - buf->offset, buf->offset);
        size -= PAGE_SIZE - buf->offset;
        for (i = 1; i < buf->sg_desc.num_pages; ++i) {
                sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
                            min_t(size_t, PAGE_SIZE, size), 0);
                size -= min_t(size_t, PAGE_SIZE, size);
        }
        return buf;

userptr_fail_get_user_pages:
        printk(KERN_DEBUG "get_user_pages requested/got: %d/%d\n",
               buf->sg_desc.num_pages, num_pages_from_user);
        while (--num_pages_from_user >= 0)
                put_page(buf->pages[num_pages_from_user]);
        kfree(buf->pages);

userptr_fail_pages_array_alloc:
        vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
        kfree(buf);
        return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->sg_desc.num_pages;

        printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
               __func__, buf->sg_desc.num_pages);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
        while (--i >= 0) {
                if (buf->write)
                        set_page_dirty_lock(buf->pages[i]);
                put_page(buf->pages[i]);
        }
        vfree(buf->sg_desc.sglist);
        kfree(buf->pages);
        kfree(buf);
}

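/*
 * Return a kernel virtual address for the buffer, creating the mapping
 * with vm_map_ram() on first use. The stored offset is added so that a
 * non-page-aligned USERPTR buffer maps to the data the user passed in.
 */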
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        BUG_ON(!buf);

        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
                                        buf->sg_desc.num_pages,
                                        -1,
                                        PAGE_KERNEL);

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

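/*
 * Map the buffer into a userspace vma one page at a time with
 * vm_insert_page(), then install the common vm_area operations so that
 * the mapping holds a reference on the buffer for its lifetime.
 */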
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int i = 0;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        do {
                int ret;

                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
                if (ret) {
                        printk(KERN_ERR "Remapping memory, error: %d\n", ret);
                        return ret;
                }

                uaddr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return &buf->sg_desc;
}

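/*
 * Usage sketch (illustrative, not part of this allocator): a driver
 * selects this allocator by pointing its struct vb2_queue at
 * vb2_dma_sg_memops and can then reach the scatterlist through the
 * cookie op, typically via the vb2_dma_sg_plane_desc() helper from
 * <media/videobuf2-dma-sg.h>:
 *
 *      q->mem_ops = &vb2_dma_sg_memops;
 *      ...
 *      struct vb2_dma_sg_desc *desc = vb2_dma_sg_plane_desc(vb, plane);
 *      num_mapped = dma_map_sg(dev, desc->sglist, desc->num_pages,
 *                              DMA_FROM_DEVICE);
 *
 * The dma_map_sg() call and the plane index are shown for illustration
 * only; the details depend on the driver.
 */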
const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");