drivers/video/rockchip/vcodec/vcodec_iommu_drm.c
/*
 * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
 * author: Jung Zhao jung.zhao@rock-chips.com
 *         Randy Li, randy.li@rock-chips.com
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-iommu.h>

#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_sync_helper.h>
#include <drm/rockchip_drm.h>
#include <linux/dma-mapping.h>
#include <linux/rockchip-iovmm.h>
#include <linux/pm_runtime.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/component.h>
#include <linux/fence.h>
#include <linux/console.h>
#include <linux/kref.h>
#include <linux/fdtable.h>

#include "vcodec_iommu_ops.h"

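/*
 * Per-buffer bookkeeping for a dma-buf imported into a vcodec session.
 * The anonymous union holds the device-visible address once the buffer
 * has been mapped (an IOVA when the IOMMU is in use), and "ref" counts
 * the outstanding iommu/kernel mappings of the buffer.
 */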
struct vcodec_drm_buffer {
        struct list_head list;
        struct dma_buf *dma_buf;
        union {
                unsigned long iova;
                unsigned long phys;
        };
        void *cpu_addr;
        unsigned long size;
        int index;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct page **pages;
        struct kref ref;
        struct vcodec_iommu_session_info *session_info;
};

struct vcodec_iommu_drm_info {
        struct iommu_domain *domain;
        bool attached;
};

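/*
 * Buffer lookup helpers. The "_no_lock" suffix means the caller is
 * expected to hold session_info->list_mutex while buffer_list is walked.
 */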
static struct vcodec_drm_buffer *
vcodec_drm_get_buffer_no_lock(struct vcodec_iommu_session_info *session_info,
                              int idx)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;

        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                if (drm_buffer->index == idx)
                        return drm_buffer;
        }

        return NULL;
}

static struct vcodec_drm_buffer *
vcodec_drm_get_buffer_fd_no_lock(struct vcodec_iommu_session_info *session_info,
                                 int fd)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;
        struct dma_buf *dma_buf = NULL;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return NULL;

        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                if (drm_buffer->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        return drm_buffer;
                }
        }

        dma_buf_put(dma_buf);

        return NULL;
}

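/*
 * Detach the vcodec device from its IOMMU domain. Safe to call when the
 * device is already detached; the attached flag is protected by iommu_mutex.
 */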
static void vcodec_drm_detach(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        struct device *dev = iommu_info->dev;
        struct iommu_domain *domain = drm_info->domain;

        mutex_lock(&iommu_info->iommu_mutex);

        if (!drm_info->attached) {
                mutex_unlock(&iommu_info->iommu_mutex);
                return;
        }

        iommu_detach_device(domain, dev);
        drm_info->attached = false;

        mutex_unlock(&iommu_info->iommu_mutex);
}

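/*
 * Attach the vcodec device to its IOMMU domain and install the DMA mapping
 * ops for a 2 GiB IOVA window starting at 0x10000000. The "_unlock" variant
 * expects iommu_mutex to be held by the caller.
 */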
static int vcodec_drm_attach_unlock(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        struct device *dev = iommu_info->dev;
        struct iommu_domain *domain = drm_info->domain;
        int ret = 0;

        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
        ret = iommu_attach_device(domain, dev);
        if (ret) {
                dev_err(dev, "Failed to attach iommu device\n");
                return ret;
        }

        if (!common_iommu_setup_dma_ops(dev, 0x10000000, SZ_2G, domain->ops)) {
                dev_err(dev, "Failed to set dma_ops\n");
                iommu_detach_device(domain, dev);
                ret = -ENODEV;
        }

        return ret;
}

static int vcodec_drm_attach(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        int ret;

        mutex_lock(&iommu_info->iommu_mutex);

        if (drm_info->attached) {
                mutex_unlock(&iommu_info->iommu_mutex);
                return 0;
        }

        ret = vcodec_drm_attach_unlock(iommu_info);
        if (ret) {
                mutex_unlock(&iommu_info->iommu_mutex);
                return ret;
        }

        drm_info->attached = true;

        mutex_unlock(&iommu_info->iommu_mutex);

        return ret;
}

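/*
 * Build a flat page array from the buffer's sg table and vmap it into the
 * kernel with a noncached mapping so CPU accesses bypass the cache.
 */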
static void *vcodec_drm_sgt_map_kernel(struct vcodec_drm_buffer *drm_buffer)
{
        struct vcodec_iommu_session_info *session_info =
                drm_buffer->session_info;
        struct device *dev = session_info->dev;
        struct scatterlist *sgl, *sg;
        int nr_pages = PAGE_ALIGN(drm_buffer->size) >> PAGE_SHIFT;
        int i = 0, j = 0, k = 0;
        struct page *page;

        drm_buffer->pages = kmalloc_array(nr_pages, sizeof(*drm_buffer->pages),
                                          GFP_KERNEL);
        if (!(drm_buffer->pages)) {
                dev_err(dev, "drm map can not alloc pages\n");

                return NULL;
        }

        sgl = drm_buffer->sgt->sgl;

        for_each_sg(sgl, sg, drm_buffer->sgt->nents, i) {
                page = sg_page(sg);
                for (j = 0; j < sg->length / PAGE_SIZE; j++)
                        drm_buffer->pages[k++] = page++;
        }

        return vmap(drm_buffer->pages, nr_pages, VM_MAP,
                    pgprot_noncached(PAGE_KERNEL));
}

static void vcodec_drm_sgt_unmap_kernel(struct vcodec_drm_buffer *drm_buffer)
{
        vunmap(drm_buffer->cpu_addr);
        kfree(drm_buffer->pages);
}

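/*
 * kref release callback: undo everything vcodec_drm_import() set up for a
 * buffer (kernel mapping, attachment mapping, dma-buf references). The
 * IOMMU is re-attached temporarily if needed so the unmap can complete.
 */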
static void vcodec_drm_clear_map(struct kref *ref)
{
        struct vcodec_drm_buffer *drm_buffer =
                container_of(ref, struct vcodec_drm_buffer, ref);
        struct vcodec_iommu_session_info *session_info =
                drm_buffer->session_info;
        struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        struct device *dev = session_info->dev;
        struct iommu_domain *domain = drm_info->domain;

        mutex_lock(&iommu_info->iommu_mutex);
        drm_info = session_info->iommu_info->private;
        if (!drm_info->attached) {
                if (vcodec_drm_attach_unlock(session_info->iommu_info))
                        dev_err(dev, "can't clear map, attach iommu failed\n");
        }

        if (drm_buffer->cpu_addr) {
                vcodec_drm_sgt_unmap_kernel(drm_buffer);
                drm_buffer->cpu_addr = NULL;
        }

        if (drm_buffer->attach) {
                dma_buf_unmap_attachment(drm_buffer->attach, drm_buffer->sgt,
                                         DMA_BIDIRECTIONAL);
                dma_buf_detach(drm_buffer->dma_buf, drm_buffer->attach);
                dma_buf_put(drm_buffer->dma_buf);
                drm_buffer->attach = NULL;
        }

        if (!drm_info->attached)
                iommu_detach_device(domain, dev);

        mutex_unlock(&iommu_info->iommu_mutex);
}

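/* Debug helper: log every buffer still tracked by this session. */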
static void vcodec_drm_dump_info(struct vcodec_iommu_session_info *session_info)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;

        vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
                        "the following buffers are still stored in the list\n");
        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
                                "index %d drm_buffer dma_buf %p cpu_addr %p\n",
                                drm_buffer->index,
                                drm_buffer->dma_buf, drm_buffer->cpu_addr);
        }
}

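/*
 * Drop the session's bookkeeping for a buffer once all of its mappings
 * have been released (the kref has fallen to zero).
 */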
static int vcodec_drm_free(struct vcodec_iommu_session_info *session_info,
                           int idx)
{
        struct device *dev = session_info->dev;
        /* please double-check all maps have been released */
        struct vcodec_drm_buffer *drm_buffer;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);

        if (!drm_buffer) {
                dev_err(dev, "can not find %d buffer in list\n", idx);
                mutex_unlock(&session_info->list_mutex);

                return -EINVAL;
        }

        if (atomic_read(&drm_buffer->ref.refcount) == 0) {
                dma_buf_put(drm_buffer->dma_buf);
                list_del_init(&drm_buffer->list);
                kfree(drm_buffer);
        }
        mutex_unlock(&session_info->list_mutex);

        return 0;
}

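/*
 * Release one IOMMU mapping reference on a buffer; on RK3288 the IOMMU TLB
 * is flushed explicitly before the lookup.
 */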
static int
vcodec_drm_unmap_iommu(struct vcodec_iommu_session_info *session_info,
                       int idx)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        /* Force to flush iommu table */
        if (of_machine_is_compatible("rockchip,rk3288"))
                rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "can not find %d buffer in list\n", idx);
                return -EINVAL;
        }

        kref_put(&drm_buffer->ref, vcodec_drm_clear_map);

        return 0;
}

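/*
 * Look up a buffer by index, take an extra mapping reference on it and
 * report its IOVA and size to the caller.
 */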
static int vcodec_drm_map_iommu(struct vcodec_iommu_session_info *session_info,
                                int idx,
                                unsigned long *iova,
                                unsigned long *size)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        /* Force to flush iommu table */
        if (of_machine_is_compatible("rockchip,rk3288"))
                rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "can not find %d buffer in list\n", idx);
                return -EINVAL;
        }

        kref_get(&drm_buffer->ref);
        if (iova)
                *iova = drm_buffer->iova;
        if (size)
                *size = drm_buffer->size;
        return 0;
}

static int
vcodec_drm_unmap_kernel(struct vcodec_iommu_session_info *session_info, int idx)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "can not find %d buffer in list\n", idx);

                return -EINVAL;
        }

        if (drm_buffer->cpu_addr) {
                vcodec_drm_sgt_unmap_kernel(drm_buffer);
                drm_buffer->cpu_addr = NULL;
        }

        kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
        return 0;
}

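/*
 * Release a buffer identified by its dma-buf fd: drop the IOMMU mapping
 * reference and, if nothing else holds the buffer, free the bookkeeping.
 */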
static int
vcodec_drm_free_fd(struct vcodec_iommu_session_info *session_info, int fd)
{
        struct device *dev = session_info->dev;
        /* please double-check all maps have been released */
        struct vcodec_drm_buffer *drm_buffer = NULL;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_fd_no_lock(session_info, fd);

        if (!drm_buffer) {
                dev_err(dev, "can not find %d buffer in list\n", fd);
                mutex_unlock(&session_info->list_mutex);

                return -EINVAL;
        }
        mutex_unlock(&session_info->list_mutex);

        vcodec_drm_unmap_iommu(session_info, drm_buffer->index);

        mutex_lock(&session_info->list_mutex);
        if (atomic_read(&drm_buffer->ref.refcount) == 0) {
                dma_buf_put(drm_buffer->dma_buf);
                list_del_init(&drm_buffer->list);
                kfree(drm_buffer);
        }
        mutex_unlock(&session_info->list_mutex);

        return 0;
}

static void
vcodec_drm_clear_session(struct vcodec_iommu_session_info *session_info)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;

        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
                vcodec_drm_free(session_info, drm_buffer->index);
        }
}

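/*
 * Map a buffer into the kernel (lazily, on first use) and take a mapping
 * reference on it.
 */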
static void *
vcodec_drm_map_kernel(struct vcodec_iommu_session_info *session_info, int idx)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "can not find %d buffer in list\n", idx);
                return NULL;
        }

        if (!drm_buffer->cpu_addr)
                drm_buffer->cpu_addr =
                        vcodec_drm_sgt_map_kernel(drm_buffer);

        kref_get(&drm_buffer->ref);

        return drm_buffer->cpu_addr;
}

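/*
 * Import a dma-buf fd into the session: attach it to the vcodec device, map
 * the attachment to obtain its IOVA and add it to the session's buffer list.
 * Importing the same dma-buf twice returns the existing buffer index.
 */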
static int vcodec_drm_import(struct vcodec_iommu_session_info *session_info,
                             int fd)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;
        struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        struct device *dev = session_info->dev;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct dma_buf *dma_buf;
        int ret = 0;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf)) {
                ret = PTR_ERR(dma_buf);
                return ret;
        }

        list_for_each_entry_safe(drm_buffer, n,
                                 &session_info->buffer_list, list) {
                if (drm_buffer->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        return drm_buffer->index;
                }
        }

        drm_buffer = kzalloc(sizeof(*drm_buffer), GFP_KERNEL);
        if (!drm_buffer) {
                dma_buf_put(dma_buf);
                return -ENOMEM;
        }

        drm_buffer->dma_buf = dma_buf;
        drm_buffer->session_info = session_info;

        kref_init(&drm_buffer->ref);

        mutex_lock(&iommu_info->iommu_mutex);
        drm_info = session_info->iommu_info->private;
        if (!drm_info->attached) {
                ret = vcodec_drm_attach_unlock(session_info->iommu_info);
                if (ret)
                        goto fail_out;
        }

        attach = dma_buf_attach(drm_buffer->dma_buf, dev);
        if (IS_ERR(attach)) {
                ret = PTR_ERR(attach);
                goto fail_out;
        }

        get_dma_buf(drm_buffer->dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        drm_buffer->iova = sg_dma_address(sgt->sgl);
        drm_buffer->size = drm_buffer->dma_buf->size;

        drm_buffer->attach = attach;
        drm_buffer->sgt = sgt;

        mutex_unlock(&iommu_info->iommu_mutex);

        INIT_LIST_HEAD(&drm_buffer->list);
        mutex_lock(&session_info->list_mutex);
        drm_buffer->index = session_info->max_idx;
        list_add_tail(&drm_buffer->list, &session_info->buffer_list);
        session_info->max_idx++;
        if ((session_info->max_idx & 0xfffffff) == 0)
                session_info->max_idx = 0;
        mutex_unlock(&session_info->list_mutex);

        return drm_buffer->index;

fail_detach:
        dev_err(dev, "dmabuf map attach failed\n");
        dma_buf_detach(drm_buffer->dma_buf, attach);
        dma_buf_put(drm_buffer->dma_buf);
fail_out:
        /* drop the reference taken by dma_buf_get() above */
        dma_buf_put(dma_buf);
        kfree(drm_buffer);
        mutex_unlock(&iommu_info->iommu_mutex);

        return ret;
}

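/*
 * Allocate the per-device DRM/IOMMU state: an IOMMU domain with a DMA
 * cookie for IOVA allocation, then attach the vcodec device to it.
 */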
static int vcodec_drm_create(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info;
        int ret;

        iommu_info->private = kzalloc(sizeof(*drm_info),
                                      GFP_KERNEL);
        drm_info = iommu_info->private;
        if (!drm_info)
                return -ENOMEM;

        drm_info->domain = iommu_domain_alloc(&platform_bus_type);
        drm_info->attached = false;
        if (!drm_info->domain) {
                ret = -ENOMEM;
                goto err_free_info;
        }

        ret = iommu_get_dma_cookie(drm_info->domain);
        if (ret)
                goto err_free_domain;

        vcodec_drm_attach(iommu_info);

        return 0;

err_free_domain:
        iommu_domain_free(drm_info->domain);
err_free_info:
        kfree(iommu_info->private);
        iommu_info->private = NULL;

        return ret;
}

static int vcodec_drm_destroy(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;

        vcodec_drm_detach(iommu_info);
        iommu_put_dma_cookie(drm_info->domain);
        iommu_domain_free(drm_info->domain);

        kfree(drm_info);
        iommu_info->private = NULL;

        return 0;
}

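/*
 * Ops table plugged into the generic vcodec IOMMU layer when the device is
 * backed by the DRM/IOMMU path.
 */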
static struct vcodec_iommu_ops drm_ops = {
        .create = vcodec_drm_create,
        .import = vcodec_drm_import,
        .free = vcodec_drm_free,
        .free_fd = vcodec_drm_free_fd,
        .map_kernel = vcodec_drm_map_kernel,
        .unmap_kernel = vcodec_drm_unmap_kernel,
        .map_iommu = vcodec_drm_map_iommu,
        .unmap_iommu = vcodec_drm_unmap_iommu,
        .destroy = vcodec_drm_destroy,
        .dump = vcodec_drm_dump_info,
        .attach = vcodec_drm_attach,
        .detach = vcodec_drm_detach,
        .clear = vcodec_drm_clear_session,
};

void vcodec_iommu_drm_set_ops(struct vcodec_iommu_info *iommu_info)
{
        if (!iommu_info)
                return;
        iommu_info->ops = &drm_ops;
}
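
/*
 * Rough usage sketch (illustrative only, assuming the generic layer in
 * vcodec_iommu_ops.c dispatches through iommu_info->ops):
 *
 *      vcodec_iommu_drm_set_ops(iommu_info);
 *      iommu_info->ops->create(iommu_info);
 *      idx = iommu_info->ops->import(session_info, dmabuf_fd);
 *      iommu_info->ops->map_iommu(session_info, idx, &iova, &size);
 *      ... hardware uses iova ...
 *      iommu_info->ops->unmap_iommu(session_info, idx);
 *      iommu_info->ops->free(session_info, idx);
 *      iommu_info->ops->destroy(iommu_info);
 */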