drivers/video/rockchip/vcodec/vcodec_iommu_drm.c
/*
 * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
 * author: Jung Zhao jung.zhao@rock-chips.com
 *         Randy Li, randy.li@rock-chips.com
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-iommu.h>

#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_sync_helper.h>
#include <drm/rockchip_drm.h>
#include <linux/dma-mapping.h>
#include <linux/rockchip-iovmm.h>
#include <linux/pm_runtime.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/component.h>
#include <linux/fence.h>
#include <linux/console.h>
#include <linux/kref.h>
#include <linux/fdtable.h>
#include <linux/ktime.h>
#include <linux/iova.h>

#include "vcodec_iommu_ops.h"

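/*
 * Per-session record of an imported dma-buf. @iova and @phys overlay
 * each other; this DRM backend only uses @iova. @copy_sgt is a private
 * copy of the exporter's sg_table whose DMA fields we may rewrite
 * without disturbing the original, and @last_used drives LRU eviction
 * once a session holds more than BUFFER_LIST_MAX_NUMS buffers.
 */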
struct vcodec_drm_buffer {
        struct list_head list;
        struct dma_buf *dma_buf;
        union {
                unsigned long iova;
                unsigned long phys;
        };
        void *cpu_addr;
        unsigned long size;
        int index;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct sg_table *copy_sgt;
        struct page **pages;
        struct kref ref;
        struct vcodec_iommu_session_info *session_info;
        ktime_t last_used;
};

struct vcodec_iommu_drm_info {
        struct iommu_domain *domain;
        bool attached;
};

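/*
 * Look up a buffer by its session-local index and refresh its LRU
 * timestamp. Callers must hold session_info->list_mutex, hence the
 * _no_lock suffix.
 */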
static struct vcodec_drm_buffer *
vcodec_drm_get_buffer_no_lock(struct vcodec_iommu_session_info *session_info,
                              int idx)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;

        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                if (drm_buffer->index == idx) {
                        drm_buffer->last_used = ktime_get();
                        return drm_buffer;
                }
        }

        return NULL;
}

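/*
 * Look up a buffer by the dma-buf backing a user-supplied fd. Also
 * expects session_info->list_mutex to be held by the caller.
 */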
static struct vcodec_drm_buffer *
vcodec_drm_get_buffer_fd_no_lock(struct vcodec_iommu_session_info *session_info,
                                 int fd)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;
        struct dma_buf *dma_buf = NULL;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return NULL;

        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                if (drm_buffer->dma_buf == dma_buf) {
                        drm_buffer->last_used = ktime_get();
                        dma_buf_put(dma_buf);
                        return drm_buffer;
                }
        }

        dma_buf_put(dma_buf);

        return NULL;
}

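/*
 * Detach the vcodec from its IOMMU domain. Safe to call when already
 * detached; the attached flag is protected by iommu_mutex.
 */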
static void vcodec_drm_detach(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        struct device *dev = iommu_info->dev;
        struct iommu_domain *domain = drm_info->domain;

        mutex_lock(&iommu_info->iommu_mutex);

        if (!drm_info->attached) {
                mutex_unlock(&iommu_info->iommu_mutex);
                return;
        }

        iommu_detach_device(domain, dev);
        drm_info->attached = false;

        mutex_unlock(&iommu_info->iommu_mutex);
}

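/*
 * Attach the device to the IOMMU domain with iommu_mutex already held
 * by the caller. DMA is restricted to a 32-bit mask before attaching.
 */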
static int vcodec_drm_attach_unlock(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        struct device *dev = iommu_info->dev;
        struct iommu_domain *domain = drm_info->domain;
        int ret = 0;

        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
        ret = iommu_attach_device(domain, dev);
        if (ret) {
                dev_err(dev, "Failed to attach iommu device\n");
                return ret;
        }

        return ret;
}

static int vcodec_drm_attach(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        int ret;

        mutex_lock(&iommu_info->iommu_mutex);

        if (drm_info->attached) {
                mutex_unlock(&iommu_info->iommu_mutex);
                return 0;
        }

        ret = vcodec_drm_attach_unlock(iommu_info);
        if (ret) {
                mutex_unlock(&iommu_info->iommu_mutex);
                return ret;
        }

        drm_info->attached = true;

        mutex_unlock(&iommu_info->iommu_mutex);

        return ret;
}

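/*
 * Build a kernel mapping for an imported buffer: collect the pages
 * referenced by the sg_table into a flat array, then vmap() them
 * noncached so the CPU view stays coherent with the device.
 */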
static void *vcodec_drm_sgt_map_kernel(struct vcodec_drm_buffer *drm_buffer)
{
        struct vcodec_iommu_session_info *session_info =
                drm_buffer->session_info;
        struct device *dev = session_info->dev;
        struct scatterlist *sgl, *sg;
        int nr_pages = PAGE_ALIGN(drm_buffer->size) >> PAGE_SHIFT;
        int i = 0, j = 0, k = 0;
        struct page *page;

        drm_buffer->pages = kmalloc_array(nr_pages, sizeof(*drm_buffer->pages),
                                          GFP_KERNEL);
        if (!drm_buffer->pages) {
                dev_err(dev, "drm map cannot allocate pages\n");

                return NULL;
        }

        sgl = drm_buffer->sgt->sgl;

        for_each_sg(sgl, sg, drm_buffer->sgt->nents, i) {
                page = sg_page(sg);
                for (j = 0; j < sg->length / PAGE_SIZE; j++)
                        drm_buffer->pages[k++] = page++;
        }

        return vmap(drm_buffer->pages, nr_pages, VM_MAP,
                    pgprot_noncached(PAGE_KERNEL));
}

static void vcodec_drm_sgt_unmap_kernel(struct vcodec_drm_buffer *drm_buffer)
{
        vunmap(drm_buffer->cpu_addr);
        kfree(drm_buffer->pages);
}

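/*
 * Rewrite the DMA address/length fields of an sg list after its IOVA
 * range has been mapped, merging segments where possible. This appears
 * to be adapted from __finalise_sg() in the kernel's dma-iommu code.
 */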
static int vcodec_finalise_sg(struct scatterlist *sg,
                              int nents,
                              dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = DMA_BIT_MASK(32);
        unsigned int cur_len = 0, max_len = DMA_BIT_MASK(32);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (cur_len + s_length <= max_len)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}

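/*
 * Undo the in-place swizzling done in vcodec_dma_map_sg() when mapping
 * fails, restoring each segment's original offset and length.
 */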
static void vcodec_invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_ERROR_CODE)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;
        }
}

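/*
 * Allocate one IOVA range covering the whole scatterlist and map it
 * into @domain, closely following the kernel's iommu_dma_map_sg().
 * Returns the merged segment count on success or 0 on failure (note
 * the return type: the count is carried in a dma_addr_t).
 */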
static dma_addr_t vcodec_dma_map_sg(struct iommu_domain *domain,
                                    struct scatterlist *sg,
                                    int nents, int prot)
{
        struct iova_domain *iovad = domain->iova_cookie;
        struct iova *iova;
        struct scatterlist *s, *prev = NULL;
        dma_addr_t dma_addr;
        size_t iova_len = 0;
        unsigned long mask = DMA_BIT_MASK(32);
        unsigned long shift = iova_shift(iovad);
        int i;

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = alloc_iova(iovad, iova_align(iovad, iova_len) >> shift,
                          mask >> shift, true);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return vcodec_finalise_sg(sg, nents, dma_addr);

out_free_iova:
        __free_iova(iovad, iova);
out_restore_sg:
        vcodec_invalidate_sg(sg, nents);
        return 0;
}

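/*
 * Tear down a mapping created by vcodec_dma_map_sg(): look the IOVA up
 * by its PFN, unmap the whole range from the domain and release it.
 */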
static void vcodec_dma_unmap_sg(struct iommu_domain *domain,
                                dma_addr_t dma_addr)
{
        struct iova_domain *iovad = domain->iova_cookie;
        unsigned long shift = iova_shift(iovad);
        unsigned long pfn = dma_addr >> shift;
        struct iova *iova = find_iova(iovad, pfn);
        size_t size;

        if (WARN_ON(!iova))
                return;

        size = iova_size(iova) << shift;
        size -= iommu_unmap(domain, pfn << shift, size);
        /* ...and if we can't, then something is horribly, horribly wrong */
        WARN_ON(size > 0);
        __free_iova(iovad, iova);
}

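/*
 * kref release callback: drop the kernel mapping, the IOMMU mapping and
 * the dma-buf attachment once the last reference to a buffer is gone.
 */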
static void vcodec_drm_clear_map(struct kref *ref)
{
        struct vcodec_drm_buffer *drm_buffer =
                container_of(ref, struct vcodec_drm_buffer, ref);
        struct vcodec_iommu_session_info *session_info =
                drm_buffer->session_info;
        struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;

        mutex_lock(&iommu_info->iommu_mutex);

        if (drm_buffer->cpu_addr) {
                vcodec_drm_sgt_unmap_kernel(drm_buffer);
                drm_buffer->cpu_addr = NULL;
        }

        if (drm_buffer->attach) {
                vcodec_dma_unmap_sg(drm_info->domain, drm_buffer->iova);
                sg_free_table(drm_buffer->copy_sgt);
                kfree(drm_buffer->copy_sgt);
                dma_buf_unmap_attachment(drm_buffer->attach, drm_buffer->sgt,
                                         DMA_BIDIRECTIONAL);
                dma_buf_detach(drm_buffer->dma_buf, drm_buffer->attach);
                dma_buf_put(drm_buffer->dma_buf);
                drm_buffer->attach = NULL;
        }

        mutex_unlock(&iommu_info->iommu_mutex);
}

static void vcodec_drm_dump_info(struct vcodec_iommu_session_info *session_info)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;

        vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
                        "the following buffers are still stored in the list\n");
        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
                                "index %d drm_buffer dma_buf %p cpu_addr %p\n",
                                drm_buffer->index,
                                drm_buffer->dma_buf, drm_buffer->cpu_addr);
        }
}

static int vcodec_drm_free(struct vcodec_iommu_session_info *session_info,
                           int idx)
{
        struct device *dev = session_info->dev;
        /* please double-check that all maps have been released */
        struct vcodec_drm_buffer *drm_buffer;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);

        if (!drm_buffer) {
                dev_err(dev, "cannot find buffer %d in list\n", idx);
                mutex_unlock(&session_info->list_mutex);

                return -EINVAL;
        }

        if (atomic_read(&drm_buffer->ref.refcount) == 0) {
                dma_buf_put(drm_buffer->dma_buf);
                list_del_init(&drm_buffer->list);
                kfree(drm_buffer);
                session_info->buffer_nums--;
                vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
                                "buffer nums %d\n", session_info->buffer_nums);
        }
        mutex_unlock(&session_info->list_mutex);

        return 0;
}

static int
vcodec_drm_unmap_iommu(struct vcodec_iommu_session_info *session_info,
                       int idx)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        /* Force a flush of the IOMMU TLB */
        if (of_machine_is_compatible("rockchip,rk3288"))
                rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "cannot find buffer %d in list\n", idx);
                return -EINVAL;
        }

        kref_put(&drm_buffer->ref, vcodec_drm_clear_map);

        return 0;
}

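/*
 * Resolve a buffer index to its device (IOVA) address and size, taking
 * a reference that must be dropped with vcodec_drm_unmap_iommu().
 */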
static int vcodec_drm_map_iommu(struct vcodec_iommu_session_info *session_info,
                                int idx,
                                unsigned long *iova,
                                unsigned long *size)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        /* Force a flush of the IOMMU TLB */
        if (of_machine_is_compatible("rockchip,rk3288"))
                rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "cannot find buffer %d in list\n", idx);
                return -EINVAL;
        }

        kref_get(&drm_buffer->ref);
        if (iova)
                *iova = drm_buffer->iova;
        if (size)
                *size = drm_buffer->size;
        return 0;
}

static int
vcodec_drm_unmap_kernel(struct vcodec_iommu_session_info *session_info, int idx)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "cannot find buffer %d in list\n", idx);

                return -EINVAL;
        }

        if (drm_buffer->cpu_addr) {
                vcodec_drm_sgt_unmap_kernel(drm_buffer);
                drm_buffer->cpu_addr = NULL;
        }

        kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
        return 0;
}

static int
vcodec_drm_free_fd(struct vcodec_iommu_session_info *session_info, int fd)
{
        struct device *dev = session_info->dev;
        /* please double-check that all maps have been released */
        struct vcodec_drm_buffer *drm_buffer = NULL;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_fd_no_lock(session_info, fd);

        if (!drm_buffer) {
                dev_err(dev, "cannot find buffer fd %d in list\n", fd);
                mutex_unlock(&session_info->list_mutex);

                return -EINVAL;
        }
        mutex_unlock(&session_info->list_mutex);

        vcodec_drm_unmap_iommu(session_info, drm_buffer->index);

        mutex_lock(&session_info->list_mutex);
        if (atomic_read(&drm_buffer->ref.refcount) == 0) {
                dma_buf_put(drm_buffer->dma_buf);
                list_del_init(&drm_buffer->list);
                kfree(drm_buffer);
                session_info->buffer_nums--;
                vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
                                "buffer nums %d\n", session_info->buffer_nums);
        }
        mutex_unlock(&session_info->list_mutex);

        return 0;
}

static void
vcodec_drm_clear_session(struct vcodec_iommu_session_info *session_info)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;

        list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
                                 list) {
                kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
                vcodec_drm_free(session_info, drm_buffer->index);
        }
}

static void *
vcodec_drm_map_kernel(struct vcodec_iommu_session_info *session_info, int idx)
{
        struct device *dev = session_info->dev;
        struct vcodec_drm_buffer *drm_buffer;

        mutex_lock(&session_info->list_mutex);
        drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
        mutex_unlock(&session_info->list_mutex);

        if (!drm_buffer) {
                dev_err(dev, "cannot find buffer %d in list\n", idx);
                return NULL;
        }

        if (!drm_buffer->cpu_addr)
                drm_buffer->cpu_addr =
                        vcodec_drm_sgt_map_kernel(drm_buffer);

        kref_get(&drm_buffer->ref);

        return drm_buffer->cpu_addr;
}

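/*
 * Import a dma-buf fd into the session: attach it to the vcodec device,
 * build a private copy of the exporter's sg_table, map it through our
 * own IOVA allocator and return a session-local index. Re-importing an
 * already known dma-buf just refreshes its LRU stamp; once the list
 * grows past BUFFER_LIST_MAX_NUMS the least recently used entry is
 * evicted.
 */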
static int vcodec_drm_import(struct vcodec_iommu_session_info *session_info,
                             int fd)
{
        struct vcodec_drm_buffer *drm_buffer = NULL, *n;
        struct vcodec_drm_buffer *oldest_buffer = NULL, *loop_buffer = NULL;
        struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
        struct device *dev = session_info->dev;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct dma_buf *dma_buf;
        ktime_t oldest_time = ktime_set(0, 0);
        struct scatterlist *sg, *s;
        int i;
        int ret = 0;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf)) {
                ret = PTR_ERR(dma_buf);
                return ret;
        }

        list_for_each_entry_safe(drm_buffer, n,
                                 &session_info->buffer_list, list) {
                if (drm_buffer->dma_buf == dma_buf) {
                        dma_buf_put(dma_buf);
                        drm_buffer->last_used = ktime_get();
                        return drm_buffer->index;
                }
        }

        drm_buffer = kzalloc(sizeof(*drm_buffer), GFP_KERNEL);
        if (!drm_buffer) {
                dma_buf_put(dma_buf);
                return -ENOMEM;
        }

        drm_buffer->dma_buf = dma_buf;
        drm_buffer->session_info = session_info;
        drm_buffer->last_used = ktime_get();

        kref_init(&drm_buffer->ref);

        mutex_lock(&iommu_info->iommu_mutex);

        attach = dma_buf_attach(drm_buffer->dma_buf, dev);
        if (IS_ERR(attach)) {
                ret = PTR_ERR(attach);
                goto fail_out;
        }

        get_dma_buf(drm_buffer->dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        /*
         * Since dma_buf_map_attachment() is called outside of our own
         * attach/detach of the IOMMU domain, the resulting mapping is
         * not usable by the vcodec. Rebuild the mapping natively below;
         * to avoid destroying the exporter's original mapping, operate
         * on a copy of the sg_table.
         */
        drm_buffer->copy_sgt = kmalloc(sizeof(*drm_buffer->copy_sgt),
                                       GFP_KERNEL);
        if (!drm_buffer->copy_sgt) {
                ret = -ENOMEM;
                goto fail_unmap;
        }

        ret = sg_alloc_table(drm_buffer->copy_sgt, sgt->nents, GFP_KERNEL);
        if (ret)
                goto fail_free_copy;

        s = drm_buffer->copy_sgt->sgl;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                sg_set_page(s, sg_page(sg),
                            PAGE_SIZE << compound_order(sg_page(sg)), 0);
                sg_dma_address(s) = page_to_phys(sg_page(sg));
                s->offset = sg->offset;
                s->length = sg->length;
                s = sg_next(s);
        }

        ret = vcodec_dma_map_sg(drm_info->domain, drm_buffer->copy_sgt->sgl,
                                drm_buffer->copy_sgt->nents,
                                IOMMU_READ | IOMMU_WRITE);
        if (!ret) {
                ret = -ENOMEM;
                goto fail_alloc;
        }
        drm_buffer->iova = sg_dma_address(drm_buffer->copy_sgt->sgl);
        drm_buffer->size = drm_buffer->dma_buf->size;

        drm_buffer->attach = attach;
        drm_buffer->sgt = sgt;

        mutex_unlock(&iommu_info->iommu_mutex);

        INIT_LIST_HEAD(&drm_buffer->list);
        mutex_lock(&session_info->list_mutex);
        session_info->buffer_nums++;
        vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
                        "buffer nums %d\n", session_info->buffer_nums);
        if (session_info->buffer_nums > BUFFER_LIST_MAX_NUMS) {
                list_for_each_entry_safe(loop_buffer, n,
                                         &session_info->buffer_list, list) {
                        if (ktime_to_ns(oldest_time) == 0 ||
                            ktime_after(oldest_time,
                                        loop_buffer->last_used)) {
                                oldest_time = loop_buffer->last_used;
                                oldest_buffer = loop_buffer;
                        }
                }
                kref_put(&oldest_buffer->ref, vcodec_drm_clear_map);
                dma_buf_put(oldest_buffer->dma_buf);
                list_del_init(&oldest_buffer->list);
                kfree(oldest_buffer);
                session_info->buffer_nums--;
        }
        drm_buffer->index = session_info->max_idx;
        list_add_tail(&drm_buffer->list, &session_info->buffer_list);
        session_info->max_idx++;
        if ((session_info->max_idx & 0xfffffff) == 0)
                session_info->max_idx = 0;
        mutex_unlock(&session_info->list_mutex);

        return drm_buffer->index;

fail_alloc:
        sg_free_table(drm_buffer->copy_sgt);
fail_free_copy:
        kfree(drm_buffer->copy_sgt);
fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(drm_buffer->dma_buf, attach);
        dma_buf_put(drm_buffer->dma_buf);
fail_out:
        dma_buf_put(drm_buffer->dma_buf);
        kfree(drm_buffer);
        mutex_unlock(&iommu_info->iommu_mutex);

        return ret;
}

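/*
 * Allocate and initialise the backend: an IOMMU domain with a DMA
 * cookie and an IOVA range starting at 0x10000000, plus an IOMMU group
 * for the vcodec device if it does not already belong to one.
 */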
static int vcodec_drm_create(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info;
        struct iommu_group *group;
        int ret;

        iommu_info->private = kzalloc(sizeof(*drm_info),
                                      GFP_KERNEL);
        drm_info = iommu_info->private;
        if (!drm_info)
                return -ENOMEM;

        drm_info->domain = iommu_domain_alloc(&platform_bus_type);
        drm_info->attached = false;
        if (!drm_info->domain)
                return -ENOMEM;

        ret = iommu_get_dma_cookie(drm_info->domain);
        if (ret)
                goto err_free_domain;

        group = iommu_group_get(iommu_info->dev);
        if (!group) {
                group = iommu_group_alloc();
                if (IS_ERR(group)) {
                        ret = PTR_ERR(group);
                        dev_err(iommu_info->dev,
                                "Failed to allocate IOMMU group\n");
                        goto err_put_cookie;
                }
                ret = iommu_group_add_device(group, iommu_info->dev);
                if (ret) {
                        dev_err(iommu_info->dev,
                                "failed to add device to IOMMU group\n");
                        goto err_put_cookie;
                }
        }
        iommu_dma_init_domain(drm_info->domain, 0x10000000, SZ_2G);
        iommu_group_put(group);

        return 0;

err_put_cookie:
        iommu_put_dma_cookie(drm_info->domain);
err_free_domain:
        iommu_domain_free(drm_info->domain);

        return ret;
}

static int vcodec_drm_destroy(struct vcodec_iommu_info *iommu_info)
{
        struct vcodec_iommu_drm_info *drm_info = iommu_info->private;

        iommu_put_dma_cookie(drm_info->domain);
        iommu_domain_free(drm_info->domain);

        kfree(drm_info);
        iommu_info->private = NULL;

        return 0;
}

static struct vcodec_iommu_ops drm_ops = {
        .create = vcodec_drm_create,
        .import = vcodec_drm_import,
        .free = vcodec_drm_free,
        .free_fd = vcodec_drm_free_fd,
        .map_kernel = vcodec_drm_map_kernel,
        .unmap_kernel = vcodec_drm_unmap_kernel,
        .map_iommu = vcodec_drm_map_iommu,
        .unmap_iommu = vcodec_drm_unmap_iommu,
        .destroy = vcodec_drm_destroy,
        .dump = vcodec_drm_dump_info,
        .attach = vcodec_drm_attach,
        .detach = vcodec_drm_detach,
        .clear = vcodec_drm_clear_session,
};

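/*
 * Install the DRM-backed allocator ops for a vcodec IOMMU instance.
 * The wrapper layer in vcodec_iommu_ops.c dispatches through drm_ops
 * above; the sequence below is only an illustrative sketch of the
 * expected call flow, not a verbatim API:
 *
 *	create -> attach -> import(fd) -> map_iommu(idx, &iova, &size)
 *		... hardware uses iova ...
 *	unmap_iommu(idx) -> free(idx) -> detach -> destroy
 */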
void vcodec_iommu_drm_set_ops(struct vcodec_iommu_info *iommu_info)
{
        if (!iommu_info)
                return;
        iommu_info->ops = &drm_ops;
}