/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_sync_helper.h>
#include <drm/drm_vma_manager.h>
#include <drm/rockchip_drm.h>

#include <linux/completion.h>
#include <linux/dma-attrs.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
struct page_info {
        struct page *page;
        struct list_head list;
};

#define PG_ROUND        8
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t ret;

        mutex_lock(&private->mm_lock);

        ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
                                         rk_obj->base.size, PAGE_SIZE,
                                         0, 0, 0);

        mutex_unlock(&private->mm_lock);

        if (ret < 0) {
                DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
                return ret;
        }

        rk_obj->dma_addr = rk_obj->mm.start;

        ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
                           rk_obj->sgt->nents, prot);
        if (ret < rk_obj->base.size) {
                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
                          ret, rk_obj->base.size);
                ret = -ENOMEM;
                goto err_remove_node;
        }

        rk_obj->size = ret;

        return 0;

err_remove_node:
        mutex_lock(&private->mm_lock);
        drm_mm_remove_node(&rk_obj->mm);
        mutex_unlock(&private->mm_lock);

        return ret;
}
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;

        iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

        mutex_lock(&private->mm_lock);

        drm_mm_remove_node(&rk_obj->mm);

        mutex_unlock(&private->mm_lock);

        return 0;
}
static void rockchip_gem_free_list(struct list_head lists[])
{
        struct page_info *info, *tmp_info;
        int i;

        for (i = 0; i < PG_ROUND; i++) {
                list_for_each_entry_safe(info, tmp_info, &lists[i], list) {
                        list_del(&info->list);
                        kfree(info);
                }
        }
}
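/*
 * Allocate shmem-backed pages for a GEM object and reorder them before
 * building the sg_table: runs of more than seven physically contiguous
 * pages are kept in place, while the remaining pages are binned by
 * physical address bits [14:12] and then dealt out round-robin across
 * the eight bins (presumably to spread accesses across DRAM banks).
 */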
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        int ret, i, j;
        struct scatterlist *s;
        unsigned int cur_page;
        struct page **pages, **dst_pages;
        int n_pages;
        int end = 0;
        unsigned long chunk_pages;
        unsigned long remain;
        struct list_head lists[PG_ROUND];
        dma_addr_t phys;
        unsigned int bit12_14;
        unsigned int block_index[PG_ROUND] = {0};
        struct page_info *info;
        unsigned int maximum;

        for (i = 0; i < PG_ROUND; i++)
                INIT_LIST_HEAD(&lists[i]);

        pages = drm_gem_get_pages(&rk_obj->base);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        rk_obj->pages = pages;

        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

        n_pages = rk_obj->num_pages;

        dst_pages = drm_malloc_ab(n_pages, sizeof(struct page *));
        if (!dst_pages) {
                ret = -ENOMEM;
                goto err_put_pages;
        }

        cur_page = 0;
        remain = n_pages;
        while (remain > 0) {
                /* look for the end of the current chunk */
                for (j = cur_page + 1; j < n_pages; ++j) {
                        if (page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;
                }

                chunk_pages = j - cur_page;

                if (chunk_pages > 7) {
                        /* long contiguous runs are copied over as-is */
                        for (i = 0; i < chunk_pages; i++)
                                dst_pages[end + i] = pages[cur_page + i];
                        end += chunk_pages;
                } else {
                        /* short runs are binned by PA bits [14:12] */
                        for (i = 0; i < chunk_pages; i++) {
                                info = kmalloc(sizeof(*info), GFP_KERNEL);
                                if (!info) {
                                        ret = -ENOMEM;
                                        goto err_clean_list;
                                }
                                INIT_LIST_HEAD(&info->list);
                                info->page = pages[cur_page + i];
                                phys = page_to_phys(info->page);
                                bit12_14 = (phys >> 12) & 0x7;
                                list_add_tail(&info->list, &lists[bit12_14]);
                                block_index[bit12_14]++;
                        }
                }

                cur_page = j;
                remain -= chunk_pages;
        }

        maximum = block_index[0];
        for (i = 1; i < PG_ROUND; i++)
                maximum = max(maximum, block_index[i]);

        /* deal the binned pages out round-robin across the bins */
        for (i = 0; i < maximum; i++) {
                for (j = 0; j < PG_ROUND; j++) {
                        if (!list_empty(&lists[j])) {
                                struct page_info *info;

                                info = list_first_entry(&lists[j],
                                                        struct page_info,
                                                        list);
                                dst_pages[end++] = info->page;
                                list_del(&info->list);
                                kfree(info);
                        }
                }
        }

        DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__,
                      end, n_pages);

        rk_obj->sgt = drm_prime_pages_to_sg(dst_pages, rk_obj->num_pages);
        if (IS_ERR(rk_obj->sgt)) {
                ret = PTR_ERR(rk_obj->sgt);
                goto err_clean_list;
        }

        rk_obj->pages = dst_pages;

        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
         *
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
                sg_dma_address(s) = sg_phys(s);

        dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
                               DMA_TO_DEVICE);

        drm_free_large(pages);

        return 0;

err_clean_list:
        rockchip_gem_free_list(lists);
        drm_free_large(dst_pages);
err_put_pages:
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);

        return ret;
}
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
        sg_free_table(rk_obj->sgt);
        kfree(rk_obj->sgt);
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}
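/*
 * Allocate a physically contiguous buffer through the DMA API. The
 * allocation is write-combined and has no kernel mapping (only the opaque
 * cookie is kept, for dma_mmap_attrs()/dma_free_attrs()); an sg_table and
 * a pages array are still constructed so the rest of the driver can treat
 * CMA and shmem buffers alike.
 */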
static int rockchip_gem_alloc_cma(struct rockchip_gem_object *rk_obj)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;
        struct sg_table *sgt;
        int ret;

        init_dma_attrs(&rk_obj->dma_attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);

        rk_obj->cookie = dma_alloc_attrs(drm->dev, obj->size,
                                         &rk_obj->dma_handle, GFP_KERNEL,
                                         &rk_obj->dma_attrs);
        if (!rk_obj->cookie) {
                DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
                return -ENOMEM;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto err_dma_free;
        }

        ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->cookie,
                                    rk_obj->dma_handle, obj->size,
                                    &rk_obj->dma_attrs);
        if (ret) {
                DRM_ERROR("failed to allocate sgt, %d\n", ret);
                goto err_sgt_free;
        }

        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

        rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
                                         sizeof(*rk_obj->pages));
        if (!rk_obj->pages) {
                DRM_ERROR("failed to allocate pages.\n");
                ret = -ENOMEM;
                goto err_sg_table_free;
        }

        if (drm_prime_sg_to_page_addr_arrays(sgt, rk_obj->pages, NULL,
                                             rk_obj->num_pages)) {
                DRM_ERROR("invalid sgtable.\n");
                ret = -EINVAL;
                goto err_page_free;
        }

        rk_obj->sgt = sgt;

        return 0;

err_page_free:
        drm_free_large(rk_obj->pages);
err_sg_table_free:
        sg_free_table(sgt);
err_sgt_free:
        kfree(sgt);
err_dma_free:
        dma_free_attrs(drm->dev, obj->size, rk_obj->cookie,
                       rk_obj->dma_handle, &rk_obj->dma_attrs);

        return ret;
}
static void rockchip_gem_free_cma(struct rockchip_gem_object *rk_obj)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;

        drm_free_large(rk_obj->pages);
        sg_free_table(rk_obj->sgt);
        kfree(rk_obj->sgt);
        dma_free_attrs(drm->dev, obj->size, rk_obj->cookie,
                       rk_obj->dma_addr, &rk_obj->dma_attrs);
}
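/*
 * Pick the backing store for a new buffer: without an IOMMU domain every
 * buffer must be physically contiguous, so ROCKCHIP_BO_CONTIG is forced
 * and the CMA path is taken; otherwise shmem pages are gathered and then
 * mapped into the IOMMU domain.
 */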
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;
        int ret;

        if (!private->domain)
                rk_obj->flags |= ROCKCHIP_BO_CONTIG;

        if (rk_obj->flags & ROCKCHIP_BO_CONTIG) {
                rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_CMA;
                ret = rockchip_gem_alloc_cma(rk_obj);
                if (ret)
                        return ret;
        } else {
                rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SHMEM;
                ret = rockchip_gem_get_pages(rk_obj);
                if (ret)
                        return ret;
        }

        if (private->domain) {
                ret = rockchip_gem_iommu_map(rk_obj);
                if (ret)
                        goto err_free;
        } else {
                WARN_ON(!rk_obj->dma_handle);
                rk_obj->dma_addr = rk_obj->dma_handle;
        }

        if (alloc_kmap) {
                rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                                      pgprot_writecombine(PAGE_KERNEL));
                if (!rk_obj->kvaddr) {
                        DRM_ERROR("failed to vmap() buffer\n");
                        ret = -ENOMEM;
                        goto err_unmap;
                }
        }

        return 0;

err_unmap:
        if (private->domain)
                rockchip_gem_iommu_unmap(rk_obj);
err_free:
        if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_CMA)
                rockchip_gem_free_cma(rk_obj);
        else
                rockchip_gem_put_pages(rk_obj);

        return ret;
}
static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;

        if (private->domain)
                rockchip_gem_iommu_unmap(rk_obj);

        vunmap(rk_obj->kvaddr);

        if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM) {
                rockchip_gem_put_pages(rk_obj);
        } else {
                rockchip_gem_free_cma(rk_obj);
        }
}
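/*
 * Two mmap backends: shmem buffers are mapped page by page with
 * vm_insert_page(), while contiguous buffers are handed to
 * dma_mmap_attrs() so the DMA layer sets up the mapping.
 */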
static int rockchip_drm_gem_object_mmap_shm(struct drm_gem_object *obj,
                                            struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        unsigned int i, count = obj->size >> PAGE_SHIFT;
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long uaddr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff;
        unsigned long end = user_count + offset;
        int ret;

        if (user_count == 0 || end > count)
                return -ENXIO;

        for (i = offset; i < end; i++) {
                ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
                if (ret)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        return 0;
}
static int rockchip_drm_gem_object_mmap_cma(struct drm_gem_object *obj,
                                            struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;

        return dma_mmap_attrs(drm->dev, vma, rk_obj->cookie, rk_obj->dma_handle,
                              obj->size, &rk_obj->dma_attrs);
}
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma)
{
        int ret;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        /* default mapping is write-combined */
        if (rk_obj->flags & ROCKCHIP_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        /*
         * We allocated a struct page table for rk_obj, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
        vma->vm_flags &= ~VM_PFNMAP;

        if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM)
                ret = rockchip_drm_gem_object_mmap_shm(obj, vma);
        else
                ret = rockchip_drm_gem_object_mmap_cma(obj, vma);

        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
                          struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret)
                return ret;

        return rockchip_drm_gem_object_mmap(obj, vma);
}
/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /*
         * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
         * whole buffer from the start.
         */
        vma->vm_pgoff = 0;

        obj = vma->vm_private_data;

        return rockchip_drm_gem_object_mmap(obj, vma);
}
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
        struct address_space *mapping;
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;

        size = round_up(size, PAGE_SIZE);

        rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
        if (!rk_obj)
                return ERR_PTR(-ENOMEM);

        obj = &rk_obj->base;

        drm_gem_object_init(drm, obj, size);

        if (IS_ENABLED(CONFIG_ARM_LPAE)) {
                /* restrict shmem allocations to the 32-bit addressable range */
                mapping = file_inode(obj->filp)->i_mapping;
                mapping_set_gfp_mask(mapping,
                                     mapping_gfp_mask(mapping) | __GFP_DMA32);
        }

        return rk_obj;
}
static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
        drm_gem_object_release(&rk_obj->base);
        kfree(rk_obj);
}
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
                           bool alloc_kmap, unsigned int flags)
{
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, size);
        if (IS_ERR(rk_obj))
                return rk_obj;

        rk_obj->flags = flags;

        ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
        if (ret)
                goto err_free_rk_obj;

        return rk_obj;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}
/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (obj->import_attach) {
                if (private->domain) {
                        rockchip_gem_iommu_unmap(rk_obj);
                } else {
                        dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
                                     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
                }
        } else {
                rockchip_gem_free_buf(rk_obj);
        }

#ifdef CONFIG_DRM_DMA_SYNC
        drm_fence_signal_and_put(&rk_obj->acquire_fence);
#endif

        rockchip_gem_release_object(rk_obj);
}
/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
                                struct drm_device *drm, unsigned int size,
                                unsigned int *handle, unsigned int flags)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;
        int ret;

        rk_obj = rockchip_gem_create_object(drm, size, false, flags);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        obj = &rk_obj->base;

        /*
         * Allocate an ID in the IDR table, where the object is registered;
         * the handle returned to userspace is that ID.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return rk_obj;

err_handle_create:
        rockchip_gem_free_object(obj);

        return ERR_PTR(ret);
}
int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
                                 struct drm_device *dev, uint32_t handle,
                                 uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);

out:
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
                             struct drm_device *dev,
                             struct drm_mode_create_dumb *args)
{
        struct rockchip_gem_object *rk_obj;
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

        /*
         * align pitch to 64 bytes since the Mali GPU requires it.
         */
        args->pitch = ALIGN(min_pitch, 64);
        args->size = args->pitch * args->height;

        rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
                                                 &args->handle, args->flags);

        return PTR_ERR_OR_ZERO(rk_obj);
}
int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_rockchip_gem_map_off *args = data;

        return rockchip_gem_dumb_map_offset(file_priv, drm, args->handle,
                                            &args->offset);
}
int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_rockchip_gem_phys *args = data;
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }
        rk_obj = to_rockchip_obj(obj);

        if (!(rk_obj->flags & ROCKCHIP_BO_CONTIG)) {
                DRM_ERROR("Can't get phys address from non-contiguous buf.\n");
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        args->phy_addr = page_to_phys(rk_obj->pages[0]);
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}
int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_rockchip_gem_create *args = data;
        struct rockchip_gem_object *rk_obj;

        rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
                                                 &args->handle, args->flags);

        return PTR_ERR_OR_ZERO(rk_obj);
}
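/*
 * CPU access synchronization (CONFIG_DRM_DMA_SYNC): the acquire ioctl
 * installs a software fence in the buffer's reservation object and waits
 * for any fences already present, ordering CPU access against other users
 * of a shared (exported) buffer; the release ioctl signals and drops that
 * fence again.
 */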
static struct reservation_object *drm_gem_get_resv(struct drm_gem_object *gem)
{
        struct dma_buf *dma_buf = gem->dma_buf;

        return dma_buf ? dma_buf->resv : NULL;
}
#ifdef CONFIG_DRM_DMA_SYNC
static void rockchip_gem_acquire_complete(struct drm_reservation_cb *rcb,
                                          void *context)
{
        struct completion *compl = context;

        complete(compl);
}
static int rockchip_gem_acquire(struct drm_device *dev,
                                struct rockchip_gem_object *rockchip_gem_obj,
                                bool exclusive)
{
        struct fence *fence;
        struct rockchip_drm_private *dev_priv = dev->dev_private;
        struct reservation_object *resv =
                drm_gem_get_resv(&rockchip_gem_obj->base);
        int ret = 0;
        struct drm_reservation_cb rcb;
        DECLARE_COMPLETION_ONSTACK(compl);

        if (!resv)
                return ret;

        if (!exclusive &&
            !rockchip_gem_obj->acquire_exclusive &&
            rockchip_gem_obj->acquire_fence) {
                atomic_inc(&rockchip_gem_obj->acquire_shared_count);
                return ret;
        }

        fence = drm_sw_fence_new(dev_priv->cpu_fence_context,
                        atomic_add_return(1, &dev_priv->cpu_fence_seqno));
        if (IS_ERR(fence)) {
                ret = PTR_ERR(fence);
                DRM_ERROR("Failed to create acquire fence %d.\n", ret);
                return ret;
        }
        ww_mutex_lock(&resv->lock, NULL);
        if (!exclusive) {
                ret = reservation_object_reserve_shared(resv);
                if (ret < 0) {
                        DRM_ERROR("Failed to reserve space for shared fence %d.\n",
                                  ret);
                        goto resv_unlock;
                }
        }
        drm_reservation_cb_init(&rcb, rockchip_gem_acquire_complete, &compl);
        ret = drm_reservation_cb_add(&rcb, resv, exclusive);
        if (ret < 0) {
                DRM_ERROR("Failed to add reservation to callback %d.\n", ret);
                goto resv_unlock;
        }
        drm_reservation_cb_done(&rcb);
        if (exclusive)
                reservation_object_add_excl_fence(resv, fence);
        else
                reservation_object_add_shared_fence(resv, fence);

        ww_mutex_unlock(&resv->lock);
        mutex_unlock(&dev->struct_mutex);
        ret = wait_for_completion_interruptible(&compl);
        mutex_lock(&dev->struct_mutex);
        if (ret < 0) {
                DRM_ERROR("Failed wait for reservation callback %d.\n", ret);
                drm_reservation_cb_fini(&rcb);
                /* somebody else may be already waiting on it */
                drm_fence_signal_and_put(&fence);
                return ret;
        }

        rockchip_gem_obj->acquire_fence = fence;
        rockchip_gem_obj->acquire_exclusive = exclusive;
        atomic_set(&rockchip_gem_obj->acquire_shared_count, 1);

        return ret;

resv_unlock:
        ww_mutex_unlock(&resv->lock);
        fence_put(fence);

        return ret;
}
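/* Drop one shared acquire; the fence is signalled when the count hits zero. */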
static void rockchip_gem_release(struct rockchip_gem_object *rockchip_gem_obj)
{
        BUG_ON(!rockchip_gem_obj->acquire_fence);
        if (atomic_sub_and_test(1,
                        &rockchip_gem_obj->acquire_shared_count))
                drm_fence_signal_and_put(&rockchip_gem_obj->acquire_fence);
}
#endif
int rockchip_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file)
{
        struct drm_rockchip_gem_cpu_acquire *args = data;
        struct rockchip_drm_file_private *file_priv = file->driver_priv;
        struct drm_gem_object *obj;
        struct rockchip_gem_object *rockchip_gem_obj;
        struct rockchip_gem_object_node *gem_node;
        int ret = 0;

        DRM_DEBUG_KMS("[BO:%u] flags: 0x%x\n", args->handle, args->flags);

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        rockchip_gem_obj = to_rockchip_obj(obj);

        if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
                /* If there is no reservation object present, there is no
                 * cross-process/cross-device sharing and sync is unnecessary.
                 */
                ret = 0;
                goto unref_obj;
        }

#ifdef CONFIG_DRM_DMA_SYNC
        ret = rockchip_gem_acquire(dev, rockchip_gem_obj,
                        args->flags & DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE);
        if (ret < 0)
                goto unref_obj;
#endif

        gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
        if (!gem_node) {
                DRM_ERROR("Failed to allocate rockchip_drm_gem_obj_node.\n");
                ret = -ENOMEM;
                goto release_sync;
        }

        gem_node->rockchip_gem_obj = rockchip_gem_obj;
        list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
        mutex_unlock(&dev->struct_mutex);
        return 0;

release_sync:
#ifdef CONFIG_DRM_DMA_SYNC
        rockchip_gem_release(rockchip_gem_obj);
#endif
unref_obj:
        drm_gem_object_unreference(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file)
{
        struct drm_rockchip_gem_cpu_release *args = data;
        struct rockchip_drm_file_private *file_priv = file->driver_priv;
        struct drm_gem_object *obj;
        struct rockchip_gem_object *rockchip_gem_obj;
        struct list_head *cur;
        int ret = 0;

        DRM_DEBUG_KMS("[BO:%u]\n", args->handle);

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        rockchip_gem_obj = to_rockchip_obj(obj);

        if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
                /* If there is no reservation object present, there is no
                 * cross-process/cross-device sharing and sync is unnecessary.
                 */
                ret = 0;
                goto unref_obj;
        }

        list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
                struct rockchip_gem_object_node *node = list_entry(
                                cur, struct rockchip_gem_object_node, list);
                if (node->rockchip_gem_obj == rockchip_gem_obj)
                        break;
        }
        if (cur == &file_priv->gem_cpu_acquire_list) {
                DRM_ERROR("gem object not acquired for current process.\n");
                ret = -EINVAL;
                goto unref_obj;
        }

#ifdef CONFIG_DRM_DMA_SYNC
        rockchip_gem_release(rockchip_gem_obj);
#endif

        list_del(cur);
        kfree(list_entry(cur, struct rockchip_gem_object_node, list));
        /* unreference for the reference held since cpu_acquire_ioctl */
        drm_gem_object_unreference(obj);
        ret = 0;

unref_obj:
        /* unreference for the reference from drm_gem_object_lookup() */
        drm_gem_object_unreference(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        WARN_ON(!rk_obj->pages);

        return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
}
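/*
 * Return how many bytes at the start of a DMA-mapped sg_table form one
 * contiguous DMA range, stopping at the first gap between entries.
 */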
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
                                                     int count)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, count, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }

        return size;
}
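/*
 * PRIME import: with an IOMMU the imported sg_table is mapped into the
 * IOMMU domain as-is; without one it is DMA-mapped and must resolve to a
 * single contiguous range, since this driver requires physically
 * contiguous buffers when no IOMMU is available.
 */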
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sg,
                          struct rockchip_gem_object *rk_obj)
{
        rk_obj->sgt = sg;
        return rockchip_gem_iommu_map(rk_obj);
}
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
                        struct dma_buf_attachment *attach,
                        struct sg_table *sg,
                        struct rockchip_gem_object *rk_obj)
{
        int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
                               DMA_BIDIRECTIONAL);
        if (!count)
                return -EINVAL;

        if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
                DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
                dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
                             DMA_BIDIRECTIONAL);
                return -EINVAL;
        }

        rk_obj->dma_addr = sg_dma_address(sg->sgl);
        rk_obj->sgt = sg;
        return 0;
}
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sg)
{
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        if (private->domain)
                ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
        else
                ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

        if (ret < 0) {
                DRM_ERROR("failed to import sg table: %d\n", ret);
                goto err_free_rk_obj;
        }

        return &rk_obj->base;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (rk_obj->kvaddr)
                return rk_obj->kvaddr;

        rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                              pgprot_writecombine(PAGE_KERNEL));

        return rk_obj->kvaddr;
}
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do here: the buffer is unmapped on buffer destroy. */
}
int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj,
                                        size_t start, size_t len,
                                        enum dma_data_direction dir)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;

        if (!rk_obj->sgt)
                return 0;

        dma_sync_sg_for_cpu(drm->dev, rk_obj->sgt->sgl,
                            rk_obj->sgt->nents, dir);

        return 0;
}
void rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
                                       size_t start, size_t len,
                                       enum dma_data_direction dir)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;

        if (!rk_obj->sgt)
                return;

        dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl,
                               rk_obj->sgt->nents, dir);
}