drm/radeon: add the infrastructure for concurrent buffer access
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c8a8a5144ec16a51686acf3658dd149261867ac8..eca2ce60d4406fd38795cc8be3a5e05c5ec43cec 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,6 +39,8 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -176,12 +178,15 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void radeon_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
 {
+       static struct ttm_place placements = {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+       };
+
        struct radeon_bo *rbo;
-       static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 
        if (!radeon_ttm_bo_is_radeon_bo(bo)) {
-               placement->fpfn = 0;
-               placement->lpfn = 0;
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
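For reference, a minimal sketch of the struct ttm_place fields the new initializer relies on (the authoritative definition lives in include/drm/ttm/ttm_placement.h; the exact types here are assumptions inferred from how the fields are used above):

	struct ttm_place {
		unsigned	fpfn;	/* first acceptable page frame number */
		unsigned	lpfn;	/* last acceptable page frame number, 0 = no limit */
		uint32_t	flags;	/* TTM_PL_FLAG_* placement and caching flags */
	};

The old code stored only a bare u32 of flags and kept fpfn/lpfn in struct ttm_placement; the page-frame range now travels with each individual placement entry.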
@@ -228,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
+       unsigned num_pages;
        int r, ridx;
 
        rdev = radeon_get_rdev(bo->bdev);
@@ -264,13 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
-       /* sync other rings */
-       fence = bo->sync_obj;
-       r = radeon_copy(rdev, old_start, new_start,
-                       new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
-                       &fence);
-       /* FIXME: handle copy error */
-       r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+       num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+       fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+       if (IS_ERR(fence))
+               return PTR_ERR(fence);
+
+       r = ttm_bo_move_accel_cleanup(bo, &fence->base,
                                      evict, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
        return r;
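The rewritten blit path assumes radeon_copy() now returns the fence for the copy and synchronizes against the buffer's reservation object instead of taking an explicit sync_obj; a sketch of the implied prototype (an assumption, the authoritative declaration is in radeon.h):

	struct radeon_fence *radeon_copy(struct radeon_device *rdev,
					 uint64_t src_offset, uint64_t dst_offset,
					 unsigned num_gpu_pages,
					 struct reservation_object *resv);

On failure the function is expected to hand back an ERR_PTR() value, which is why the caller checks IS_ERR(fence) before passing &fence->base on to ttm_bo_move_accel_cleanup().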
@@ -284,20 +289,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
-       u32 placements;
+       struct ttm_place placements;
        struct ttm_placement placement;
        int r;
 
        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       placement.fpfn = 0;
-       placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
-       placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       placements.fpfn = 0;
+       placements.lpfn = 0;
+       placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
        if (unlikely(r)) {
@@ -332,19 +337,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_placement placement;
-       u32 placements;
+       struct ttm_place placements;
        int r;
 
        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       placement.fpfn = 0;
-       placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
-       placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       placements.fpfn = 0;
+       placements.lpfn = 0;
+       placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
        if (unlikely(r)) {
@@ -483,53 +488,131 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 {
 }
 
-static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
-{
-       return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
-}
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+       struct ttm_dma_tt               ttm;
+       struct radeon_device            *rdev;
+       u64                             offset;
+
+       uint64_t                        userptr;
+       struct mm_struct                *usermm;
+       uint32_t                        userflags;
+};
 
-static int radeon_sync_obj_flush(void *sync_obj)
+/* prepare the sg table with the user pages */
+static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 {
+       struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+       unsigned pinned = 0, nents;
+       int r;
+
+       int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+       enum dma_data_direction direction = write ?
+               DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+       if (current->mm != gtt->usermm)
+               return -EPERM;
+
+       if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
+               /* check that we only pin down anonymous memory
+                  to prevent problems with writeback */
+               unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+               struct vm_area_struct *vma;
+               vma = find_vma(gtt->usermm, gtt->userptr);
+               if (!vma || vma->vm_file || vma->vm_end < end)
+                       return -EPERM;
+       }
+
+       do {
+               unsigned num_pages = ttm->num_pages - pinned;
+               uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+               struct page **pages = ttm->pages + pinned;
+
+               r = get_user_pages(current, current->mm, userptr, num_pages,
+                                  write, 0, pages, NULL);
+               if (r < 0)
+                       goto release_pages;
+
+               pinned += r;
+
+       } while (pinned < ttm->num_pages);
+
+       r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+                                     ttm->num_pages << PAGE_SHIFT,
+                                     GFP_KERNEL);
+       if (r)
+               goto release_sg;
+
+       r = -ENOMEM;
+       nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+       if (nents != ttm->sg->nents)
+               goto release_sg;
+
+       drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+                                        gtt->ttm.dma_address, ttm->num_pages);
+
        return 0;
-}
 
-static void radeon_sync_obj_unref(void **sync_obj)
-{
-       radeon_fence_unref((struct radeon_fence **)sync_obj);
-}
+release_sg:
+       kfree(ttm->sg);
 
-static void *radeon_sync_obj_ref(void *sync_obj)
-{
-       return radeon_fence_ref((struct radeon_fence *)sync_obj);
+release_pages:
+       release_pages(ttm->pages, pinned, 0);
+       return r;
 }
 
-static bool radeon_sync_obj_signaled(void *sync_obj)
+static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
-       return radeon_fence_signaled((struct radeon_fence *)sync_obj);
-}
+       struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+       struct scatterlist *sg;
+       int i;
 
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_tt {
-       struct ttm_dma_tt               ttm;
-       struct radeon_device            *rdev;
-       u64                             offset;
-};
+       int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+       enum dma_data_direction direction = write ?
+               DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+       /* free the sg table and pages again */
+       dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+       for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
+               struct page *page = sg_page(sg);
+
+               if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
+                       set_page_dirty(page);
+
+               mark_page_accessed(page);
+               page_cache_release(page);
+       }
+
+       sg_free_table(ttm->sg);
+}
 
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
 {
        struct radeon_ttm_tt *gtt = (void*)ttm;
+       uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+               RADEON_GART_PAGE_WRITE;
        int r;
 
+       if (gtt->userptr) {
+               radeon_ttm_tt_pin_userptr(ttm);
+               flags &= ~RADEON_GART_PAGE_WRITE;
+       }
+
        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
-       r = radeon_gart_bind(gtt->rdev, gtt->offset,
-                            ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+       if (ttm->caching_state == tt_cached)
+               flags |= RADEON_GART_PAGE_SNOOP;
+       r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+                            ttm->pages, gtt->ttm.dma_address, flags);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                          ttm->num_pages, (unsigned)gtt->offset);
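radeon_gart_bind() gains a per-mapping flags argument built from the RADEON_GART_PAGE_VALID/READ/WRITE/SNOOP bits above; the call shape implies a prototype roughly like the following (an assumption, the real declaration is in radeon.h):

	int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
			     int pages, struct page **pagelist,
			     dma_addr_t *dma_addr, uint32_t flags);

Note that the userptr branch clears RADEON_GART_PAGE_WRITE, so pinned user pages are never writable through the GART in this version, while cached ttm pages additionally request RADEON_GART_PAGE_SNOOP.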
@@ -543,6 +626,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
        struct radeon_ttm_tt *gtt = (void *)ttm;
 
        radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+
+       if (gtt->userptr)
+               radeon_ttm_tt_unpin_userptr(ttm);
+
        return 0;
 }
 
@@ -599,6 +686,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
        if (ttm->state != tt_unpopulated)
                return 0;
 
+       if (gtt->userptr) {
+               ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+               if (!ttm->sg)
+                       return -ENOMEM;
+
+               ttm->page_flags |= TTM_PAGE_FLAG_SG;
+               ttm->state = tt_unbound;
+               return 0;
+       }
+
        if (slave && ttm->sg) {
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address, ttm->num_pages);
@@ -648,6 +745,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
+       if (gtt->userptr) {
+               kfree(ttm->sg);
+               ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+               return;
+       }
+
        if (slave)
                return;
 
@@ -676,6 +779,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
        ttm_pool_unpopulate(ttm);
 }
 
+int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+                             uint32_t flags)
+{
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+
+       if (gtt == NULL)
+               return -EINVAL;
+
+       gtt->userptr = addr;
+       gtt->usermm = current->mm;
+       gtt->userflags = flags;
+       return 0;
+}
+
+bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
+{
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+
+       if (gtt == NULL)
+               return false;
+
+       return !!gtt->userptr;
+}
+
+bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
+{
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+
+       if (gtt == NULL)
+               return false;
+
+       return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
        .ttm_tt_populate = &radeon_ttm_tt_populate,
@@ -685,11 +822,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .verify_access = &radeon_verify_access,
-       .sync_obj_signaled = &radeon_sync_obj_signaled,
-       .sync_obj_wait = &radeon_sync_obj_wait,
-       .sync_obj_flush = &radeon_sync_obj_flush,
-       .sync_obj_unref = &radeon_sync_obj_unref,
-       .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
        .io_mem_reserve = &radeon_ttm_io_mem_reserve,
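The three exported helpers (set_userptr/has_userptr/is_readonly) are intended to be driven from the GEM side; a rough sketch of how a userptr ioctl handler could hook them up (everything outside this file here, such as gobj and args, is a placeholder/assumption, not the actual radeon_gem.c code):

	struct radeon_bo *bo = gem_to_radeon_bo(gobj);
	int r;

	/* record the user address range and flags on the backing ttm_tt;
	 * the pages themselves are pinned later in radeon_ttm_backend_bind() */
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

With the sync_obj callbacks dropped from radeon_bo_driver, fencing is handled through the buffers' reservation objects rather than through driver callbacks.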
@@ -726,7 +858,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
        r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
-                            RADEON_GEM_DOMAIN_VRAM,
+                            RADEON_GEM_DOMAIN_VRAM, 0,
                             NULL, &rdev->stollen_vga_memory);
        if (r) {
                return r;
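Finally, radeon_bo_create() grows a creation-flags parameter (passed as 0 for the stolen VGA buffer above); the implied prototype is roughly the following (an assumption, the authoritative one is in radeon_object.h):

	int radeon_bo_create(struct radeon_device *rdev, unsigned long size,
			     int byte_align, bool kernel, u32 domain, u32 flags,
			     struct sg_table *sg, struct radeon_bo **bo_ptr);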