drm/rockchip: gem: reorder pages when a chunk is shorter than 8 pages
author	Simon <xxm@rock-chips.com>
Tue, 17 Jan 2017 01:23:19 +0000 (09:23 +0800)
committer	Huang, Tao <huangtao@rock-chips.com>
Tue, 7 Mar 2017 02:05:10 +0000 (10:05 +0800)
drm_gem_get_pages() can return a mix of long physically contiguous
runs and short, scattered ones. Keep any run of at least PG_ROUND (8)
pages in place; sort the pages of shorter runs into eight buckets
keyed on bits [14:12] of their physical address, then drain the
buckets round-robin. Bits [14:12] of consecutive entries in the
rebuilt page array then cycle through the bucket indices, mimicking
the layout of a naturally aligned eight-page block.

Change-Id: I03a91d2f9c017086b3cb35edeaf6b7913b147b9b
Signed-off-by: Simon <xxm@rock-chips.com>
drivers/gpu/drm/rockchip/rockchip_drm_gem.c

index 4a7bc553e75e9ba71c64ff3c08a74a5214b5f07a..30d8efcb5132eed2f13667ee6d1b01e323f00b29 100644
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
 
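+/*
+ * Bookkeeping node used to queue a page into one of the
+ * reordering buckets below.
+ */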
+struct page_info {
+       struct page *page;
+       struct list_head list;
+};
+
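+/*
+ * Pages are bucketed by physical address bits [14:12] (8 buckets);
+ * contiguous runs of at least PG_ROUND pages are kept as-is.
+ */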
+#define PG_ROUND       8
+
 static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
 {
        struct drm_device *drm = rk_obj->base.dev;
@@ -84,24 +99,130 @@ static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
        return 0;
 }
 
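+/* Free any bucket bookkeeping left over after a failure. */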
+static void rockchip_gem_free_list(struct list_head lists[])
+{
+       struct page_info *info, *tmp_info;
+       int i;
+
+       for (i = 0; i < PG_ROUND; i++) {
+               list_for_each_entry_safe(info, tmp_info, &lists[i], list) {
+                       list_del(&info->list);
+                       kfree(info);
+               }
+       }
+}
+
 static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
 {
        struct drm_device *drm = rk_obj->base.dev;
        int ret, i;
        struct scatterlist *s;
-
-       rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
-       if (IS_ERR(rk_obj->pages))
-               return PTR_ERR(rk_obj->pages);
+       unsigned int cur_page;
+       struct page **pages, **dst_pages;
+       int j;
+       int n_pages;
+       unsigned long chunk_pages;
+       unsigned long remain;
+       struct list_head lists[PG_ROUND];
+       phys_addr_t phys;
+       int end = 0;
+       unsigned int bit12_14;
+       unsigned int block_index[PG_ROUND] = {0};
+       struct page_info *info;
+       unsigned int maximum;
+
+       for (i = 0; i < PG_ROUND; i++)
+               INIT_LIST_HEAD(&lists[i]);
+
+       pages = drm_gem_get_pages(&rk_obj->base);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       rk_obj->pages = pages;
 
        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
 
-       rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+       n_pages = rk_obj->num_pages;
+
+       dst_pages = drm_malloc_ab(n_pages, sizeof(struct page *));
+       if (!dst_pages) {
+               ret = -ENOMEM;
+               goto err_put_pages;
+       }
+
+       cur_page = 0;
+       remain = n_pages;
+       while (remain) {
+               /* Look for the end of the current contiguous chunk. */
+               for (j = cur_page + 1; j < n_pages; ++j) {
+                       if (page_to_pfn(pages[j]) !=
+                           page_to_pfn(pages[j - 1]) + 1)
+                               break;
+               }
+
+               chunk_pages = j - cur_page;
+
+               if (chunk_pages >= PG_ROUND) {
+                       for (i = 0; i < chunk_pages; i++)
+                               dst_pages[end + i] = pages[cur_page + i];
+                       end += chunk_pages;
+               } else {
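+                       /*
+                        * Short chunk: park each page in the bucket that
+                        * matches bits [14:12] of its physical address.
+                        */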
+                       for (i = 0; i < chunk_pages; i++) {
+                               info = kmalloc(sizeof(*info), GFP_KERNEL);
+                               if (!info) {
+                                       ret = -ENOMEM;
+                                       goto err_put_list;
+                               }
+
+                               INIT_LIST_HEAD(&info->list);
+                               info->page = pages[cur_page + i];
+                               phys = page_to_phys(info->page);
+                               bit12_14 = (phys >> 12) & 0x7;
+                               list_add_tail(&info->list, &lists[bit12_14]);
+                               block_index[bit12_14]++;
+                       }
+               }
+
+               cur_page = j;
+               remain -= chunk_pages;
+       }
+
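+       /* The fullest bucket bounds the number of round-robin passes. */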
+       maximum = block_index[0];
+       for (i = 1; i < PG_ROUND; i++)
+               maximum = max(maximum, block_index[i]);
+
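+       /*
+        * Drain the buckets round-robin so bits [14:12] of consecutive
+        * pages cycle through the bucket indices wherever possible.
+        */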
+       for (i = 0; i < maximum; i++) {
+               for (j = 0; j < PG_ROUND; j++) {
+                       if (!list_empty(&lists[j])) {
+                               info = list_first_entry(&lists[j],
+                                                       struct page_info, list);
+                               dst_pages[end++] = info->page;
+                               list_del(&info->list);
+                               kfree(info);
+                       }
+               }
+       }
+
+       DRM_DEBUG_KMS("%s: end = %d, n_pages = %d\n", __func__, end, n_pages);
+
+       rk_obj->sgt = drm_prime_pages_to_sg(dst_pages, rk_obj->num_pages);
        if (IS_ERR(rk_obj->sgt)) {
                ret = PTR_ERR(rk_obj->sgt);
-               goto err_put_pages;
+               goto err_put_list;
        }
 
+       rk_obj->pages = dst_pages;
+
        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
@@ -115,8 +236,14 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
        dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
                               DMA_TO_DEVICE);
 
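+       /* rk_obj->pages now points at dst_pages; drop the original array. */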
+       drm_free_large(pages);
+
        return 0;
 
+err_put_list:
+       rockchip_gem_free_list(lists);
+       drm_free_large(dst_pages);
 err_put_pages:
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
        return ret;