lib/scatterlist: sg_page_iter: support sg lists w/o backing pages
author	Imre Deak <imre.deak@intel.com>
Tue, 26 Mar 2013 13:14:18 +0000 (15:14 +0200)
committer	Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 27 Mar 2013 16:13:44 +0000 (17:13 +0100)
The i915 driver uses sg lists for memory without backing 'struct page'
pages, similarly to other IO memory regions, setting only the DMA
address for these. It does this so that it can program the HW MMU
tables in a uniform way for sg lists both with and without backing pages.

Without a valid page pointer we can't call nth_page to get the current
page in __sg_page_iter_next, so add a helper that relevant users can
call separately. Also add a helper to get the DMA address of the current
page (idea from Daniel).

Convert all places in i915 to use the new API.
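
For illustration, a minimal sketch (hypothetical helper, not part of this
patch) of what a converted loop looks like: it walks a DMA-mapped,
page-backed sg table and reads both the current page and its DMA address
through the new accessors.

#include <linux/printk.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical helper, not from this patch; assumes the table has
 * already been mapped with dma_map_sg() so its DMA addresses are valid.
 */
static void dump_sg_pages(struct sg_table *st)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);

		pr_debug("page %p dma %pad\n", page, &addr);
	}
}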

Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/drm_cache.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
include/linux/scatterlist.h
lib/scatterlist.c

index bc8edbeca3fd9217234a6aff74c4c136765cbc9b..bb8f58012189af7651c20c2d9b1da1ed390ebcfd 100644 (file)
@@ -109,7 +109,7 @@ drm_clflush_sg(struct sg_table *st)
 
                mb();
                for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-                       drm_clflush_page(sg_iter.page);
+                       drm_clflush_page(sg_page_iter_page(&sg_iter));
                mb();
 
                return;
index 1d091ea12fadbbba7f66d5578d98d6e9ce16d830..f69538508d8c416aeb70835d5db02726fbc520a9 100644 (file)
@@ -1543,7 +1543,7 @@ static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *
        struct sg_page_iter sg_iter;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
-               return sg_iter.page;
+               return sg_page_iter_page(&sg_iter);
 
        return NULL;
 }
index a1123a32dc27fabf465b06ff0b9bc145e5e0938e..911bd40ef5132949f49e0c9f15d569577b18c372 100644 (file)
@@ -442,7 +442,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
-               struct page *page = sg_iter.page;
+               struct page *page = sg_page_iter_page(&sg_iter);
 
                if (remain <= 0)
                        break;
@@ -765,7 +765,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
-               struct page *page = sg_iter.page;
+               struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;
 
                if (remain <= 0)
@@ -1647,7 +1647,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                obj->dirty = 0;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_iter.page;
+               struct page *page = sg_page_iter_page(&sg_iter);
 
                if (obj->dirty)
                        set_page_dirty(page);
@@ -1827,7 +1827,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_pages:
        sg_mark_end(sg);
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-               page_cache_release(sg_iter.page);
+               page_cache_release(sg_page_iter_page(&sg_iter));
        sg_free_table(st);
        kfree(st);
        return PTR_ERR(page);
index 898615d2d5e23ad70aa6355c479f4e62a953c977..c6dfc1466e3a1ec2c775ea1b178e3dbce20858e7 100644 (file)
@@ -130,7 +130,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_iter.page;
+               pages[i++] = sg_page_iter_page(&sg_iter);
 
        obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
        drm_free_large(pages);
index 4cbae7bbb8338c483aa57a1602298ed84d1fbbd8..24a23b31b55fe188779d392276986065f2b7a975 100644 (file)
@@ -123,8 +123,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
                dma_addr_t page_addr;
 
-               page_addr = sg_dma_address(sg_iter.sg) +
-                               (sg_iter.sg_pgoffset << PAGE_SHIFT);
+               page_addr = sg_page_iter_dma_address(&sg_iter);
                pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
                                                    cache_level);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
@@ -424,8 +423,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
        dma_addr_t addr;
 
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-               addr = sg_dma_address(sg_iter.sg) +
-                       (sg_iter.sg_pgoffset << PAGE_SHIFT);
+               addr = sg_page_iter_dma_address(&sg_iter);
                iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
                i++;
        }
index f799708bcb85232d7aaaffd2adaae4ffc024b50d..c807eb93755b7f00ac3f785e251684ecaf25fed6 100644 (file)
@@ -481,7 +481,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_iter.page;
+               struct page *page = sg_page_iter_page(&sg_iter);
                char new_bit_17 = page_to_phys(page) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj->bit_17) != 0)) {
@@ -511,7 +511,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               if (page_to_phys(sg_iter.page) & (1 << 17))
+               if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
                        __set_bit(i, obj->bit_17);
                else
                        __clear_bit(i, obj->bit_17);
index 2d8bdaef96116517cb8c6c1862a3cb82a3a863db..e96b9546c4c6ec68e26300c562bfd5620ba77e45 100644 (file)
@@ -235,13 +235,13 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
  * sg page iterator
  *
  * Iterates over sg entries page-by-page.  On each successful iteration,
- * @piter->page points to the current page, @piter->sg to the sg holding this
- * page and @piter->sg_pgoffset to the page's page offset within the sg. The
- * iteration will stop either when a maximum number of sg entries was reached
- * or a terminating sg (sg_last(sg) == true) was reached.
+ * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
+ * to get the current page and its dma address. @piter->sg will point to the
+ * sg holding this page and @piter->sg_pgoffset to the page's page offset
+ * within the sg. The iteration will stop either when a maximum number of sg
+ * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
  */
 struct sg_page_iter {
-       struct page             *page;          /* current page */
        struct scatterlist      *sg;            /* sg holding the page */
        unsigned int            sg_pgoffset;    /* page offset within the sg */
 
@@ -255,6 +255,24 @@ bool __sg_page_iter_next(struct sg_page_iter *piter);
 void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset);
+/**
+ * sg_page_iter_page - get the current page held by the page iterator
+ * @piter:     page iterator holding the page
+ */
+static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
+{
+       return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+}
+
+/**
+ * sg_page_iter_dma_address - get the dma address of the current page held by
+ * the page iterator.
+ * @piter:     page iterator holding the page
+ */
+static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
+{
+       return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
+}
 
 /**
  * for_each_sg_page - iterate over the pages of the given sg list
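
The kernel-doc above spells out the iterator contract; to make the
no-backing-page case concrete, here is an illustrative sketch (an assumed
caller in the spirit of i915's stolen-memory setup, not taken from this
patch). Only the length and DMA address of the single entry are filled in,
yet for_each_sg_page() can still walk the region page by page as long as
the caller sticks to sg_page_iter_dma_address(); sg_page_iter_page() would
be invalid here because there is no struct page.

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Hypothetical helper, not from this patch. */
static int walk_io_region(struct sg_table *st, dma_addr_t io_addr,
			  unsigned int size)
{
	struct sg_page_iter sg_iter;
	int ret;

	ret = sg_alloc_table(st, 1, GFP_KERNEL);
	if (ret)
		return ret;

	/* No sg_set_page(): this region has no struct page backing. */
	st->sgl->offset = 0;
	st->sgl->length = size;
	sg_dma_address(st->sgl) = io_addr;
	sg_dma_len(st->sgl) = size;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);

		pr_debug("pte dma %pad\n", &addr);
	}

	return 0;
}
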
index b83c144d731f3707402be43e83d2041ab4df82b7..a1cf8cae60e70dd298735f4feab09e55f8141e16 100644 (file)
@@ -401,7 +401,6 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
        piter->__pg_advance = 0;
        piter->__nents = nents;
 
-       piter->page = NULL;
        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
 }
@@ -426,7 +425,6 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
                if (!--piter->__nents || !piter->sg)
                        return false;
        }
-       piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
 
        return true;
 }
@@ -496,7 +494,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }
-       miter->page = miter->piter.page;
+       miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;
 
        if (miter->__flags & SG_MITER_ATOMIC)