ion: fix dma APIs
author Colin Cross <ccross@android.com>
Wed, 18 Sep 2013 17:30:20 +0000 (12:30 -0500)
committer Colin Cross <ccross@android.com>
Sat, 16 Nov 2013 00:10:14 +0000 (16:10 -0800)
__dma_page_cpu_to_dev is a private ARM API that is not available
in 3.10 and was never available on other architectures.  We can
get the same behavior by calling dma_sync_sg_for_device with a
scatterlist containing a single page.  This is still not quite a
kosher use of the DMA APIs, since we still conflate physical
addresses with bus addresses, but it should at least compile on
all platforms, and it should work on any platform that doesn't
have a physical-to-bus address translation.

Change-Id: I8451c2dae4bf85841015c016640684ac28430a5a
Signed-off-by: Colin Cross <ccross@android.com>
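
For reference, the single-page scatterlist pattern described above, which the
patch wraps in ion_pages_sync_for_device() and calls from the page pool and
system heap, can be sketched standalone roughly as follows.  This is an
illustrative sketch, not code from the patch: the example_alloc_flushed_pages
name and the direct alloc_pages() call are assumptions, and (as noted above)
using the physical address as the DMA address is only valid on platforms with
no physical-to-bus translation.

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    static struct page *example_alloc_flushed_pages(struct device *dev,
                                                    unsigned int order)
    {
            struct page *page = alloc_pages(GFP_KERNEL, order);
            struct scatterlist sg;

            if (!page)
                    return NULL;

            /* Build a one-entry scatterlist covering the whole allocation. */
            sg_init_table(&sg, 1);
            sg_set_page(&sg, page, PAGE_SIZE << order, 0);

            /*
             * Shortcut carried over from the patch: use the physical address
             * as the DMA address.  Not correct in general, but it works where
             * physical and bus addresses coincide.
             */
            sg_dma_address(&sg) = page_to_phys(page);

            /* Push any dirty cache lines out so the device sees the data. */
            dma_sync_sg_for_device(dev, &sg, 1, DMA_BIDIRECTIONAL);

            return page;
    }

This is essentially what ion_page_pool_alloc_pages() and alloc_buffer_page()
do in the hunks below, via the new helper.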
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_chunk_heap.c
drivers/staging/android/ion/ion_page_pool.c
drivers/staging/android/ion/ion_priv.h
drivers/staging/android/ion/ion_system_heap.c

index a8d4735b462c55200494f4fb5cceb0b086b9167a..38e3d6ff3a3ed71e4241497c62e9369be1d1b969 100644 (file)
@@ -840,6 +840,22 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
 }
 
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+               size_t size, enum dma_data_direction dir)
+{
+       struct scatterlist sg;
+
+       sg_init_table(&sg, 1);
+       sg_set_page(&sg, page, size, 0);
+       /*
+        * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+        * for the targeted device, but this works on the currently targeted
+        * hardware.
+        */
+       sg_dma_address(&sg) = page_to_phys(page);
+       dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
 struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
@@ -864,7 +880,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                struct page *page = buffer->pages[i];
 
                if (ion_buffer_page_is_dirty(page))
-                       __dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
+                       ion_pages_sync_for_device(dev, ion_buffer_page(page),
+                                                       PAGE_SIZE, dir);
+
                ion_buffer_page_clean(buffer->pages + i);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
index a2b2e1b7af387e28c265314fafe2b4aac64ea870..c16350e9ffcc55d5c97a46c14be4be1b881855bf 100644 (file)
@@ -106,11 +106,11 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 
        ion_heap_buffer_zero(buffer);
 
+       if (ion_buffer_cached(buffer))
+               dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+                                                               DMA_BIDIRECTIONAL);
+
        for_each_sg(table->sgl, sg, table->nents, i) {
-               if (ion_buffer_cached(buffer))
-                       arm_dma_ops.sync_single_for_device(NULL,
-                               pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
-                               sg_dma_len(sg), DMA_BIDIRECTIONAL);
                gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
                              sg_dma_len(sg));
        }
@@ -148,7 +148,6 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
        pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
        int i, ret;
 
-
        chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
        if (!chunk_heap)
                return ERR_PTR(-ENOMEM);
@@ -181,9 +180,9 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
        }
        free_vm_area(vm_struct);
 
-       arm_dma_ops.sync_single_for_device(NULL,
-               pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
-               heap_data->size, DMA_BIDIRECTIONAL);
+       ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
+                       heap_data->size, DMA_BIDIRECTIONAL);
+
        gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
        chunk_heap->heap.ops = &chunk_heap_ops;
        chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
index 7e00f51292e12f8186da08b6686e115c190cc3a3..b052ff6bf38301e55711a760c02b4d862e58277c 100644 (file)
@@ -34,13 +34,8 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 
        if (!page)
                return NULL;
-       /* this is only being used to flush the page for dma,
-          this api is not really suitable for calling from a driver
-          but no better way to flush a page for dma exist at this time */
-       arm_dma_ops.sync_single_for_device(NULL,
-                                          pfn_to_dma(NULL, page_to_pfn(page)),
-                                          PAGE_SIZE << pool->order,
-                                          DMA_BIDIRECTIONAL);
+       ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+                                               DMA_BIDIRECTIONAL);
        return page;
 }
 
index 0ccf409cde7de4c3e01156677363230eb9ca150a..ea87b54987e781301bdb010ea005065730365dbf 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef _ION_PRIV_H
 #define _ION_PRIV_H
 
+#include <linux/dma-direction.h>
 #include <linux/kref.h>
 #include <linux/mm_types.h>
 #include <linux/mutex.h>
@@ -357,4 +358,15 @@ void ion_page_pool_free(struct ion_page_pool *, struct page *);
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                          int nr_to_scan);
 
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ *                             device
+ * @dev:               the device the pages will be used with
+ * @page:              the first page to be flushed
+ * @size:              size in bytes of region to be flushed
+ * @dir:               direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+               size_t size, enum dma_data_direction dir);
+
 #endif /* _ION_PRIV_H */
index 06e0702396a414bf8ed197991580f1ecedd84b67..90b2e04157ca1064132575de5301bb903d12867d 100644 (file)
@@ -77,9 +77,8 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                page = ion_heap_alloc_pages(buffer, gfp_flags, order);
                if (!page)
                        return 0;
-               arm_dma_ops.sync_single_for_device(NULL,
-                       pfn_to_dma(NULL, page_to_pfn(page)),
-                       PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+               ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+                                               DMA_BIDIRECTIONAL);
        }
        if (!page)
                return 0;