#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
+#include <linux/rockchip_ion.h>
+#include <linux/rockchip-iovmm.h>
#include "ion.h"
#include "ion_priv.h"
* as soon as it will avalaible.
*/
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
+ dma_addr_t handle, size_t size)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page = phys_to_page(handle);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info;
+ DEFINE_DMA_ATTRS(attrs);
+#ifdef CONFIG_ION_CMA_HIGHMEM
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+#endif
dev_dbg(dev, "Request buffer allocation len %ld\n", len);
if (buffer->flags & ION_FLAG_CACHED)
return ION_CMA_ALLOCATE_FAILED;
}
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
- GFP_HIGHUSER | __GFP_ZERO);
+ info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO, &attrs);
if (!info->cpu_addr) {
- dev_err(dev, "Fail to allocate buffer\n");
+ dev_err(dev, "Fail to allocate(%lx) buffer\n", len);
goto err;
}
goto free_mem;
}
- if (ion_cma_get_sgtable
- (dev, info->table, info->cpu_addr, info->handle, len))
+ if (ion_cma_get_sgtable(dev, info->table, info->handle, len))
goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
free_table:
kfree(info->table);
free_mem:
- dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+ dma_free_attrs(dev, len, info->cpu_addr, info->handle, &attrs);
err:
kfree(info);
return ION_CMA_ALLOCATE_FAILED;
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
+ DEFINE_DMA_ATTRS(attrs);
+#ifdef CONFIG_ION_CMA_HIGHMEM
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+#endif
dev_dbg(dev, "Release buffer %p\n", buffer);
/* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ dma_free_attrs(dev, buffer->size, info->cpu_addr, info->handle, &attrs);
/* release sg table */
sg_free_table(info->table);
kfree(info->table);
buffer->size);
}
+#ifndef CONFIG_ION_CMA_HIGHMEM
static void *ion_cma_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_buffer *buffer)
{
}
+#endif
+
+#ifdef CONFIG_ROCKCHIP_IOMMU
+/*
+ * Map the buffer's scatter-gather table into the device's IOMMU address
+ * space via the Rockchip iovmm layer.
+ *
+ * On success, data->iova_addr holds the device-visible address and
+ * data->mapped_size the mapped length; returns 0.  Returns -EINVAL if
+ * rockchip_iovmm_map() yields 0 or an error-encoded value.
+ *
+ * NOTE(review): @flags is currently unused — presumably reserved for
+ * caller-specified mapping attributes; confirm against the ion core.
+ */
+static int ion_cma_map_iommu(struct ion_buffer *buffer,
+ struct device *iommu_dev,
+ struct ion_iommu_map *data,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ int ret = 0;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ data->iova_addr = rockchip_iovmm_map(iommu_dev, info->table->sgl, 0, iova_length);
+ /* %pad: dma_addr_t may be 64-bit (LPAE/arm64); %x would truncate it */
+ pr_debug("%s: map %pad -> %lx\n", __func__,
+  &info->table->sgl->dma_address, data->iova_addr);
+ if (!data->iova_addr || IS_ERR_VALUE(data->iova_addr)) {
+  pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__, data->iova_addr);
+  ret = -EINVAL;
+  goto out;
+ }
+
+ data->mapped_size = iova_length;
+
+out:
+ return ret;
+}
+
+/*
+ * Tear down an IOMMU mapping previously created for this buffer: hands
+ * data->iova_addr back to the Rockchip iovmm layer for unmapping.
+ *
+ * NOTE(review): the "%x" specifier assumes data->mapped_size is a 32-bit
+ * value — confirm the field's type in struct ion_iommu_map.
+ */
+void ion_cma_unmap_iommu(struct device *iommu_dev, struct ion_iommu_map *data)
+{
+ pr_debug("%s: unmap %x@%lx\n", __func__, data->mapped_size, data->iova_addr);
+ rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
+
+ return;
+}
+#endif
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.unmap_dma = ion_cma_heap_unmap_dma,
.phys = ion_cma_phys,
.map_user = ion_cma_mmap,
+#ifdef CONFIG_ION_CMA_HIGHMEM
+ /*
+  * Highmem CMA buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING,
+  * so there is no permanent kernel virtual address: use the generic
+  * ion_heap helpers, which map the pages on demand.
+  */
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+#else
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
+#endif
+#ifdef CONFIG_ROCKCHIP_IOMMU
+ /* Device-side mappings through the Rockchip iovmm layer. */
+ .map_iommu = ion_cma_map_iommu,
+ .unmap_iommu = ion_cma_unmap_iommu,
+#endif
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)