/*
 * drivers/staging/android/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18 #include <linux/device.h>
19 #include <linux/slab.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/dma-mapping.h>
23 #ifdef CONFIG_RK_IOMMU
24 #include <linux/rockchip-iovmm.h>
/* Error value returned by ion_cma_allocate() on every failure path. */
#define ION_CMA_ALLOCATE_FAILED -1

/* Recover the containing ion_cma_heap from its embedded ion_heap member. */
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
39 struct ion_cma_buffer_info {
42 struct sg_table *table;
46 /* ION CMA heap operations functions */
47 static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
48 unsigned long len, unsigned long align,
51 struct ion_cma_heap *cma_heap = to_cma_heap(heap);
52 struct device *dev = cma_heap->dev;
53 struct ion_cma_buffer_info *info;
55 dev_dbg(dev, "Request buffer allocation len %ld\n", len);
57 if (buffer->flags & ION_FLAG_CACHED)
60 if (align > PAGE_SIZE)
63 info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
65 return ION_CMA_ALLOCATE_FAILED;
67 info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
68 GFP_HIGHUSER | __GFP_ZERO);
70 if (!info->cpu_addr) {
71 dev_err(dev, "Fail to allocate buffer\n");
75 info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
79 if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
82 /* keep this for memory release */
83 buffer->priv_virt = info;
84 dev_dbg(dev, "Allocate buffer %p\n", buffer);
90 dma_free_coherent(dev, len, info->cpu_addr, info->handle);
93 return ION_CMA_ALLOCATE_FAILED;
96 static void ion_cma_free(struct ion_buffer *buffer)
98 struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
99 struct device *dev = cma_heap->dev;
100 struct ion_cma_buffer_info *info = buffer->priv_virt;
102 dev_dbg(dev, "Release buffer %p\n", buffer);
104 dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
105 /* release sg table */
106 sg_free_table(info->table);
111 /* return physical address in addr */
112 static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
113 ion_phys_addr_t *addr, size_t *len)
115 struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
116 struct device *dev = cma_heap->dev;
117 struct ion_cma_buffer_info *info = buffer->priv_virt;
119 dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
122 *addr = info->handle;
128 static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
129 struct ion_buffer *buffer)
131 struct ion_cma_buffer_info *info = buffer->priv_virt;
/*
 * No-op: the sg_table is owned by ion_cma_buffer_info and released in
 * ion_cma_free(), not per map/unmap cycle.
 */
static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
}
141 static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
142 struct vm_area_struct *vma)
144 struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
145 struct device *dev = cma_heap->dev;
146 struct ion_cma_buffer_info *info = buffer->priv_virt;
148 return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
152 static void *ion_cma_map_kernel(struct ion_heap *heap,
153 struct ion_buffer *buffer)
155 struct ion_cma_buffer_info *info = buffer->priv_virt;
156 /* kernel memory mapping has been done at allocation time */
157 return info->cpu_addr;
/*
 * No-op: the kernel mapping persists for the buffer's lifetime and is
 * torn down together with the allocation in ion_cma_free().
 */
static void ion_cma_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
}
165 #ifdef CONFIG_RK_IOMMU
/*
 * Map the buffer's contiguous region into the Rockchip IOVMM owned by
 * @iommu_dev, recording the resulting iova and mapped size in @data.
 * NOTE(review): this extract is missing several lines — the final
 * signature parameter/opening brace, the trailing rockchip_iovmm_map()
 * arguments, the pr_debug() tail, the error-path goto and the return —
 * compare against the full Rockchip tree before relying on it.
 */
static int ion_cma_map_iommu(struct ion_buffer *buffer,
			     struct device *iommu_dev,
			     struct ion_iommu_map *data,
			     unsigned long iova_length,
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	/* presumably maps info->table->sgl for iova_length bytes — verify */
	data->iova_addr = rockchip_iovmm_map(iommu_dev,
	pr_debug("%s: map %pad -> %lx\n", __func__,
		 &info->table->sgl->dma_address,
	/* rockchip_iovmm_map() encodes failure as an error value in the iova */
	if (IS_ERR_VALUE(data->iova_addr)) {
		pr_err("%s: failed: %lx\n", __func__, data->iova_addr);
		ret = data->iova_addr;

	data->mapped_size = iova_length;
/*
 * Tear down the IOVMM mapping created by ion_cma_map_iommu() for @data.
 * NOTE(review): extract is missing the opening brace, the pr_debug()
 * argument list and the closing brace.
 */
void ion_cma_unmap_iommu(struct device *iommu_dev, struct ion_iommu_map *data)
	pr_debug("%s: unmap %x@%lx\n",
	/* releases the iova range recorded at map time */
	rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
204 static struct ion_heap_ops ion_cma_ops = {
205 .allocate = ion_cma_allocate,
206 .free = ion_cma_free,
207 .map_dma = ion_cma_heap_map_dma,
208 .unmap_dma = ion_cma_heap_unmap_dma,
209 .phys = ion_cma_phys,
210 .map_user = ion_cma_mmap,
211 .map_kernel = ion_cma_map_kernel,
212 .unmap_kernel = ion_cma_unmap_kernel,
213 #ifdef CONFIG_RK_IOMMU
214 .map_iommu = ion_cma_map_iommu,
215 .unmap_iommu = ion_cma_unmap_iommu,
219 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
221 struct ion_cma_heap *cma_heap;
223 cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
226 return ERR_PTR(-ENOMEM);
228 cma_heap->heap.ops = &ion_cma_ops;
230 * get device from private heaps data, later it will be
231 * used to make the link with reserved CMA memory
233 cma_heap->dev = data->priv;
234 cma_heap->heap.type = ION_HEAP_TYPE_DMA;
235 return &cma_heap->heap;
238 void ion_cma_heap_destroy(struct ion_heap *heap)
240 struct ion_cma_heap *cma_heap = to_cma_heap(heap);