2 * drivers/gpu/ion/ion_drm_heap.c
4 * Copyright (C) 2011 Google, Inc.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
16 #include <linux/spinlock.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/err.h>
19 #include <linux/genalloc.h>
22 #include <linux/scatterlist.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/rockchip-iovmm.h>
/* Sentinel phys address returned when the carveout pool has no free space. */
29 #define ION_DRM_ALLOCATE_FAILED -1
/* NOTE(review): field of struct ion_drm_heap (declaration truncated in this
 * view); gen_pool allocator that manages the heap's physical carveout range. */
33 struct gen_pool *pool;
/*
 * ion_drm_allocate - carve @size bytes out of the heap's gen_pool.
 *
 * Returns the physical address of the allocation, or
 * ION_DRM_ALLOCATE_FAILED when gen_pool_alloc() finds no room.
 * (Body partially truncated in this view — size/align parameters and the
 * success return path are not visible here.)
 */
37 ion_phys_addr_t ion_drm_allocate(struct ion_heap *heap,
41 struct ion_drm_heap *drm_heap =
42 container_of(heap, struct ion_drm_heap, heap);
43 unsigned long offset = gen_pool_alloc(drm_heap->pool, size);
/* gen_pool_alloc() returns 0 on failure; report the sentinel to callers. */
46 return ION_DRM_ALLOCATE_FAILED;
/*
 * ion_drm_free - return a previously allocated range to the gen_pool.
 *
 * Silently ignores the failure sentinel so callers may pass through the
 * result of a failed ion_drm_allocate() without a separate check.
 */
51 void ion_drm_free(struct ion_heap *heap, ion_phys_addr_t addr,
54 struct ion_drm_heap *drm_heap =
55 container_of(heap, struct ion_drm_heap, heap);
/* Nothing to release if the matching allocate failed. */
57 if (addr == ION_DRM_ALLOCATE_FAILED)
59 gen_pool_free(drm_heap->pool, addr, size);
/*
 * ion_drm_heap_phys - report the physical address/length of @buffer.
 *
 * The buffer's sg_table (stored in priv_virt at allocation time) has a
 * single contiguous entry, so the first page's physical address describes
 * the whole buffer.  (Assignment of *addr/*len and the return are outside
 * this view.)
 */
62 static int ion_drm_heap_phys(struct ion_heap *heap,
63 struct ion_buffer *buffer,
64 ion_phys_addr_t *addr, size_t *len)
66 struct sg_table *table = buffer->priv_virt;
67 struct page *page = sg_page(table->sgl);
68 ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
/*
 * ion_drm_heap_allocate - allocate a physically contiguous secure buffer.
 *
 * Rejects alignments larger than PAGE_SIZE and cached mappings (secure
 * memory must stay uncached).  On success, records a one-entry sg_table in
 * buffer->priv_virt describing the contiguous range obtained from
 * ion_drm_allocate().  Error-unwind labels are partially truncated in this
 * view; sg_free_table() on line "112" is part of the failure path.
 */
75 static int ion_drm_heap_allocate(struct ion_heap *heap,
76 struct ion_buffer *buffer,
77 unsigned long size, unsigned long align,
80 struct sg_table *table;
81 ion_phys_addr_t paddr;
/* gen_pool only guarantees its own minimum alignment; refuse stricter asks. */
84 if (align > PAGE_SIZE)
87 if (ion_buffer_cached(buffer)) {
88 pr_err("%s: cannot allocate cached memory from secure heap %s\n",
89 __func__, heap->name);
93 table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
/* Single scatterlist entry: the carveout range is contiguous. */
96 ret = sg_alloc_table(table, 1, GFP_KERNEL);
100 paddr = ion_drm_allocate(heap, size, align);
101 if (paddr == ION_DRM_ALLOCATE_FAILED) {
106 sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
107 buffer->priv_virt = table;
/* Failure path: undo sg_alloc_table() before returning. */
112 sg_free_table(table);
/*
 * ion_drm_heap_free - release a buffer back to the carveout pool.
 *
 * Zeroes the pages first (ion_heap_buffer_zero) so stale secure contents
 * never leak to the next allocation, then returns the physical range to
 * the gen_pool and frees the sg_table.  (The trailing kfree(table) is
 * outside this view — presumably follows sg_free_table; verify.)
 */
118 static void ion_drm_heap_free(struct ion_buffer *buffer)
120 struct ion_heap *heap = buffer->heap;
121 struct sg_table *table = buffer->priv_virt;
122 struct page *page = sg_page(table->sgl);
123 ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
125 ion_heap_buffer_zero(buffer);
126 ion_drm_free(heap, paddr, buffer->size);
127 sg_free_table(table);
/*
 * ion_drm_heap_map_dma - hand out the sg_table built at allocation time.
 * No per-map work is needed; the table lives for the buffer's lifetime.
 */
131 static struct sg_table *ion_drm_heap_map_dma(struct ion_heap *heap,
132 struct ion_buffer *buffer)
134 return buffer->priv_virt;
/*
 * ion_drm_heap_unmap_dma - counterpart of map_dma; nothing to undo since
 * map_dma only returned the long-lived table (body not visible here, but
 * no teardown is required for this scheme).
 */
137 static void ion_drm_heap_unmap_dma(struct ion_heap *heap,
138 struct ion_buffer *buffer)
/*
 * ion_drm_heap_mmap - userspace mmap is forbidden for secure buffers.
 * Logs and fails (the error return is outside this view — presumably a
 * negative errno; verify).  NOTE(review): "mmaping" in the log text is a
 * typo for "mmapping" but is a runtime string, left untouched here.
 */
143 static int ion_drm_heap_mmap(struct ion_heap *mapper,
144 struct ion_buffer *buffer,
145 struct vm_area_struct *vma)
147 pr_info("%s: mmaping from secure heap %s disallowed\n",
148 __func__, mapper->name);
/*
 * ion_drm_heap_map_kernel - kernel virtual mapping is forbidden for
 * secure buffers; log and fail (return value outside this view —
 * presumably ERR_PTR or NULL; verify).
 */
152 static void *ion_drm_heap_map_kernel(struct ion_heap *heap,
153 struct ion_buffer *buffer)
155 pr_info("%s: kernel mapping from secure heap %s disallowed\n",
156 __func__, heap->name);
/*
 * ion_drm_heap_unmap_kernel - no-op counterpart of map_kernel (which never
 * creates a mapping); body not visible here.
 */
160 static void ion_drm_heap_unmap_kernel(struct ion_heap *heap,
161 struct ion_buffer *buffer)
166 #ifdef CONFIG_ROCKCHIP_IOMMU
/*
 * ion_drm_heap_map_iommu - map the buffer's sg list into a device's IOVA
 * space via the Rockchip IOVMM layer, recording the result in @data.
 *
 * NOTE(review): the "%x" specifier for table->sgl->dma_address on the
 * pr_debug line may mismatch dma_addr_t on LPAE/64-bit configs — confirm
 * dma_addr_t width for this platform.
 */
167 static int ion_drm_heap_map_iommu(struct ion_buffer *buffer,
168 struct device *iommu_dev,
169 struct ion_iommu_map *data,
170 unsigned long iova_length,
174 struct sg_table *table = (struct sg_table*)buffer->priv_virt;
176 data->iova_addr = rockchip_iovmm_map(iommu_dev, table->sgl, 0, iova_length);
177 pr_debug("%s: map %x -> %lx\n", __func__, table->sgl->dma_address,
/* rockchip_iovmm_map() encodes failure as an error value in the address. */
179 if (IS_ERR_VALUE(data->iova_addr)) {
180 pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__,
182 ret = data->iova_addr;
186 data->mapped_size = iova_length;
/*
 * ion_drm_heap_unmap_iommu - tear down an IOVMM mapping created by
 * ion_drm_heap_map_iommu().
 *
 * NOTE(review): lacks the `static` qualifier every sibling heap op has —
 * likely an oversight that pollutes the global namespace; confirm no
 * external declaration depends on it before adding `static`.
 */
192 void ion_drm_heap_unmap_iommu(struct device *iommu_dev,
193 struct ion_iommu_map *data)
195 pr_debug("%s: unmap %x@%lx\n", __func__, data->mapped_size,
197 rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
/*
 * ION heap-ops vtable for the secure DRM carveout heap.  map_user and
 * map_kernel are wired to the "disallowed" stubs by design; IOMMU hooks
 * are compiled in only for Rockchip IOMMU configs.  (Closing #endif and
 * "};" fall outside this view.)
 */
203 static struct ion_heap_ops drm_heap_ops = {
204 .allocate = ion_drm_heap_allocate,
205 .free = ion_drm_heap_free,
206 .phys = ion_drm_heap_phys,
207 .map_dma = ion_drm_heap_map_dma,
208 .unmap_dma = ion_drm_heap_unmap_dma,
209 .map_user = ion_drm_heap_mmap,
210 .map_kernel = ion_drm_heap_map_kernel,
211 .unmap_kernel = ion_drm_heap_unmap_kernel,
212 #ifdef CONFIG_ROCKCHIP_IOMMU
213 .map_iommu = ion_drm_heap_map_iommu,
214 .unmap_iommu = ion_drm_heap_unmap_iommu,
/*
 * ion_drm_heap_create - instantiate a secure DRM heap over the platform
 * carveout described by @heap_data (base + size).
 *
 * Syncs and zeroes the carveout pages up front, then builds a gen_pool
 * covering [base, base+size) and wires up drm_heap_ops.
 * Returns the embedded ion_heap, or ERR_PTR(-ENOMEM) on failure.
 *
 * NOTE(review): gen_pool_create(8, -1) sets a min-alloc order of 8, i.e.
 * 256 BYTE granularity — the "256KB align" comment appears wrong (256KB
 * would be order 18); confirm intent.
 * NOTE(review): printk() lacks a KERN_ level — should be pr_info()/
 * pr_debug(); also "%x" for size and "%lx" for base should be checked
 * against their actual types.
 */
218 struct ion_heap *ion_drm_heap_create(struct ion_platform_heap *heap_data)
220 struct ion_drm_heap *drm_heap;
226 page = pfn_to_page(PFN_DOWN(heap_data->base));
227 size = heap_data->size;
229 printk("%s: %x@%lx\n", __func__, size, heap_data->base);
/* Make CPU-side zeroing visible to devices before handing memory out. */
231 ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
233 ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
237 drm_heap = kzalloc(sizeof(struct ion_drm_heap), GFP_KERNEL);
239 return ERR_PTR(-ENOMEM);
241 drm_heap->pool = gen_pool_create(8, -1); // 256KB align
242 if (!drm_heap->pool) {
244 return ERR_PTR(-ENOMEM);
246 drm_heap->base = heap_data->base;
/* -1: no NUMA node preference for gen_pool bookkeeping allocations. */
247 gen_pool_add(drm_heap->pool, drm_heap->base, heap_data->size, -1);
248 drm_heap->heap.ops = &drm_heap_ops;
249 drm_heap->heap.type = ION_HEAP_TYPE_DRM;
250 // drm_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
252 return &drm_heap->heap;
/*
 * ion_drm_heap_destroy - tear down a heap made by ion_drm_heap_create().
 * Destroys the gen_pool; the kfree(drm_heap) presumably follows outside
 * this view — verify to rule out a leak.
 */
255 void ion_drm_heap_destroy(struct ion_heap *heap)
257 struct ion_drm_heap *drm_heap =
258 container_of(heap, struct ion_drm_heap, heap);
260 gen_pool_destroy(drm_heap->pool);